| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
from sklearn.preprocessing import normalize
import numpy as np
class HMM():
    ''' HMM class expecting a transition matrix only...
'''
def __init__(self,A):
'''
:param A: transition matrix, A_ij is p(Z_t=j|Z_t-1=i)
'''
        assert np.allclose(np.sum(A, axis=1), 1), 'rows of A must sum to 1'
# transpose to get left eigen vector
eigen_vals,eigen_vecs = np.linalg.eig(A.T)
        ind = np.where(np.isclose(eigen_vals, 1))[0] # eigenvalue of 1 corresponds to the stationary distribution
        stationary = eigen_vecs[:, ind[0]].real
        self.stationary_dist = stationary / stationary.sum() # normalise to a probability row vector
self.A = A
@staticmethod
def forward(x,k,N,A,phi,stationary_dist):
alpha = np.zeros((k,N)) # init alpha vect to store alpha vals for each z_k (rows)
alpha[:,0] = np.log((phi[:,0] * stationary_dist))
for t in np.arange(1,N):
            max_alpha_t = max(alpha[:,t-1]) # alphas are already logs, therefore exp to cancel
            exp_alpha_t = np.exp(alpha[:,t-1]-max_alpha_t) # exp of (alpha - max) for numerical stability
            alpha_t = phi[:,t]*(exp_alpha_t.T.dot(A)) # make sure there is no underflow here...
            alpha[:,t] = np.log(alpha_t) + max_alpha_t # take log and add back max (already in logs)
            # this may be so small that it underflows?
return alpha
@staticmethod
def calc_phi(x,stationary_dist):
phi = np.zeros(x.shape)
for t in range(x.shape[1]):
phi[:,t] = x[:,t]#/stationary_dist
return phi
@staticmethod
def backward(x,k,N,A,phi,stationary_dist,alpha):
beta = np.zeros((k,N))
posterior = np.zeros((k,N))
beta[:,N-1] = 1 # minus one for pythons indexing
posterior_t = np.exp(alpha[:,N-1]+beta[:,N-1])
posterior_t /= sum(posterior_t)
posterior[:,N-1] = posterior_t
for t in range(0,N-1)[::-1]: # python actually starts N-2 if [::-1]
#print(t,end=',')
max_beta_t = max(beta[:,t+1]) # previous beta
exp_beta_t = np.exp(beta[:,t+1]-max_beta_t)
beta_t = A.dot((phi[:,t+1]*exp_beta_t))# is this correct?
            # phi goes inside the dot product as it depends on the state at t+1
beta[:,t] = np.log(beta_t)
posterior_t = np.exp(alpha[:,t]+beta[:,t])
            posterior_t /= sum(posterior_t) # normalise, as alpha*beta is only proportional to the posterior
posterior[:,t] = posterior_t
return beta, posterior
@staticmethod
def calc_phi_from_emission_matrix(x,phi_mat,stationary_dist):
phi = np.zeros((phi_mat.shape[0],x.shape[0]))
for t in range(x.shape[0]):
phi[:,t] = phi_mat[:,x[t]]
return phi
def forward_backward(self,x, phi_mat = None):
        '''
        If phi_mat is provided, x is assumed to be a 1d vector of discrete emissions.
        Else, if phi_mat is None, x is assumed to be a 2d array of p(z_t|x_t),
        where rows index the hidden state and columns index the timepoint t.
        Returns the posterior distribution p(z_t | x_1..x_N) for each timepoint.
        '''
if phi_mat is None:
self.phi =self.calc_phi(x,self.stationary_dist)
k = x.shape[0]
N = x.shape[1]
else:
self.phi =self.calc_phi_from_emission_matrix(x,phi_mat,self.stationary_dist)
N = x.shape[0]
k = phi_mat.shape[0]
self.alpha = self.forward(x,k,N,self.A,self.phi,self.stationary_dist)
self.beta, self.posterior = self.backward(x,k,N,self.A,self.phi,self.stationary_dist,self.alpha)
return self.posterior
def get_state_emission_probs(emissions, annotated_states):
n_states = len(np.unique(annotated_states))
n_emiss = len(np.unique(emissions))
emis_mat = np.zeros(shape= (n_states,n_emiss))
for i,label in enumerate(annotated_states.astype('int')):
emis = emissions.astype('int')[i]
emis_mat[label, emis] += 1
emis_probs = normalize(emis_mat, axis = 1, norm='l1')
return emis_probs
def get_state_transition_probs(labels):
if len (labels.shape) > 1:
labels = np.ravel(labels)
    tp = np.zeros(shape= (2,2)) # todo why is this hardcoded?
for i, label in enumerate(labels[:-1]):
next_label = int(labels[i+1])
label = int(label)
tp[label,next_label] += 1
tp = normalize(tp, axis = 1, norm='l1')
return tp
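# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original pyecog module) showing the
# two input modes of HMM.forward_backward() described in its docstring. All
# matrices and probabilities below are made-up illustration values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # 2-state transition matrix, rows sum to 1 (A_ij = p(Z_t = j | Z_t-1 = i)).
    A = np.array([[0.9, 0.1],
                  [0.2, 0.8]])
    hmm = HMM(A)
    # Mode 1: x is a (k states x N timepoints) array of p(z_t | x_t).
    x_soft = np.array([[0.7, 0.6, 0.2, 0.1],
                       [0.3, 0.4, 0.8, 0.9]])
    print(hmm.forward_backward(x_soft))
    # Mode 2: x is a 1d vector of discrete emissions plus an emission matrix,
    # where phi_mat[i, e] = p(x_t = e | z_t = i).
    phi_mat = np.array([[0.8, 0.2],
                        [0.3, 0.7]])
    emissions = np.array([0, 0, 1, 1])
    print(hmm.forward_backward(emissions, phi_mat=phi_mat))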
| jcornford/pyecog | pyecog/ndf/hmm_pyecog.py | Python | mit | 4,391 |
from model_mommy.recipe import Recipe, seq, foreign_key
from cla_common.money_interval.models import MoneyInterval
from diagnosis.tests.mommy_recipes import diagnosis_yes
from ..models import (
Category,
EligibilityCheck,
Property,
Savings,
Case,
PersonalDetails,
ContactResearchMethod,
Income,
Deductions,
Person,
ThirdPartyDetails,
AdaptationDetails,
MatterType,
MediaCode,
MediaCodeGroup,
CaseNotesHistory,
EODDetails,
EODDetailsCategory,
)
category = Recipe(Category, name=seq("Name"), code=seq("Code"), order=seq(0))
income = Recipe(
Income,
earnings=MoneyInterval("per_month", pennies=2200),
self_employment_drawings=MoneyInterval("per_month", pennies=0),
benefits=MoneyInterval("per_month", pennies=0),
tax_credits=MoneyInterval("per_month", pennies=0),
child_benefits=MoneyInterval("per_month", pennies=0),
maintenance_received=MoneyInterval("per_month", pennies=0),
pension=MoneyInterval("per_month", pennies=0),
other_income=MoneyInterval("per_week", pennies=2200),
)
savings = Recipe(Savings)
deductions = Recipe(
Deductions,
income_tax=MoneyInterval("per_week", pennies=2200),
national_insurance=MoneyInterval("per_4week", pennies=2200),
maintenance=MoneyInterval("per_year", pennies=2200),
childcare=MoneyInterval("per_week", pennies=2200),
mortgage=MoneyInterval("per_week", pennies=2200),
rent=MoneyInterval("per_week", pennies=2200),
)
person = Recipe(Person)
full_person = Recipe(
Person, income=foreign_key(income), savings=foreign_key(savings), deductions=foreign_key(deductions)
)
eligibility_check = Recipe(
EligibilityCheck,
category=foreign_key(category),
dependants_young=5,
dependants_old=6,
you=foreign_key(person),
partner=foreign_key(person),
)
eligibility_check_yes = Recipe(
EligibilityCheck,
category=foreign_key(category),
dependants_young=5,
dependants_old=6,
you=foreign_key(person),
partner=foreign_key(person),
state="yes",
)
property = Recipe(Property, eligibility_check=foreign_key(eligibility_check))
contact_research_method = Recipe(ContactResearchMethod, method="PHONE")
personal_details = Recipe(
PersonalDetails,
mobile_phone=seq(555),
home_phone=seq(7777),
title="Dr",
street=seq("Street"),
postcode=seq("postcode"),
full_name=seq("fullname"),
)
research_method = Recipe(ContactResearchMethod, personal_details_id=foreign_key(personal_details))
thirdparty_details = Recipe(ThirdPartyDetails, personal_details=foreign_key(personal_details))
adaptation_details = Recipe(AdaptationDetails)
matter_type1 = Recipe(MatterType, level=1)
matter_type2 = Recipe(MatterType, level=2)
media_code_group = Recipe(MediaCodeGroup)
media_code = Recipe(MediaCode, group=foreign_key(media_code_group))
empty_case = Recipe(Case)
case = Recipe(
Case,
eligibility_check=foreign_key(eligibility_check),
personal_details=foreign_key(personal_details),
media_code=foreign_key(media_code),
)
eligible_case = Recipe(
Case,
eligibility_check=foreign_key(eligibility_check_yes),
diagnosis=foreign_key(diagnosis_yes),
personal_details=foreign_key(personal_details),
media_code=foreign_key(media_code),
)
notes_history = Recipe(CaseNotesHistory)
eod_details = Recipe(EODDetails, case=foreign_key(case))
eod_details_category = Recipe(EODDetailsCategory, eod_details=foreign_key(eod_details))
| ministryofjustice/cla_backend | cla_backend/apps/legalaid/tests/mommy_recipes.py | Python | mit | 3,481 |
"""\
Copyright (c) 2009 Paul J. Davis <[email protected]>
This file is part of hypercouch which is released under the MIT license.
"""
import time
import unittest
import couchdb
COUCHURI = "http://127.0.0.1:5984/"
TESTDB = "hyper_tests"
class AttrTest(unittest.TestCase):
def setUp(self):
self.srv = couchdb.Server(COUCHURI)
if TESTDB in self.srv:
del self.srv[TESTDB]
self.db = self.srv.create(TESTDB)
self.db["_design/tests"] = {
"ft_index": """\
function(doc) {
if(doc.body) index(doc.body);
if(doc.foo) property("foo", doc.foo);
if(doc.bar) property("bar", doc.bar);
}
"""
}
self._wait()
def tearDown(self):
del self.srv[TESTDB]
def _query(self, **kwargs):
resp, data = self.db.resource.get("_fti", **kwargs)
return data
def _wait(self, expect=0, retries=10):
data = self._query(q="*.**")
while retries > 0 and len(data["rows"]) != expect:
retries -= 1
time.sleep(0.2)
data = self._query(q="*.**")
if retries < 1:
raise RuntimeError("Failed to find expected index state.")
def test_attr(self):
docs = [{"_id": str(i), "body": "This is document %d" % i, "foo": i, "bar": str(i*i)} for i in range(10)]
self.db.update(docs)
self._wait(expect=10)
data = self._query(q="document", foo="NUMEQ 3")
self.assertEqual(data["total_rows"], 1)
self.assertEqual(data["rows"][0]["id"], "3")
data = self._query(q="document", foo="NUMBT 2 4", order="foo NUMA")
self.assertEqual(data["total_rows"], 3)
for i in range(2,5):
self.assertEqual(data["rows"][i-2]["id"], str(i))
data = self._query(q="document", bar="STREW 0")
self.assertEqual(data["total_rows"], 1)
self.assertEqual(data["rows"][0]["id"], "0")
data = self._query(q="*.**", foo="NUMLE 4", bar="NUMGE 9", order="bar NUMD")
self.assertEqual(data["total_rows"], 2)
self.assertEqual(data["rows"][0]["id"], "4")
self.assertEqual(data["rows"][1]["id"], "3")
| benoitc/hypercouch | tests/attr_test.py | Python | mit | 2,262 |
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class CreatesExe(Signature):
name = "creates_exe"
description = "Creates a Windows executable on the filesystem"
severity = 2
categories = ["generic"]
authors = ["Cuckoo Developers"]
minimum = "2.0"
    # This is a signature template. It should be used as a skeleton for
    # creating custom signatures, and is therefore disabled by default.
# It doesn't verify whether a .exe is actually being created, but
# it matches files being opened with any access type, including
# read and attributes lookup.
enabled = False
def on_complete(self):
match = self.check_file(pattern=".*\\.exe$", regex=True)
if match:
self.mark_ioc("file", match)
return True
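# ---------------------------------------------------------------------------
# A hypothetical example (not part of Cuckoo) of how the skeleton above might
# be adapted into an enabled custom signature. The name, description and file
# pattern below are illustrative assumptions, not a shipped Cuckoo signature.
# ---------------------------------------------------------------------------
class CreatesDll(Signature):
    name = "creates_dll"
    description = "Creates a Windows DLL on the filesystem"
    severity = 2
    categories = ["generic"]
    authors = ["Example Author"]
    minimum = "2.0"
    # Enabled, unlike the skeleton above, so it actually runs on reports.
    enabled = True

    def on_complete(self):
        # Same matching approach as the skeleton, but for .dll files.
        match = self.check_file(pattern=".*\\.dll$", regex=True)
        if match:
            self.mark_ioc("file", match)
            return True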
| mburakergenc/Malware-Detection-using-Machine-Learning | cuckoo/modules/signatures/creates_exe.py | Python | mit | 997 |
default_app_config = 'quran_tafseer.apps.QuranTafseerConfig'
| EmadMokhtar/tafseer_api | quran_tafseer/__init__.py | Python | mit | 61 |
#!/usr/bin/env python
from __future__ import print_function
import httplib2
import os, re, sys, time
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
p = argparse.ArgumentParser(parents=[tools.argparser])
p.add_argument("file",help="Path for the picture to post.")
flags = p.parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
#SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def timestamp(filename):
"""
Get timestamp from given filename(xxx-yyyymmddhhmmss.jpg).
Not used for now.
"""
t = re.split('[-.]',filename)[1]
t = time.strptime(t,"%Y%m%d%H%M%S")
# TODO: How to take care of timezone?
return time.strftime("%Y-%m-%dT%H:%M:%S%z",t)
def main():
"""
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v3', http=http)
filepath = flags.file # saved file name
filename = os.path.basename(filepath)
mid = os.environ['MUKOYAMA_ID']
delete_img = os.environ['MUKOYAMA_DELETE_IMG']
foldername = "pictures-%s" % (mid)
print("local: %s" % (filepath))
print("remote: %s/%s" % (foldername,filename))
# Checks if the folder already exists and gets the ID.
items = service.files().list(q="name = '%s'" % (foldername)).execute().get('files')
if len(items) == 0:
body = {'name': foldername,'mimeType': "application/vnd.google-apps.folder"}
fid = service.files().create(body=body).execute().get('id')
else:
fid = items[0]['id']
# Sends the picture.
body = { 'name': filename, 'parents':[fid] }
service.files().create(media_body=filepath, body=body).execute()
if delete_img == 'true':
print(filename+" deleted on local.")
os.remove(filepath)
if __name__ == '__main__':
main()
| koki-h/motion_for_mukoyama | scripts/post2googledrive.py | Python | mit | 3,272 |
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
def __init__(
self, plotly_name="colorscale", parent_name="scattergeo.marker", **kwargs
):
super(ColorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scattergeo/marker/_colorscale.py | Python | mit | 515 |
__author__ = 'sathley'
from .error import ValidationError, UserAuthError, AppacitiveError
from .file import AppacitiveFile
from .push import AppacitivePushNotification
from .appacitive_email import AppacitiveEmail
from .response import AppacitiveCollection, PagingInfo
from .entity import AppacitiveEntity
from .object import AppacitiveObject
from .endpoint import AppacitiveEndpoint
from .connection import AppacitiveConnection
from .user import AppacitiveUser
from .device import AppacitiveDevice
from .appcontext import ApplicationContext
from .link import Link
from .node import GraphNode
from .graphsearch import AppacitiveGraphSearch
from .query import PropertyFilter, TagFilter, AttributeFilter, AggregateFilter, GeoFilter
from .query import BooleanOperator
from .query import AppacitiveQuery
| appacitive/pyappacitive | pyappacitive/__init__.py | Python | mit | 805 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FrontendEndpointsOperations:
"""FrontendEndpointsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.frontdoor.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_front_door(
self,
resource_group_name: str,
front_door_name: str,
**kwargs
) -> AsyncIterable["_models.FrontendEndpointsListResult"]:
"""Lists all of the frontend endpoints within a Front Door.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FrontendEndpointsListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.frontdoor.models.FrontendEndpointsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendEndpointsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_front_door.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FrontendEndpointsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_front_door.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints'} # type: ignore
async def get(
self,
resource_group_name: str,
front_door_name: str,
frontend_endpoint_name: str,
**kwargs
) -> "_models.FrontendEndpoint":
"""Gets a Frontend endpoint with the specified name within the specified Front Door.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:param frontend_endpoint_name: Name of the Frontend endpoint which is unique within the Front
Door.
:type frontend_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendEndpoint, or the result of cls(response)
:rtype: ~azure.mgmt.frontdoor.models.FrontendEndpoint
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendEndpoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}'} # type: ignore
async def _enable_https_initial(
self,
resource_group_name: str,
front_door_name: str,
frontend_endpoint_name: str,
custom_https_configuration: "_models.CustomHttpsConfiguration",
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._enable_https_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(custom_https_configuration, 'CustomHttpsConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_enable_https_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/enableHttps'} # type: ignore
async def begin_enable_https(
self,
resource_group_name: str,
front_door_name: str,
frontend_endpoint_name: str,
custom_https_configuration: "_models.CustomHttpsConfiguration",
**kwargs
) -> AsyncLROPoller[None]:
"""Enables a frontendEndpoint for HTTPS traffic.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:param frontend_endpoint_name: Name of the Frontend endpoint which is unique within the Front
Door.
:type frontend_endpoint_name: str
:param custom_https_configuration: The configuration specifying how to enable HTTPS.
:type custom_https_configuration: ~azure.mgmt.frontdoor.models.CustomHttpsConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._enable_https_initial(
resource_group_name=resource_group_name,
front_door_name=front_door_name,
frontend_endpoint_name=frontend_endpoint_name,
custom_https_configuration=custom_https_configuration,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_enable_https.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/enableHttps'} # type: ignore
async def _disable_https_initial(
self,
resource_group_name: str,
front_door_name: str,
frontend_endpoint_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._disable_https_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disable_https_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/disableHttps'} # type: ignore
async def begin_disable_https(
self,
resource_group_name: str,
front_door_name: str,
frontend_endpoint_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Disables a frontendEndpoint for HTTPS traffic.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:param frontend_endpoint_name: Name of the Frontend endpoint which is unique within the Front
Door.
:type frontend_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._disable_https_initial(
resource_group_name=resource_group_name,
front_door_name=front_door_name,
frontend_endpoint_name=frontend_endpoint_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_disable_https.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/disableHttps'} # type: ignore
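# ---------------------------------------------------------------------------
# A minimal, hypothetical usage sketch (not part of this generated file). It
# assumes an async FrontDoorManagementClient built elsewhere exposes this
# operations group as `frontend_endpoints`; the resource names are placeholders.
# ---------------------------------------------------------------------------
async def disable_endpoint_https(client) -> None:
    # begin_disable_https returns an AsyncLROPoller; awaiting result() blocks
    # until the long-running operation completes.
    poller = await client.frontend_endpoints.begin_disable_https(
        resource_group_name="my-resource-group",
        front_door_name="my-front-door",
        frontend_endpoint_name="my-endpoint",
    )
    await poller.result()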
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/aio/operations/_frontend_endpoints_operations.py | Python | mit | 23,414 |
import logging
import os
import re
import shutil
import subprocess
import time
from teuthology import misc
from teuthology.util.flock import FileLock
from teuthology.config import config
from teuthology.contextutil import MaxWhileTries, safe_while
from teuthology.exceptions import BootstrapError, BranchNotFoundError, CommitNotFoundError, GitError
log = logging.getLogger(__name__)
# Repos must not have been fetched in the last X seconds to get fetched again.
# Similar for teuthology's bootstrap
FRESHNESS_INTERVAL = 60
def touch_file(path):
out = subprocess.check_output(('touch', path))
if out:
log.info(out)
def is_fresh(path):
"""
Has this file been modified in the last FRESHNESS_INTERVAL seconds?
Returns False if the file does not exist
"""
if not os.path.exists(path):
return False
elif time.time() - os.stat(path).st_mtime < FRESHNESS_INTERVAL:
return True
return False
def build_git_url(project, project_owner='ceph'):
"""
Return the git URL to clone the project
"""
if project == 'ceph-qa-suite':
base = config.get_ceph_qa_suite_git_url()
elif project == 'ceph-cm-ansible':
base = config.get_ceph_cm_ansible_git_url()
elif project == 'ceph':
base = config.get_ceph_git_url()
else:
base = 'https://github.com/{project_owner}/{project}'
    url_templ = re.sub(r'\.git$', '', base)
return url_templ.format(project_owner=project_owner, project=project)
def ls_remote(url, ref):
"""
Return the current sha1 for a given repository and ref
:returns: The sha1 if found; else None
"""
sha1 = None
cmd = "git ls-remote {} {}".format(url, ref)
result = subprocess.check_output(
cmd, shell=True).split()
if result:
sha1 = result[0].decode()
log.debug("{} -> {}".format(cmd, sha1))
return sha1
def enforce_repo_state(repo_url, dest_path, branch, commit=None, remove_on_error=True):
"""
Use git to either clone or update a given repo, forcing it to switch to the
specified branch.
:param repo_url: The full URL to the repo (not including the branch)
:param dest_path: The full path to the destination directory
:param branch: The branch.
:param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch.
:param remove_on_error: Whether or not to remove dest_dir when an error occurs
:raises: BranchNotFoundError if the branch is not found;
CommitNotFoundError if the commit is not found;
GitError for other errors
"""
validate_branch(branch)
sentinel = os.path.join(dest_path, '.fetched')
# sentinel to track whether the repo has checked out the intended
# version, in addition to being cloned
repo_reset = os.path.join(dest_path, '.fetched_and_reset')
try:
if not os.path.isdir(dest_path):
clone_repo(repo_url, dest_path, branch, shallow=commit is None)
elif not commit and not is_fresh(sentinel):
set_remote(dest_path, repo_url)
fetch_branch(dest_path, branch)
touch_file(sentinel)
else:
log.info("%s was just updated or references a specific commit; assuming it is current", dest_path)
if commit and os.path.exists(repo_reset):
return
reset_repo(repo_url, dest_path, branch, commit)
touch_file(repo_reset)
# remove_pyc_files(dest_path)
except (BranchNotFoundError, CommitNotFoundError):
if remove_on_error:
shutil.rmtree(dest_path, ignore_errors=True)
raise
def clone_repo(repo_url, dest_path, branch, shallow=True):
"""
Clone a repo into a path
:param repo_url: The full URL to the repo (not including the branch)
:param dest_path: The full path to the destination directory
:param branch: The branch.
:param shallow: Whether to perform a shallow clone (--depth 1)
:raises: BranchNotFoundError if the branch is not found;
GitError for other errors
"""
validate_branch(branch)
log.info("Cloning %s %s from upstream", repo_url, branch)
if branch.startswith('refs/'):
clone_repo_ref(repo_url, dest_path, branch)
return
args = ['git', 'clone', '--single-branch']
if shallow:
args.extend(['--depth', '1'])
args.extend(['--branch', branch, repo_url, dest_path])
proc = subprocess.Popen(
args,
cwd=os.path.dirname(dest_path),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
not_found_str = "Remote branch %s not found" % branch
out = proc.stdout.read().decode()
result = proc.wait()
# Newer git versions will bail if the branch is not found, but older ones
# will not. Fortunately they both output similar text.
if not_found_str in out:
log.error(out)
if result == 0:
# Old git left a repo with the wrong branch. Remove it.
shutil.rmtree(dest_path, ignore_errors=True)
raise BranchNotFoundError(branch, repo_url)
elif result != 0:
# Unknown error
raise GitError("git clone failed!")
def rsstrip(s, suffix):
return s[:-len(suffix)] if s.endswith(suffix) else s
def lsstrip(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s
def remote_ref_from_ref(ref, remote='origin'):
if ref.startswith('refs/pull/'):
return 'refs/remotes/' + remote + lsstrip(ref, 'refs')
elif ref.startswith('refs/heads/'):
return 'refs/remotes/' + remote + lsstrip(ref, 'refs/heads')
raise GitError("Unsupported ref '%s'" % ref)
def local_branch_from_ref(ref):
if ref.startswith('refs/pull/'):
s = lsstrip(ref, 'refs/pull/')
s = rsstrip(s, '/merge')
s = rsstrip(s, '/head')
return "PR#%s" % s
elif ref.startswith('refs/heads/'):
return lsstrip(ref, 'refs/heads/')
raise GitError("Unsupported ref '%s', try 'refs/heads/' or 'refs/pull/'" % ref)
def fetch_refspec(ref):
if '/' in ref:
remote_ref = remote_ref_from_ref(ref)
return "+%s:%s" % (ref, remote_ref)
else:
# looks like a branch name
return ref
def clone_repo_ref(repo_url, dest_path, ref):
branch_name = local_branch_from_ref(ref)
remote_ref = remote_ref_from_ref(ref)
misc.sh('git init %s' % dest_path)
misc.sh('git remote add origin %s' % repo_url, cwd=dest_path)
#misc.sh('git fetch --depth 1 origin %s' % fetch_refspec(ref),
# cwd=dest_path)
fetch_branch(dest_path, ref)
misc.sh('git checkout -b %s %s' % (branch_name, remote_ref),
cwd=dest_path)
def set_remote(repo_path, repo_url):
"""
Call "git remote set-url origin <repo_url>"
:param repo_url: The full URL to the repo (not including the branch)
:param repo_path: The full path to the repository
:raises: GitError if the operation fails
"""
log.debug("Setting repo remote to %s", repo_url)
proc = subprocess.Popen(
('git', 'remote', 'set-url', 'origin', repo_url),
cwd=repo_path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if proc.wait() != 0:
out = proc.stdout.read()
log.error(out)
raise GitError("git remote set-url failed!")
def fetch(repo_path):
"""
Call "git fetch -p origin"
:param repo_path: The full path to the repository
:raises: GitError if the operation fails
"""
log.info("Fetching from upstream into %s", repo_path)
proc = subprocess.Popen(
('git', 'fetch', '-p', 'origin'),
cwd=repo_path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if proc.wait() != 0:
out = proc.stdout.read().decode()
log.error(out)
raise GitError("git fetch failed!")
def fetch_branch(repo_path, branch, shallow=True):
"""
Call "git fetch -p origin <branch>"
:param repo_path: The full path to the repository on-disk
:param branch: The branch.
:param shallow: Whether to perform a shallow fetch (--depth 1)
:raises: BranchNotFoundError if the branch is not found;
GitError for other errors
"""
validate_branch(branch)
log.info("Fetching %s from origin", branch)
args = ['git', 'fetch']
if shallow:
args.extend(['--depth', '1'])
args.extend(['-p', 'origin', fetch_refspec(branch)])
proc = subprocess.Popen(
args,
cwd=repo_path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if proc.wait() != 0:
not_found_str = "fatal: couldn't find remote ref %s" % branch
out = proc.stdout.read().decode()
log.error(out)
if not_found_str in out.lower():
raise BranchNotFoundError(branch)
else:
raise GitError("git fetch failed!")
def reset_repo(repo_url, dest_path, branch, commit=None):
"""
:param repo_url: The full URL to the repo (not including the branch)
:param dest_path: The full path to the destination directory
:param branch: The branch.
:param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch.
:raises: BranchNotFoundError if the branch is not found;
CommitNotFoundError if the commit is not found;
GitError for other errors
"""
validate_branch(branch)
if '/' in branch:
reset_branch = lsstrip(remote_ref_from_ref(branch), 'refs/remotes/')
else:
reset_branch = 'origin/%s' % branch
reset_ref = commit or reset_branch
log.info('Resetting repo at %s to %s', dest_path, reset_ref)
# This try/except block will notice if the requested branch doesn't
# exist, whether it was cloned or fetched.
try:
subprocess.check_output(
('git', 'reset', '--hard', reset_ref),
cwd=dest_path,
)
except subprocess.CalledProcessError:
if commit:
raise CommitNotFoundError(commit, repo_url)
raise BranchNotFoundError(branch, repo_url)
def remove_pyc_files(dest_path):
subprocess.check_call(
['find', dest_path, '-name', '*.pyc', '-exec', 'rm', '{}', ';']
)
def validate_branch(branch):
if ' ' in branch:
raise ValueError("Illegal branch name: '%s'" % branch)
def fetch_repo(url, branch, commit=None, bootstrap=None, lock=True):
"""
Make sure we have a given project's repo checked out and up-to-date with
the current branch requested
:param url: The URL to the repo
:param bootstrap: An optional callback function to execute. Gets passed a
dest_dir argument: the path to the repo on-disk.
:param branch: The branch we want
:param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch.
:returns: The destination path
"""
src_base_path = config.src_base_path
if not os.path.exists(src_base_path):
os.mkdir(src_base_path)
ref_dir = ref_to_dirname(commit or branch)
dirname = '%s_%s' % (url_to_dirname(url), ref_dir)
dest_path = os.path.join(src_base_path, dirname)
# only let one worker create/update the checkout at a time
lock_path = dest_path.rstrip('/') + '.lock'
with FileLock(lock_path, noop=not lock):
with safe_while(sleep=10, tries=60) as proceed:
try:
while proceed():
try:
enforce_repo_state(url, dest_path, branch, commit)
if bootstrap:
sentinel = os.path.join(dest_path, '.bootstrapped')
if commit and os.path.exists(sentinel) or is_fresh(sentinel):
log.info(
"Skipping bootstrap as it was already done in the last %ss",
FRESHNESS_INTERVAL,
)
break
bootstrap(dest_path)
touch_file(sentinel)
break
except GitError:
log.exception("Git error encountered; retrying")
except BootstrapError:
log.exception("Bootstrap error encountered; retrying")
except MaxWhileTries:
shutil.rmtree(dest_path, ignore_errors=True)
raise
return dest_path
def ref_to_dirname(branch):
if '/' in branch:
return local_branch_from_ref(branch)
else:
return branch
def url_to_dirname(url):
"""
Given a URL, returns a string that's safe to use as a directory name.
Examples:
        git@git.ceph.com/ceph-qa-suite.git -> git.ceph.com_ceph-qa-suite
git://git.ceph.com/ceph-qa-suite.git -> git.ceph.com_ceph-qa-suite
https://github.com/ceph/ceph -> github.com_ceph_ceph
https://github.com/liewegas/ceph.git -> github.com_liewegas_ceph
file:///my/dir/has/ceph.git -> my_dir_has_ceph
"""
# Strip protocol from left-hand side
string = re.match('(?:.*://|.*@)(.*)', url).groups()[0]
# Strip '.git' from the right-hand side
    string = re.sub(r'\.git$', '', string)
# Replace certain characters with underscores
string = re.sub('[:/]', '_', string)
# Remove duplicate underscores
string = re.sub('_+', '_', string)
# Remove leading or trailing underscore
string = string.strip('_')
return string
def fetch_qa_suite(branch, commit=None, lock=True):
"""
Make sure ceph-qa-suite is checked out.
:param branch: The branch to fetch
:param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch.
:returns: The destination path
"""
return fetch_repo(config.get_ceph_qa_suite_git_url(),
branch, commit, lock=lock)
def fetch_teuthology(branch, commit=None, lock=True):
"""
Make sure we have the correct teuthology branch checked out and up-to-date
:param branch: The branch we want
:param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch.
:returns: The destination path
"""
url = config.ceph_git_base_url + 'teuthology.git'
return fetch_repo(url, branch, commit, bootstrap_teuthology, lock)
def bootstrap_teuthology(dest_path):
log.info("Bootstrapping %s", dest_path)
# This magic makes the bootstrap script not attempt to clobber an
# existing virtualenv. But the branch's bootstrap needs to actually
# check for the NO_CLOBBER variable.
env = os.environ.copy()
env['NO_CLOBBER'] = '1'
cmd = './bootstrap'
boot_proc = subprocess.Popen(cmd, shell=True, cwd=dest_path, env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = boot_proc.communicate()
returncode = boot_proc.wait()
log.info("Bootstrap exited with status %s", returncode)
if returncode != 0:
        for line in out.decode(errors='replace').splitlines():
            log.warning(line.strip())
venv_path = os.path.join(dest_path, 'virtualenv')
log.info("Removing %s", venv_path)
shutil.rmtree(venv_path, ignore_errors=True)
raise BootstrapError("Bootstrap failed!")
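# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of teuthology) showing how the helpers in
# this module fit together. The branch name below is an illustrative placeholder.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    url = build_git_url('ceph-qa-suite')   # repo URL resolved from teuthology config
    sha1 = ls_remote(url, 'master')        # sha1 at the tip of the branch, or None
    print('tip of master:', sha1)
    # Clone or refresh a checkout under config.src_base_path; fetch_repo()
    # serialises concurrent callers on the same checkout with a file lock.
    print('checked out at:', fetch_repo(url, branch='master', commit=sha1))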
| SUSE/teuthology | teuthology/repo_utils.py | Python | mit | 15,687 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pfss', '0023_auto_20150515_1059'),
]
operations = [
migrations.AddField(
model_name='specialability',
name='isDefense',
field=models.BooleanField(default=False),
preserve_default=True,
),
migrations.AddField(
model_name='specialability',
name='isGeneral',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
| qu0zl/pfss | pfss/migrations/0024_auto_20150515_1129.py | Python | mit | 646 |
# -*- coding: utf-8 -*-
import locale
import logging
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext as _
from pytz import timezone as tz
from apps.events.models import AttendanceEvent, Attendee
from apps.marks.models import Mark, MarkUser, Suspension
from apps.mommy import schedule
from apps.mommy.registry import Task
from apps.payment.models import Payment, PaymentDelay
class PaymentReminder(Task):
@staticmethod
def run():
logging.basicConfig()
# logger = logging.getLogger()
# logger.info("Event payment job started")
locale.setlocale(locale.LC_ALL, "nb_NO.UTF-8")
# All payments using deadline
event_payments = Payment.objects.filter(
payment_type=2, active=True,
content_type=ContentType.objects.get_for_model(AttendanceEvent)
)
today = timezone.now()
for payment in event_payments:
# Number of days until the deadline
deadline_diff = (payment.deadline.date() - today.date()).days
if deadline_diff <= 0:
if PaymentReminder.not_paid(payment):
PaymentReminder.send_deadline_passed_mail(payment)
PaymentReminder.notify_committee(payment)
PaymentReminder.set_marks(payment)
PaymentReminder.suspend(payment)
payment.active = False
payment.save()
elif deadline_diff < 3:
if PaymentReminder.not_paid(payment):
PaymentReminder.send_reminder_mail(payment)
@staticmethod
def send_reminder_mail(payment):
subject = _("Betaling: ") + payment.description()
content = render_to_string('payment/email/reminder_notification.txt', {
'payment_description': payment.description(),
'payment_deadline': payment.deadline.astimezone(tz('Europe/Oslo')).strftime("%-d %B %Y kl. %H:%M"),
'payment_url': settings.BASE_URL + payment.content_object.event.get_absolute_url(),
'payment_email': payment.responsible_mail()
})
receivers = PaymentReminder.not_paid_mail_addresses(payment)
EmailMessage(subject, content, payment.responsible_mail(), [], receivers).send()
@staticmethod
def send_deadline_passed_mail(payment):
subject = _("Betalingsfrist utgått: ") + payment.description()
content = render_to_string('payment/email/reminder_deadline_passed.txt', {
'payment_description': payment.description(),
'payment_url': settings.BASE_URL + payment.content_object.event.get_absolute_url(),
'payment_email': payment.responsible_mail()
})
receivers = PaymentReminder.not_paid_mail_addresses(payment)
EmailMessage(subject, content, payment.responsible_mail(), [], receivers).send()
@staticmethod
def send_missed_payment_mail(payment):
# NOTE
        # This method does nothing. It was presumably left here in case the rules
        # for expired payments are altered.
subject = _("Betalingsfrist utgått: ") + payment.description()
message = _("Hei, du har ikke betalt for følgende arrangement: ") + payment.description()
message += _("Fristen har gått ut, og du har mistet plassen din på arrangementet")
message += _("\nFor mer info om arrangementet se:")
message += "\n" + str(settings.BASE_URL + payment.content_object.event.get_absolute_url())
message += _("Dersom du har spørsmål kan du sende mail til ") + payment.responsible_mail()
message += _("\n\nMvh\nLinjeforeningen Online")
logging.getLogger(__name__).warn(
'Call to method that does nothing. Should it send a mail? Subject: %s' % subject
)
@staticmethod
def notify_committee(payment):
subject = _("Manglende betaling: ") + payment.description()
content = render_to_string('payment/email/payment_expired_list.txt', {
'payment_description': payment.description(),
'payment_users': PaymentReminder.not_paid(payment)
})
receivers = [payment.responsible_mail()]
EmailMessage(subject, content, "[email protected]", [], receivers).send()
@staticmethod
def not_paid(payment):
attendees = payment.content_object.attending_attendees_qs
not_paid_users = [attendee.user for attendee in attendees if not attendee.paid]
# Removes users with active payment delays from the list
return [user for user in not_paid_users if user not in payment.payment_delay_users()]
@staticmethod
def not_paid_mail_addresses(payment):
# Returns users in the list of attendees but not in the list of paid users
return [user.email for user in PaymentReminder.not_paid(payment)]
@staticmethod
def set_marks(payment):
mark = Mark()
mark.title = _("Manglende betaling på %s") % payment.description()
mark.category = 6 # Manglende betaling
mark.description = _("Du har fått en prikk fordi du ikke har betalt for et arrangement.")
mark.save()
for user in PaymentReminder.not_paid(payment):
user_entry = MarkUser()
user_entry.user = user
user_entry.mark = mark
user_entry.save()
@staticmethod
def unattend(payment):
for user in PaymentReminder.not_paid(payment):
Attendee.objects.get(event=payment.content_object, user=user).delete()
@staticmethod
def suspend(payment):
for user in PaymentReminder.not_paid(payment):
suspension = Suspension()
suspension.title = "Manglende betaling"
suspension.user = user
suspension.payment_id = payment.id
suspension.description = """
Du har ikke betalt for et arangement du har vært med på. For å fjerne denne suspensjonen må du betale.\n
Mer informasjon om betalingen finner du her: """
suspension.description += str(
settings.BASE_URL + payment.content_object.event.get_absolute_url()
)
suspension.save()
class PaymentDelayHandler(Task):
@staticmethod
def run():
logging.basicConfig()
logger = logging.getLogger("feedback")
logger.info("Payment delay handler started")
locale.setlocale(locale.LC_ALL, "nb_NO.UTF-8")
payment_delays = PaymentDelay.objects.filter(active=True)
for payment_delay in payment_delays:
unattend_deadline_passed = payment_delay.payment.content_object.unattend_deadline < payment_delay.valid_to
if payment_delay.valid_to < timezone.now():
PaymentDelayHandler.handle_deadline_passed(payment_delay, unattend_deadline_passed)
logger.info("Deadline passed: " + str(payment_delay))
elif (payment_delay.valid_to.date() - timezone.now().date()).days <= 2:
PaymentDelayHandler.send_notification_mail(payment_delay, unattend_deadline_passed)
logger.info("Notification sent to: " + str(payment_delay.user))
# TODO handle committee notifying
@staticmethod
def handle_deadline_passed(payment_delay, unattend_deadline_passed):
if unattend_deadline_passed:
PaymentDelayHandler.set_mark(payment_delay)
PaymentDelayHandler.handle_suspensions(payment_delay)
else:
PaymentDelayHandler.set_mark(payment_delay)
PaymentDelayHandler.unattend(payment_delay)
payment_delay.active = False
payment_delay.save()
PaymentDelayHandler.send_deadline_passed_mail(payment_delay, unattend_deadline_passed)
@staticmethod
def handle_suspensions(payment_delay):
suspension = Suspension()
suspension.title = "Manglende betaling"
suspension.user = payment_delay.user
suspension.payment_id = payment_delay.payment.id
suspension.description = """
Du har ikke betalt for et arangement du har vært med på. For å fjerne denne suspensjonen må du betale.\n
Mer informasjon om betalingen finner du her: """
suspension.description += str(
settings.BASE_URL + payment_delay.payment.content_object.event.get_absolute_url()
)
suspension.save()
@staticmethod
def send_deadline_passed_mail(payment_delay, unattend_deadline_passed):
payment = payment_delay.payment
subject = _("Betalingsfrist utgått: ") + payment.description()
content = render_to_string('payment/email/delay_reminder_deadline_passed.txt', {
'payment_description': payment.description(),
'payment_unattend_passed': unattend_deadline_passed,
'payment_email': payment.responsible_mail()
})
receivers = [payment_delay.user.email]
EmailMessage(subject, content, payment.responsible_mail(), [], receivers).send()
@staticmethod
def send_notification_mail(payment_delay, unattend_deadline_passed):
payment = payment_delay.payment
subject = _("Husk betaling for ") + payment.description()
valid_to = payment_delay.valid_to.astimezone(tz('Europe/Oslo'))
        # If the event unattend deadline has not passed when the payment deadline passes,
        # then the user will be automatically unattended and given a mark.
        # Otherwise the unattend deadline has passed, so the user will not be unattended,
        # but is given a mark and cannot attend any other events until payment is received.
content = render_to_string('payment/email/delay_reminder_notification.txt', {
'payment_description': payment.description(),
'payment_deadline': valid_to.strftime("%-d. %B %Y kl. %H:%M").encode("utf-8"),
'payment_url': settings.BASE_URL + payment.content_object.event.get_absolute_url(),
'payment_unattend_passed': unattend_deadline_passed,
'payment_email': payment.responsible_mail()
})
receivers = [payment_delay.user.email]
EmailMessage(subject, content, payment.responsible_mail(), [], receivers).send()
@staticmethod
def set_mark(payment_delay):
mark = Mark()
mark.title = _("Manglende betaling på %s") % payment_delay.payment.description()
mark.category = 6 # Manglende betaling
mark.description = _("Du har fått en prikk fordi du ikke har betalt for et arrangement.")
mark.save()
user_entry = MarkUser()
user_entry.user = payment_delay.user
user_entry.mark = mark
user_entry.save()
@staticmethod
def unattend(payment_delay):
Attendee.objects.get(event=payment_delay.payment.content_object, user=payment_delay.user).delete()
schedule.register(PaymentReminder, day_of_week='mon-sun', hour=7, minute=30)
schedule.register(PaymentDelayHandler, day_of_week='mon-sun', hour=7, minute=45)
| dotKom/onlineweb4 | apps/payment/mommy.py | Python | mit | 11,223 |
# main_program.py
# runs the parser and produces sentences that politicians might say
# J. Hassler Thurston
# RocHack Hackathon December 7, 2013
import os
from scraper import *
from get_words import *
from parser2 import *
# TODO: use os.path.join() instead of just referencing local (Mac OS X) filename
speeches_to_get = ['http://www.presidentialrhetoric.com/speeches/08.22.13.print.html',
'http://www.presidentialrhetoric.com/speeches/01.21.13.print.html',
'http://www.presidentialrhetoric.com/speeches/02.04.13.print.html'
]
csv_files = []
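# Rough pipeline, as wired together in main() below: get_and_export() scrapes each
# speech and writes a per-politician CSV under database/, add_word_fun() loads the
# dictionary and adds words to those CSVs, and parse() then generates sentences
# from one of the exported files.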
def get_and_export(website):
lines = get_line_list(website)
words = get_words(lines[0])
speech_name = get_speech_name(website)
# make the relevant politician's folder if it doesn't already exist
# from http://stackoverflow.com/questions/1274405/how-to-create-new-folder
folder = 'database/' + str(lines[1]) + '/'
if not os.path.exists(folder): os.makedirs(folder)
filename = folder + speech_name + '.csv'
csv_files.append(filename)
export_to_csv(words, filename)
# gets the speech name from the website (only supports presidentialrhetoric.com currently)
def get_speech_name(website):
relevant_string = website.split('/')[-1].split('.print.html')
return relevant_string[0]
# export functionality
def export_fun():
for speech in speeches_to_get:
get_and_export(speech)
# adding words functionality
def add_word_fun():
get_dictionary()
for csv_file in csv_files:
add_words(csv_file)
# main program
def main():
#export_fun()
add_word_fun()
parse('database/Barack Obama/01.21.13.csv')
if __name__ == '__main__':
get_initial_rules()
| jthurst3/newspeeches | main_program.py | Python | mit | 1,612 |
import os
import json
cfg = {}
cfg_file = os.path.join(os.path.dirname(__file__), 'conf', "cfg.json")
def set_config_file(fp):
global cfg_file
cfg_file = fp
def load_config():
global cfg
with open(cfg_file) as fp:
content = fp.read()
cfg = json.loads(content)
return cfg
def dump_config():
with open(cfg_file, 'w') as fp:
fp.write(json.dumps(cfg))
def get_global_config(key, default=None):
return cfg.get(key, default)
def set_global_config(key, value):
global cfg
cfg[key] = value
dump_config()
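# Usage sketch (assumes conf/cfg.json exists and contains valid JSON; the
# "site_title" key is purely illustrative):
#
#   load_config()
#   title = get_global_config("site_title", "untitled")
#   set_global_config("site_title", "new title")  # persists via dump_config()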
| c4pt0r/purelog | utils.py | Python | mit | 569 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._managed_service_identity_client import ManagedServiceIdentityClient
__all__ = ['ManagedServiceIdentityClient']
| Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-msi/azure/mgmt/msi/aio/__init__.py | Python | mit | 586 |
from kivy.tests.common import GraphicUnitTest
from kivy.lang import Builder
from kivy.base import EventLoop
from kivy.weakproxy import WeakProxy
from kivy.uix.dropdown import DropDown
from kivy.input.motionevent import MotionEvent
KV = '''
# +/- copied from ActionBar example + edited for the test
FloatLayout:
ActionBar:
pos_hint: {'top': 1}
ActionView:
use_separator: True
ActionPrevious:
title: 'Action Bar'
with_previous: False
ActionOverflow:
ActionButton:
text: 'Btn0'
icon: 'atlas://data/images/defaulttheme/audio-volume-high'
ActionButton:
text: 'Btn1'
ActionButton:
text: 'Btn2'
ActionGroup:
id: group1
text: 'group 1'
ActionButton:
id: group1button
text: 'Btn3'
on_release:
setattr(root, 'g1button', True)
ActionButton:
text: 'Btn4'
ActionGroup:
id: group2
dropdown_width: 200
text: 'group 2'
ActionButton:
id: group2button
text: 'Btn5'
on_release:
setattr(root, 'g2button', True)
ActionButton:
text: 'Btn6'
ActionButton:
text: 'Btn7'
'''
class UTMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UTMotionEvent, self).depack(args)
class TouchPoint(UTMotionEvent):
def __init__(self, raw_x, raw_y):
win = EventLoop.window
super(UTMotionEvent, self).__init__(
"unittest", 1, {
"x": raw_x / float(win.width),
"y": raw_y / float(win.height),
}
)
# press & release
EventLoop.post_dispatch_input("begin", self)
EventLoop.post_dispatch_input("end", self)
EventLoop.idle()
class ActionBarTestCase(GraphicUnitTest):
framecount = 0
def move_frames(self, t):
for i in range(t):
EventLoop.idle()
def setUp(self):
from os import environ
environ['KIVY_UNITTEST_NOBUILDERTRACE'] = '1'
environ['KIVY_UNITTEST_NOPARSERTRACE'] = '1'
super(self.__class__, self).setUp()
def tearDown(self):
from os import environ
del environ['KIVY_UNITTEST_NOBUILDERTRACE']
del environ['KIVY_UNITTEST_NOPARSERTRACE']
def clean_garbage(self, *args):
for child in self._win.children[:]:
self._win.remove_widget(child)
self.move_frames(5)
def check_dropdown(self, present=True):
any_list = [
isinstance(child, DropDown)
for child in self._win.children
]
# mustn't allow more than one DropDown opened!
self.assertLess(sum(any_list), 2)
# passed
if not present and not any(any_list):
return
elif present and any(any_list):
return
print('DropDown either missing, or isn\'t supposed to be there')
self.assertTrue(False)
def test_1_openclose(self, *args):
# click on Group 2 to open its DropDown
# - DropDown shows up
# then click away
# - Group 2 DropDown disappears
# click on Group 1 to open its DropDown
# - DropDown shows up
# then click away
# - Group 1 DropDown disappears
self._win = EventLoop.window
self.clean_garbage()
root = Builder.load_string(KV)
self.render(root)
self.assertLess(len(self._win.children), 2)
group2 = root.ids.group2
group1 = root.ids.group1
self.move_frames(5)
# no DropDown present yet
self.check_dropdown(present=False)
self.assertFalse(group2.is_open)
self.assertFalse(group1.is_open)
items = ((group2, group1), (group1, group2))
for item in items:
active, passive = item
# click on active Group
TouchPoint(*active.center)
# active Group DropDown shows up
self.check_dropdown(present=True)
gdd = WeakProxy(self._win.children[0])
# active Group DropDown == value in WeakProxy
self.assertIn(gdd, self._win.children)
self.assertEqual(gdd, self._win.children[0])
self.assertTrue(active.is_open)
self.assertFalse(passive.is_open)
# click away
TouchPoint(0, 0)
# wait for closed Group DropDown to disappear
# go to the next frame after the DropDown disappeared
self.move_frames(5)
# no DropDown is open
self.assertNotEqual(gdd, self._win.children[0])
self.assertLess(len(self._win.children), 2)
self.check_dropdown(present=False)
self.assertFalse(active.is_open)
self.assertFalse(passive.is_open)
self._win.remove_widget(root)
def test_2_switch(self, *args):
# click on Group 2 to open its DropDown
# - DropDown shows up
# then click on Group 1 to open its DropDown
# - Group 2 DropDown disappears, Group 1 DropDown shows up
# click away
# - no DropDown is opened
self._win = EventLoop.window
self.clean_garbage()
root = Builder.load_string(KV)
self.render(root)
self.assertLess(len(self._win.children), 2)
group2 = root.ids.group2
group1 = root.ids.group1
self.move_frames(5)
# no DropDown present yet
self.check_dropdown(present=False)
self.assertFalse(group2.is_open)
self.assertFalse(group1.is_open)
# click on Group 2
TouchPoint(*group2.center)
# Group 2 DropDown shows up
self.check_dropdown(present=True)
g2dd = WeakProxy(self._win.children[0])
# Group 2 DropDown == value in WeakProxy
self.assertIn(g2dd, self._win.children)
self.assertEqual(g2dd, self._win.children[0])
self.assertTrue(group2.is_open)
self.assertFalse(group1.is_open)
# click on Group 1
TouchPoint(*group1.center)
# wait for closed Group 2 DropDown to disappear
# and for Group 1 DropDown to appear (there are 2 DDs now)
# go to the next frame after the DropDown disappeared
self.move_frames(5)
# Group 1 DropDown != value in WeakProxy (Group 2 DD)
self.assertNotEqual(g2dd, self._win.children[0])
self.assertFalse(group2.is_open)
self.assertTrue(group1.is_open)
self.check_dropdown(present=True)
# click away from ActionBar
TouchPoint(0, 0)
# wait for closed Group DropDown to disappear
# go to the next frame after the DropDown disappeared
self.move_frames(5)
# no DropDown present in Window
self.check_dropdown(present=False)
self.assertFalse(group2.is_open)
self.assertFalse(group1.is_open)
self.assertNotIn(g2dd, self._win.children)
self._win.remove_widget(root)
def test_3_openpress(self, *args):
# click on Group 2 to open its DropDown
# - DropDown shows up
# then click on Group 2 DropDown button
# - DropDown disappears
# click on Group 1 to open its DropDown
# - DropDown shows up
# then click on Group 1 DropDown button
# - DropDown disappears
self._win = EventLoop.window
self.clean_garbage()
root = Builder.load_string(KV)
self.render(root)
self.assertLess(len(self._win.children), 2)
group2 = root.ids.group2
group2button = root.ids.group2button
group1 = root.ids.group1
group1button = root.ids.group1button
self.move_frames(5)
# no DropDown present yet
self.check_dropdown(present=False)
self.assertFalse(group2.is_open)
self.assertFalse(group1.is_open)
items = (
(group2, group1, group2button),
(group1, group2, group1button)
)
for item in items:
active, passive, button = item
# click on active Group
TouchPoint(*active.center)
# active Group DropDown shows up
self.check_dropdown(present=True)
gdd = WeakProxy(self._win.children[0])
# active Group DropDown == value in WeakProxy
self.assertIn(gdd, self._win.children)
self.assertEqual(gdd, self._win.children[0])
self.assertTrue(active.is_open)
self.assertFalse(passive.is_open)
# click on active Group DropDown Button (needed to_window)
TouchPoint(*button.to_window(*button.center))
self.assertTrue(getattr(
root, active.text[0::6] + 'button'
))
# wait for closed Group DropDown to disappear
# go to the next frame after the DropDown disappeared
self.move_frames(5)
# no DropDown is open
self.assertNotEqual(gdd, self._win.children[0])
self.assertLess(len(self._win.children), 2)
self.assertFalse(active.is_open)
self.assertFalse(passive.is_open)
self.check_dropdown(present=False)
self._win.remove_widget(root)
def test_4_openmulti(self, *args):
# click on Group to open its DropDown
# - DropDown shows up
# then click on Group DropDown button
# - DropDown disappears
# repeat
self._win = EventLoop.window
self.clean_garbage()
root = Builder.load_string(KV)
self.render(root)
self.assertLess(len(self._win.children), 2)
group2 = root.ids.group2
group2button = root.ids.group2button
group1 = root.ids.group1
group1button = root.ids.group1button
self.move_frames(5)
# no DropDown present yet
self.check_dropdown(present=False)
self.assertFalse(group2.is_open)
items = ((group2, group2button), (group1, group1button))
for item in items:
group, button = item
for _ in range(5):
# click on Group
TouchPoint(*group.center)
# Group DropDown shows up
self.check_dropdown(present=True)
gdd = WeakProxy(self._win.children[0])
# Group DropDown == value in WeakProxy
self.assertIn(gdd, self._win.children)
self.assertEqual(gdd, self._win.children[0])
self.assertTrue(group.is_open)
# click on Group DropDown Button
TouchPoint(*button.to_window(*button.center))
# wait for closed Group DropDown to disappear
# go to the next frame after the DropDown disappeared
self.move_frames(5)
# no DropDown is open
self.assertNotEqual(gdd, self._win.children[0])
self.assertFalse(group.is_open)
self.check_dropdown(present=False)
self._win.remove_widget(root)
if __name__ == '__main__':
import unittest
unittest.main()
| jegger/kivy | kivy/tests/test_uix_actionbar.py | Python | mit | 11,580 |
#!/usr/bin/env python3
# Copyright (c) 2021 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test being able to connect to the same devnet"""
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, connect_nodes
class ConnectDevnetNodes(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.chain = "devnet"
self.num_nodes = 2
def setup_network(self):
self.add_nodes(self.num_nodes)
self.start_node(0)
self.start_node(1)
connect_nodes(self.nodes[0], 1)
self.sync_all()
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
assert_equal(self.nodes[0].getconnectioncount(), 2) # 1 out dashd + 1 p2p
if __name__ == '__main__':
ConnectDevnetNodes().main()
| thelazier/dash | test/functional/p2p_connect_to_devnet.py | Python | mit | 1,020 |
import os
from datetime import datetime, timezone, timedelta
from pyshorteners import Shortener
class EQData():
def __init__(self, input):
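        # `input` is expected to be a USGS-style GeoJSON feature dict (values
        # below are illustrative only):
        #   {'properties': {'mag': 5.4, 'place': '10km N of ...', 'time': 1490000000000,
        #                   'url': 'http://...'},
        #    'geometry': {'coordinates': [lon, lat, depth]}}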
self.input = input
self.properties = self.input['properties']
self.geometry = self.input['geometry']
self.coordinates = self.geometry['coordinates']
self.magnitude = self.properties['mag']
self.place = self.properties['place']
self.time = self.properties['time']
self.url = self.properties['url']
self.lon, self.lat, self.depth = self.coordinates
def map_url(self):
"""Returns Google Map URL of given coordinates"""
        lon, lat, _ = self.coordinates
url = "http://www.google.com/maps/place/%s,%s" % (lat,lon)
return url
def minutes_ago(self):
"""Returns minutes since event"""
d0 = datetime.fromtimestamp(self.time/1000, tz=timezone(timedelta(hours=8)))
d1 = datetime.now(tz=timezone(timedelta(hours=8)))
delta = d1-d0
return int(delta.total_seconds()/60)
def ftime(self):
"""Returns formatted time"""
time = datetime.fromtimestamp(self.time/1000, tz=timezone(timedelta(hours=8)))
return time.strftime("%I:%M%p")
def url_shorten(self):
# shortener = Shortener('Tinyurl')
shortener = Shortener('Google', api_key=os.environ['API_KEY'])
return shortener.short(self.url)
def to_sentence(self):
"""Makes the ugly json readable"""
sentence = "A magnitude %s earthquake hits %s at %s UTC+8 (%sm ago) - Details %s #earthquakePH" \
% (self.magnitude, self.place, self.ftime(), self.minutes_ago(), self.url_shorten())
if len(sentence) > 140:
sentence = sentence.replace("Philippines", "PH")
        return sentence
| leoorpillaiii/earthquakePH-bot | eqdata.py | Python | mit | 1,812 |
"""
A workflow to process the URL scan & join results.
"""
from __future__ import absolute_import
import argparse
import datetime
import logging
import json
import apache_beam as beam
from apache_beam.utils.options import PipelineOptions
from apache_beam.utils.options import SetupOptions
from apache_beam.utils.options import StandardOptions
from apache_beam.utils.options import GoogleCloudOptions
from apache_beam.utils.options import WorkerOptions
from apache_beam.internal.clients import bigquery
from apache_beam.coders import Coder
# https://github.com/golang/go/blob/master/src/crypto/tls/common.go#L25-L28
TLS_VERSIONS = {
0x0300: "SSL 3.0",
0x0301: "TLS 1.0",
0x0302: "TLS 1.1",
0x0303: "TLS 1.2"
}
# https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L369-L390
CIPHER_SUITES = {
0x0005: "TLS_RSA_WITH_RC4_128_SHA",
0x000a: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
0x002f: "TLS_RSA_WITH_AES_128_CBC_SHA",
0x0035: "TLS_RSA_WITH_AES_256_CBC_SHA",
0x003c: "TLS_RSA_WITH_AES_128_CBC_SHA256",
0x009c: "TLS_RSA_WITH_AES_128_GCM_SHA256",
0x009d: "TLS_RSA_WITH_AES_256_GCM_SHA384",
0xc007: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
0xc009: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
0xc00a: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
0xc011: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
0xc012: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
0xc013: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
0xc014: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
0xc023: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
0xc027: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
0xc02f: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
0xc02b: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
0xc030: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
0xc02c: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
0xcca8: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
0xcca9: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
}
def process_TLS(record):
data = record['TLS']
if data is None:
return
record['TLS'] = {
'HandshakeComplete': data['HandshakeComplete'],
'NegotiatedProtocol': data['NegotiatedProtocol'],
'ServerName': data['ServerName'],
'Version': TLS_VERSIONS.get(data['Version'], 'unknown'),
'CipherSuite': CIPHER_SUITES.get(data['CipherSuite'], 'unknown')
}
def process_headers(record):
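    # Reshape the header mapping into a repeated record for BigQuery, e.g.
    # {'Server': ['nginx']} becomes [{'Name': 'Server', 'Value': ['nginx']}]
    # (the 'Server'/'nginx' values here are illustrative only).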
headers = record['Headers']
record['Headers'] = []
for name, values in headers.iteritems():
record['Headers'].append({'Name': name, 'Value': values})
def process_record(record):
"""Example input record:
https://gist.github.com/igrigorik/97835410c5daee52bc2d4272fc097827
"""
if record['HTTPResponses']:
for req in record['HTTPResponses']:
process_headers(req)
process_TLS(req)
if record['HTTPSResponses']:
for req in record['HTTPSResponses']:
process_headers(req)
process_TLS(req)
yield record
class JsonCoder(Coder):
"""A JSON coder interpreting each line as a JSON string."""
def encode(self, x):
return json.dumps(x)
def decode(self, x):
return json.loads(x)
def field(name, kind='string', mode='nullable'):
f = bigquery.TableFieldSchema()
f.name = name
f.type = kind
f.mode = mode
return f
def build_response_schema(name):
rsp = field(name, 'record', 'repeated')
rsp.fields.append(field('RequestURL'))
rsp.fields.append(field('Status', 'integer'))
rsp.fields.append(field('Protocol'))
head = field('Headers', 'record', 'repeated')
head.fields.append(field('Name'))
head.fields.append(field('Value', 'string', 'repeated'))
tls = field('TLS', 'record')
tls.fields.append(field('CipherSuite'))
tls.fields.append(field('ServerName'))
tls.fields.append(field('HandshakeComplete', 'boolean'))
tls.fields.append(field('Version'))
tls.fields.append(field('NegotiatedProtocol'))
rsp.fields.append(head)
rsp.fields.append(tls)
return rsp
def run(argv=None):
"""Runs the workflow."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True,
help='Input file to process.')
parser.add_argument('--output',
required=True,
help='Output BigQuery table: PROJECT:DATASET.TABLE')
known_args, pipeline_args = parser.parse_known_args(argv)
schema = bigquery.TableSchema()
schema.fields.append(field('Alexa_rank', 'integer'))
schema.fields.append(field('Alexa_domain'))
schema.fields.append(field('DMOZ_title'))
schema.fields.append(field('DMOZ_description'))
schema.fields.append(field('DMOZ_url'))
schema.fields.append(field('DMOZ_topic', 'string', 'repeated'))
schema.fields.append(field('Host'))
schema.fields.append(field('FinalLocation'))
schema.fields.append(field('HTTPOk', 'boolean'))
schema.fields.append(field('HTTPSOk', 'boolean'))
schema.fields.append(field('HTTPSOnly', 'boolean'))
schema.fields.append(build_response_schema('HTTPResponses'))
schema.fields.append(build_response_schema('HTTPSResponses'))
schema.fields.append(field('Error'))
options = PipelineOptions(pipeline_args)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
options.view_as(SetupOptions).save_main_session = True
# https://cloud.google.com/dataflow/pipelines/specifying-exec-params
gc_options = options.view_as(GoogleCloudOptions)
gc_options.project = 'httparchive'
gc_options.job_name = 'host-scan-import-' + str(datetime.date.today())
gc_options.staging_location = 'gs://httparchive/dataflow-binaries'
gc_options.temp_location = 'gs://httparchive/dataflow-tmp'
wk_options = options.view_as(WorkerOptions)
wk_options.num_workers = 10
# options.view_as(StandardOptions).runner = 'DirectPipelineRunner'
options.view_as(StandardOptions).runner = 'DataflowPipelineRunner'
p = beam.Pipeline(options=options)
(p
| 'read' >> beam.Read(
beam.io.TextFileSource(known_args.input, coder=JsonCoder()))
| 'process' >> beam.FlatMap(process_record)
# | 'local-write' >> beam.Write(beam.io.TextFileSink('./results')))
| 'bq-write' >> beam.io.Write(
beam.io.BigQuerySink(
known_args.output,
schema=schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
)
)
p.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| HTTPArchive/hosts | dataflow.py | Python | mit | 6,464 |
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('twitter_oauth.oauth.views',
# Top Page
url(r'^oauth/$', 'index', name='index'),
# Twitter OAuth Authenticate
url(r'^oauth/get/$', 'get', name='get'),
# Callback
url(r'^oauth/get_callback/$', 'get_callback', name='get_callback'),
# Redirect Page of after Authenticate
url(r'^oauth/oauth_index/$', 'oauth_index', name='oauth_index'),
# Tweet
url(r'^oauth/post/$', 'post', name='post'),
)
| ketulive/twitter_oauth | twitter_oauth/urls.py | Python | mit | 638 |
#-------------------------------------------------------------------------------
# Name: Airborne Troops- Countdown to D-Day *.PAK
# Purpose: Extract Archive
#
# Author: Eric Van Hoven
#
# Created: 30/08/2017
# Copyright: (c) Eric Van Hoven 2017
# Licence: <MIT License>
#-------------------------------------------------------------------------------
from inc_noesis import *
def registerNoesisTypes():
handle = noesis.register("Airborne Troops Count-Down to D-Day", ".PAK")
noesis.setHandlerExtractArc(handle, pacExtract)
return 1
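# PAK layout as parsed by pacExtract below (inferred from this code, not from an
# official format spec): 8 unused bytes, a uint32 offset to the entry table and a
# uint32 file count; each table entry is a 0x0A-terminated file name followed by a
# uint32 data offset and a uint32 data size.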
def pacExtract(fileName, fileLen, justChecking):
with open(fileName, "rb") as fs:
if justChecking:
return 1
fs.read(8) #null uints
tail = noeUnpack("<I", fs.read(4))[0]
fcount = noeUnpack("<I", fs.read(4))[0]
for i in range(fcount):
fs.seek(tail, 0)
fnsz = 0
while True:
if(noeUnpack("<b", fs.read(1))[0]== 0xA):
break
else:
fnsz += 1
fs.seek(tail, 0)
fileName = noeStrFromBytes(fs.read(fnsz))
fs.read(1) #0xA byte
offset = noeUnpack("<I", fs.read(4))[0]
size = noeUnpack("<I", fs.read(4))[0]
tail = fs.tell()
fs.seek(offset, 0)
print("Writing", fileName)
rapi.exportArchiveFile(fileName, fs.read(size))
return 1
| TheDeverEric/noesis-importers | Eric Van Hoven/fmt_airbornedday_pak.py | Python | mit | 1,523 |
from base.handler import Handler
class DesktopHandler(Handler):
"""Is a Handler specialization for a Desktop platform"""
def __init__(self, service):
super(DesktopHandler, self).__init__()
self.__service = service
def start_async(self, c, program_index=None, option_index=None):
"""Specialization of superclass start_async method
        Starts ASP solving asynchronously on a subset of data and options for a Desktop platform
"""
input_programs = self._collect_programs(program_index)
input_options = self._collect_options(option_index)
self.__service.start_async(c, input_programs, input_options)
def start_sync(self, program_index=None, option_index=None):
"""Specialization of superclass start_sync method
        Starts ASP solving synchronously on a subset of data and options for a Desktop platform
"""
input_programs = self._collect_programs(program_index)
input_options = self._collect_options(option_index)
return self.__service.start_sync(input_programs, input_options)
| SimoneLucia/EmbASP-Python | platforms/desktop/desktop_handler.py | Python | mit | 1,129 |
import numpy as np
from warnings import warn
from subprocess import Popen, PIPE, STDOUT
from tempfile import mktemp
import os
class Radio(object):
def __init__(self):
self.frequency = 446000000
self.bandwidth = 1000000
self.samplerate = 1000000
def _interleave(self, complex_iq):
# Interleave I and Q
intlv = np.zeros(2*complex_iq.size, dtype=np.float32)
intlv[0::2] = np.real(complex_iq)
intlv[1::2] = np.imag(complex_iq)
return intlv
def _clip(self, complex_iq, limit=1.0):
# Clips amplitude to level
clipped_samples = np.abs(complex_iq) > limit
if np.any(clipped_samples):
clipped = complex_iq
clipped[clipped_samples] = complex_iq[clipped_samples] / np.abs(complex_iq[clipped_samples])
warn('Some samples were clipped')
else:
clipped = complex_iq
return clipped
def transmit(self, complex_iq):
raise NotImplementedError('transmit not implemented for this radio')
def convert(self, complex_iq):
raise NotImplementedError('convert not implemented for this radio')
class GenericFloat(Radio):
"""
Generic interleaved float32 output for custom conversions
"""
def convert(self, complex_iq):
return self._interleave(complex_iq)
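# Rough usage sketch (assumes `iq` is a numpy array of complex baseband samples
# with magnitude <= 1.0; Hackrf/Bladerf are defined below):
#
#   iq = np.exp(2j * np.pi * 0.1 * np.arange(100000))   # example tone
#   hackrf_samples = Hackrf().convert(iq)    # interleaved int8 I/Q for hackrf_transfer
#   bladerf_samples = Bladerf().convert(iq)  # interleaved int16 I/Q for the bladeRF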
class Bladerf(Radio):
"""
Creates BladeRf formatted samples
"""
def convert(self, complex_iq):
intlv = self._interleave(complex_iq)
clipped = self._clip(intlv, limit=1.0)
converted = 2047. * clipped
bladerf_out = converted.astype(np.int16)
return bladerf_out
class BladeOut(GenericFloat):
"""
To be used with the bladeout tool, available on GitHub
"""
def __init__(self):
super(BladeOut, self).__init__()
self.txvga1 = -15
self.txvga2 = 20
def transmit(self, complex_iq):
intlv = self.convert(complex_iq)
bladeout = Popen(['bladeout', '-f', str(self.frequency), '-r', str(self.samplerate), '-b', str(self.bandwidth),
'-g', str(self.txvga1), '-G', str(self.txvga2)], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout = bladeout.communicate(input=intlv.tostring())
return stdout
class Hackrf(Radio):
def __init__(self):
super(Hackrf, self).__init__()
self.txvga = 0
self.rxvga = 0
self.rxlna = 0
def convert(self, complex_iq):
intlv = self._interleave(complex_iq)
clipped = self._clip(intlv)
converted = 127. * clipped
hackrf_out = converted.astype(np.int8)
return hackrf_out
def transmit(self, complex_iq):
hackrf_out = self.convert(complex_iq)
pipe_file = mktemp()
os.mkfifo(pipe_file)
hackout = Popen(['hackrf_transfer', '-f', str(self.frequency), '-s', str(self.samplerate), '-b', str(self.bandwidth),
'-x', str(self.txvga), '-t', pipe_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
pipe = open(pipe_file, 'wb')
pipe.write(hackrf_out)
pipe.close()
hackout.wait()
sout = hackout.communicate()
os.unlink(pipe_file)
        return sout
| polygon/spectrum_painter | spectrum_painter/radios.py | Python | mit | 3,230 |
#!/usr/bin/env python3
# -*- compile-command: "/usr/local/bin/python3 sim.py" -*-
#---------------------------------------------------------------------------
# encoding/decoding for machine learning
#
import sys, os, datetime, re, json, bz2, math
import basicbot_lib as bblib
DBG_MAX_UNITS = int(os.environ.get('DBG_MAX_UNITS', '75'))
# reserve 0 for unknown terrain & units
def mk_terrain_types():
types = list(set(bblib.TERRAIN_DEFENSE.keys()) - bblib.CAPTURABLE_TERRAIN)
for army_id in range(4):
types += [name+str(army_id) for name in list(bblib.CAPTURABLE_TERRAIN)]
return types
# sorting = reproducibility. unknown==0, shouldn't happen
TERRAIN_VALUES = dict( [(val,idx) for idx,val in enumerate(['unknown'] + sorted(mk_terrain_types()))] )
TERRAIN_NAMES = dict( [(val,key) for key,val in TERRAIN_VALUES.items()] )
UNIT_VALUES = { None: 0, '': 0 }
for unit in sorted(bblib.UNIT_TYPES.keys()):
UNIT_VALUES[unit] = len(UNIT_VALUES)
for carrier in ['Unicorn', 'Skateboard']:
for unit in bblib.LOADABLE_UNITS:
UNIT_VALUES[carrier+unit] = len(UNIT_VALUES)
if len(UNIT_VALUES) > 32: raise Exception('more than 32 UNIT_VALUES - update the code and delete the saved games.')
TERRAIN_NAMES = dict( [(val,key) for key,val in TERRAIN_VALUES.items()] )
UNIT_NAMES = dict( [(val,key) for key,val in UNIT_VALUES.items()] )
MOVED_STATES = { None: 0, '0': 1, '1': 2 }
def append_unit_info(tile, done):
if tile is None: tile = {}
unit_type = tile.get('unit_name')
if bblib.is_loaded_unicorn(tile) or bblib.is_loaded_skateboard(tile):
unit_type += tile['slot1_deployed_unit_name']
# TODO: health of loaded unit
bitmap = "{0:05b}".format(0 if done else UNIT_VALUES[unit_type])
# 1-4=army_id, 5=empty
bitmap += "{0:03b}".format(0 if done else int(tile.get('unit_army_id') or 4)+1)
if 'unit_name' in tile and 'health' not in tile: tile['health'] = "100"
health = tile.get('health')
# 1=empty, 2-7=0-100% in 20% increments
health_val = int(int(health if health else -20)/20)+2
bitmap += "{0:03b}".format(0 if done else health_val)
return bitmap
def emit_tile_loc(tile):
return "{0:05b}{0:05b}".format(tile['x'], tile['y'])
def emit_tile_terrain(tile, done):
return "{0:05b}".format(0 if done else TERRAIN_VALUES[
tile['terrain_name']+str(bblib.bldg_army_id(tile)) if
tile['terrain_name'] in bblib.CAPTURABLE_TERRAIN else tile['terrain_name']])
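# Bit layout produced by encode_board_state() below (derived from the code):
#   2 bits: army_id whose turn it is
#   per player: 1 bit resigned flag + 6 bits funds (in 1000s, capped at 63)
#   per tile (24*24 tiles): 5 bits terrain type (capturable terrain is encoded
#       per-owner, see TERRAIN_VALUES)
#   per unit slot (padded out to DBG_MAX_UNITS slots): 2 bits moved state +
#       5 bits unit type + 3 bits owning army + 3 bits health bucket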
def encode_board_state(army_id_turn, resigned, game_info, tiles_list, dbgloc=None):
def dbgbitmap(bitmap, msg, dbgloc=dbgloc):
if dbgloc is not None:
print('bitmap loc {:4d} contains {}'.format(dbgloc+len(bitmap), msg))
bitmap = []
dbgbitmap(bitmap, 'army_id_turn')
bitmap += "{0:02b}".format(army_id_turn) # up to 4 players
for player_info in game_info['players'].values():
army_id = player_info['army_id']
dbgbitmap(bitmap, 'resigned[army_id={}]'.format(army_id))
bitmap += "{0:01b}".format(resigned[army_id])
dbgbitmap(bitmap, 'funds[army_id={}]'.format(army_id))
bitmap += "{0:06b}".format(min(int(int(player_info['funds']) / 1000), 63)) # funds: up to 63,000
# TODO: augment with % fog?
# TODO: augment with # towns/castles?
bitmap_units = []
for tile_idx in range(24 * 24):
done = (tile_idx >= len(tiles_list))
tile = {} if done else tiles_list[tile_idx]
# note: don't write x/y coordinates
if tile_idx <= 1: dbgbitmap(bitmap, 'terrain={}'.format(0 if done else tile['terrain_name']))
bitmap += emit_tile_terrain(tile, done)
if bblib.has_unit(tile):
bitmap_unit = "{0:02b}".format(0 if done else MOVED_STATES[tile.get('moved')])
bitmap_unit += append_unit_info(tile, done)
bitmap_units.append(str(bitmap_unit))
if len(bitmap_units) > DBG_MAX_UNITS:
return None
null_unit_bitmap = "00" + append_unit_info({}, True)
for i in range(DBG_MAX_UNITS):
if i < len(bitmap_units): dbgbitmap(bitmap, 'info for unit #{}'.format(i+1))
bitmap += str(bitmap_units[i] if i < len(bitmap_units) else null_unit_bitmap)
# TODO: augment with # of visible enemies?
return bitmap
NO_TILE = {'x':0, 'y':0}
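# encode_move() below packs a single move into bits, roughly in this order: a few
# flag bits (stop_worker / has data / end_turn / skip), purchase info (unit type
# plus tile location), the move's source location, action flags
# (join/load/capture/unload) with the action location, then the attack flag,
# attack location, and attacker/defender unit + health info. The dbgbitmap()
# calls document the exact offsets.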
def encode_move(move, tiles_by_idx, dbgloc=None):
def dbgbitmap(bitmap, msg, dbgloc=dbgloc):
if dbgloc is not None:
print('bitmap loc {:4d} (move idx {}) contains {}'.format(
dbgloc+len(bitmap), len(bitmap), msg))
def emit_tile_info(boolval, idx, tiles_by_idx=tiles_by_idx):
tile = tiles_by_idx.get(idx, {'x':0, 'y':0}) if boolval else NO_TILE
return emit_tile_loc(tile)
def emit_bool(boolval):
return '{0:01b}'.format(1 if boolval else 0)
def append_bool(bitmap, skip, boolval):
bitmap += 0 if skip else emit_bool(boolval)
return bitmap, boolval
data = move['data']
dbgbitmap([], "stop_worker_num")
bitmap, done = append_bool([], False, move.get('stop_worker_num', '') != '')
dbgbitmap(bitmap, "move['data']")
bitmap, has_data = append_bool(bitmap, False, bool(move['data']))
dbgbitmap(bitmap, "end_turn")
bitmap, end_turn = append_bool(bitmap, False, bool(data.get('end_turn', False)))
dbgbitmap(bitmap, "has_data")
bitmap, skip = append_bool(bitmap, False, done or (not has_data))
dbgbitmap(bitmap, "has_purchase")
bitmap, has_purchase = append_bool(bitmap, skip, bool(data.get('purchase', False)))
bitmap += '0' # unused
purchase = data['purchase'] if has_purchase else {}
dbgbitmap(bitmap, "purchase unit_name")
bitmap += "{0:05b}".format(UNIT_VALUES[purchase['unit_name']] if has_purchase else 0)
dbgbitmap(bitmap, "purchase loc")
bitmap += emit_tile_info(has_purchase, bblib.movedict_xyidx(purchase))
movemove = data.get('move', False)
dbgbitmap(bitmap, "has_move")
bitmap, has_move = append_bool(bitmap, skip, bool(movemove) and not skip)
if not movemove: movemove = {'xCoordinate':-1,'yCoordinate':-1}
# TODO: no movement?
src_xyidx = bblib.movedict_xyidx(movemove)
dbgbitmap(bitmap, "move src_loc")
bitmap += emit_tile_info(has_move, src_xyidx)
dbgbitmap(bitmap, "is join?")
bitmap, _ = append_bool(bitmap, skip, movemove.get('unit_action') == 'join')
dbgbitmap(bitmap, "is load?")
bitmap, _ = append_bool(bitmap, skip, movemove.get('unit_action') == 'load')
dbgbitmap(bitmap, "is capture?")
bitmap, _ = append_bool(bitmap, skip, movemove.get('unit_action') == 'capture')
dbgbitmap(bitmap, "is unload?")
bitmap, has_unload = append_bool(bitmap, skip, movemove.get('unit_action') == 'unloadSlot1')
action_xyidx = int(movemove.get('y_coord_action', -1))*1000 + \
int(movemove.get('x_coord_action', -1))
dbgbitmap(bitmap, "action loc")
bitmap += emit_tile_info(has_unload, action_xyidx)
dbgbitmap(bitmap, "is attack?")
bitmap, has_attack = append_bool(bitmap, skip, 'x_coord_attack' in movemove)
attack_xyidx = int(movemove.get('y_coord_attack', -1))*1000 + \
int(movemove.get('x_coord_attack', -1))
dbgbitmap(bitmap, "attack loc")
bitmap += emit_tile_info(has_unload, attack_xyidx)
# augment with type & health of attacker & defender
dbgbitmap(bitmap, "attacker unit & health")
bitmap += append_unit_info(tiles_by_idx.get(src_xyidx, {}), skip)
dbgbitmap(bitmap, "defender unit & health")
bitmap += append_unit_info(tiles_by_idx.get(attack_xyidx, {}), skip)
return bitmap
def write_board_move_state(winning_army_id_str, board_move_states):
winning_army_id = int(winning_army_id_str)
filename = 'board-{}.txt.bz2'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S%f'))
fh = bz2.open(filename, 'w')
for i, bitmap in enumerate(board_move_states):
if i == 0:
print('writing board state to {}: {} moves, {} bits each'.format(
filename, len(board_move_states), len(bitmap)))
bitmap_str = "".join(bitmap)
army_id = int(bitmap_str[0:2], 2)
fh.write("{}\t{}\n".format("1" if army_id == winning_army_id else "0", bitmap_str)
.encode('utf-8'))
fh.close()
def write_board_move_state_json(winning_army_id_str, board_move_states_json):
winning_army_id = int(winning_army_id_str)
filename = 'board-{}.json.bz2'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S%f'))
fh = bz2.open(filename, 'w')
maxlen = 0
for i, jsondata in enumerate(board_move_states_json):
jsondata['move_led_to_win'] = (1 if jsondata['army_id'] == winning_army_id else 0)
maxlen = max(maxlen, len(json.dumps(jsondata)))
print('writing board state to {}: {} moves, {} max bytes, {:.0f} avg bytes'.format(
filename, len(board_move_states_json), maxlen,
1.0*len(json.dumps(board_move_states_json))/len(board_move_states_json) if
len(board_move_states_json) > 0 else 0))
fh.write(json.dumps(board_move_states_json).encode('utf-8'))
fh.close()
def is_move_attack(move):
movemove = move.get('data', {}).get('move')
return ('x_coord_attack' in movemove) if movemove else False
def encode_attack(move, tiles_by_idx, dbgloc=None):
def dbgbitmap(bitmap, msg, dbgloc=dbgloc):
if dbgloc is not None:
print('bitmap loc {:4d} (move idx {}) contains {}'.format(
dbgloc+len(bitmap), len(bitmap), msg))
def emit_tile_info(boolval, idx, tiles_by_idx=tiles_by_idx):
tile = tiles_by_idx.get(idx, {'x':0, 'y':0}) if boolval else NO_TILE
return emit_tile_loc(tile)
data = move['data']
movemove = data['move']
dbgbitmap(bitmap, "move src_loc")
bitmap += emit_tile_info(has_move, src_xyidx)
attack_xyidx = int(movemove.get('y_coord_attack', -1))*1000 + \
int(movemove.get('x_coord_attack', -1))
dbgbitmap(bitmap, "attack loc")
bitmap += emit_tile_info(has_unload, attack_xyidx)
dbgbitmap(bitmap, "attacker unit & health")
bitmap += append_unit_info(tiles_by_idx.get(src_xyidx, {}), skip)
defender_x, defender_y = int(movemove['x_coord_attack']), int(movemove['y_coord_attack'])
for dx in range(-2,3):
for dy in range(-2,3):
tile = tiles_by_idx.get(defender_x+dx + (defender_y+dy)*1000)
tile = {} if tile is None else bblib.copy_tile_exc_loc(tile)
bitmap += emit_tile_terrain(tile)
bitmap += "{0:02b}".format(0 if done else MOVED_STATES[tile.get('moved')])
bitmap += append_unit_info(tile, done)
return bitmap
def extract_attack_state_json(board_move_state, tiles_by_idx):
movemove = board_move_state['move']['data']['move']
print('movemove: {}'.format(movemove))
print('board_move_state: {}'.format(board_move_state['board']))
res = {'attacker_neighbors': [], 'defender_neighbors': [], 'move': movemove}
attacker = defender = None
attacker_x, attacker_y = int(movemove['x_coordinate']), int(movemove['y_coordinate'])
print('attacker: {},{}'.format(attacker_x, attacker_y))
for dx in range(-2,3):
for dy in range(-2,3):
tile = tiles_by_idx.get(attacker_x+dx + (attacker_y+dy)*1000)
tile = {} if tile is None else bblib.copy_tile_exc_loc(tile)
if dx==0 and dy==0: attacker = tile
res['attacker_neighbors'].append(tile)
print('{}{}'.format('=> ' if dx==0 and dy==0 else '', bblib.tilestr(tile) if len(tile)>0 else ''))
defender_x, defender_y = int(movemove['x_coord_attack']), int(movemove['y_coord_attack'])
print('defender: {},{}'.format(defender_x, defender_y))
for dx in range(-2,3):
for dy in range(-2,3):
tile = tiles_by_idx.get(defender_x+dx + (defender_y+dy)*1000)
tile = {} if tile is None else bblib.copy_tile_exc_loc(tile)
if dx==0 and dy==0: defender = tile
res['defender_neighbors'].append(tile)
print('{}{}'.format('=> ' if dx==0 and dy==0 else '', bblib.tilestr(tile) if len(tile)>0 else ''))
print(attacker)
print(defender)
res['dmg20'] = 20 * int(bblib.compute_damage(attacker, defender) / 20)
sys.exit(0)
return res
if __name__ == '__main__':
movetype = sys.argv[1]
if sys.argv[2] == '-':
fh = sys.stdin
elif 'bz2' in sys.argv[2]:
fh = bz2.open(sys.argv[2], 'r')
else:
fh = open(sys.argv[2])
if re.search(r'json', movetype):
board_game_states = json.loads(fh.read().decode())
# legacy
if len(board_game_states) == 1: board_game_states = board_game_states[0]
for state in board_game_states:
tiles_by_idx = bblib.parse_map(state['army_id'], state['board']['tiles'],
state['board'])
if movetype == 'attack_state_json' and is_move_attack_json(state):
print(extract_attack_state_json(state, tiles_by_idx))
print(bblib.combined_map(list(tiles_by_idx.values()), state['army_id']))
sys.exit(0)
for line in fh:
state = line[2:-1] # trim newline
print(restore_tile(state, 0,0))
if movetype == 'attack_state' and is_move_attack(state):
print(extract_attack_state(state))
continue
if (movetype == 'capture' and is_move_capture(state)) or \
(movetype == 'attack' and is_move_attack(state)) or \
False:
print(state)
| asah/meatshields-python-botkit | board_move_state.py | Python | mit | 13,619 |
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
import requests
import kronos
from nba_py.player import PlayerList, PlayerGeneralSplits
from players.models import Player
from teams.models import Team
from seasons.models import Season, PlayerSeason
@kronos.register('0 0 * * *') # Once a day
class Command(BaseCommand):
help = 'Add/update all NBA players from current season'
def add_arguments(self, parser):
parser.add_argument(
'--skip',
action='store_true',
dest='skip',
default=False,
help='Skip existing players',
)
def handle(self, *args, **options):
all_players = PlayerList().info()
for api_player in all_players:
info_msg = "'{} ({})'".format(
api_player['DISPLAY_FIRST_LAST'],
api_player['PERSON_ID']
)
# Get the player, or create him if doesn't exist
qs = Player.objects.filter(PERSON_ID=api_player['PERSON_ID'])
if qs.exists():
if options['skip']:
self.stdout.write(
self.style.SUCCESS("Skipping " + info_msg)
)
continue
player = qs[0]
self.stdout.write(self.style.SUCCESS("Updating " + info_msg))
else:
player = Player()
self.stdout.write(self.style.SUCCESS("Adding " + info_msg))
try:
name = api_player['DISPLAY_LAST_COMMA_FIRST']
last, first = name.replace(' ', '').split(',', 1)
except ValueError:
# Only one name
first = api_player['DISPLAY_LAST_COMMA_FIRST']
last = ''
player.first_name = first
player.last_name = last
player.PERSON_ID = api_player['PERSON_ID']
player.PLAYERCODE = api_player['PLAYERCODE']
# Add player photo only on creation
if not player.photo:
base_url = ('http://i.cdn.turner.com/nba/nba/.element/'
'img/2.0/sect/statscube/players/large/')
filename = api_player['PLAYERCODE'] + '.png'
photo_url = base_url + filename
# Try three times
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=3)
session.mount('http://', adapter)
response = session.get(photo_url)
if response:
image_content = ContentFile(response.content)
player.photo.save(filename, image_content)
player.save()
# Player current season
try:
player_stats = PlayerGeneralSplits(
api_player['PERSON_ID']
).overall()[0]
except IndexError:
self.stdout.write(self.style.ERROR("No stats for " + info_msg))
continue
season, __ = Season.objects.get_or_create(
abbr=player_stats['GROUP_VALUE'],
)
qs = PlayerSeason.objects.filter(
player=player, season=season,
)
if qs.exists():
player_season = qs[0]
else:
player_season = PlayerSeason()
# Team
if api_player['TEAM_ID'] and api_player['TEAM_CODE']:
team = Team.objects.get(TEAM_ID=api_player['TEAM_ID'])
else:
# Player played this season, but was cut/moved to D-League
team = None
player_season.team = team
player_season.player = player
player_season.season = season
player_season.ROSTERSTATUS = api_player['ROSTERSTATUS']
player_season.GAMES_PLAYED_FLAG = api_player['GAMES_PLAYED_FLAG']
player_season.pts = player_stats['PTS']
player_season.reb = player_stats['REB']
player_season.ast = player_stats['AST']
player_season.stl = player_stats['STL']
player_season.blk = player_stats['BLK']
player_season.fg_pct = player_stats['FG_PCT']
player_season.fg3_pct = player_stats['FG3_PCT']
player_season.ft_pct = player_stats['FT_PCT']
player_season.save()
self.stdout.write(self.style.SUCCESS("Successfully updated players"))
| pawelad/nba-rank | src/players/management/commands/update_players.py | Python | mit | 4,552 |
#!/usr/bin/env python
"""
Main entry point to application when running client from shell
"""
from __future__ import print_function
import os
import sys
import signal
from lib.service_bus.client import Client
def main():
"""main entry point to application"""
sbs_namespace = ""
access_key = ""
if "HCR_SBS_NAMESPACE" in os.environ:
sbs_namespace = os.environ["HCR_SBS_NAMESPACE"]
if "HCR_SBS_ACCESS_KEY" in os.environ:
access_key = os.environ["HCR_SBS_ACCESS_KEY"]
if not sbs_namespace or not access_key:
print("Namespace or shared access key are not specified")
sys.exit(1)
clnt = Client(sbs_namespace, access_key)
print("Hit 'Enter' or 'Ctr+C' to exit...\n")
# handle graceful shutdown of mq consumer
signal.signal(signal.SIGTERM, lambda signal, frame: _term_handler(clnt))
clnt.start()
try:
# block main thread
input("")
except KeyboardInterrupt:
pass
clnt.stop()
return 0
def _term_handler(consumer):
"""Handles SIGTERM"""
consumer.stop()
sys.exit(0)
if __name__ == "__main__":
sys.exit(main())
| jenyayel/hooks-client-rpi | hooks_listener/__main__.py | Python | mit | 1,141 |
#!python2
from routes import app
import auth, mushrooms
if __name__ == "__main__":
# Parse command line arguments
from argparse import ArgumentParser
parser = ArgumentParser(
description='Web server for mushroom gathering data collection',
epilog='Easy config setup advice: copy and modify existing .json to config.json')
parser.add_argument('-c', '--config',
help='Load configuration from alternate file',
default='config.json')
parser.add_argument('-r','--reload',
help='Reload db from schema and exit',
action='store_true')
args = parser.parse_args()
# Read config
import json
with open(args.config) as config_file:
config = json.loads(config_file.read())
# Database initialization
from database import get_database
app.db = get_database(config)
if args.reload:
app.db.reinitialize()
quit()
# Run server
app.secret_key = 'development key'
params = {
'debug': config.get('debug', False),
'port': config.get('port', 5000)
}
app.run(**params)
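    # Minimal config.json sketch: only 'debug' and 'port' are read here; any
    # database settings consumed by get_database() are not shown and depend on
    # that module:
    #
    #   {"debug": true, "port": 5000}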
| kazagistar/fungitrack | server.py | Python | mit | 1,107 |
from proteus.default_n import *
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools)
from proteus.mprans import Kappa
import kappa_p as physics
from proteus import Context
ct = Context.get()
nd = ct.domain.nd
timeIntegration = TimeIntegration.BackwardEuler_cfl
stepController = StepControl.Min_dt_controller
femSpaces = {0:ct.basis}
massLumping = False
numericalFluxType = Kappa.NumericalFlux
conservativeFlux = None
subgridError = Kappa.SubgridError(coefficients=physics.coefficients,
nd=nd)
shockCapturing = Kappa.ShockCapturing(coefficients=physics.coefficients,
nd=nd,
shockCapturingFactor=ct.kappa_shockCapturingFactor,
lag=ct.kappa_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
nonlinearSmoother = None
linearSmoother = None
#printNonlinearSolverInfo = True
matrix = LinearAlgebraTools.SparseMatrix
if not ct.useOldPETSc and not ct.useSuperlu:
multilevelLinearSolver = LinearSolvers.KSP_petsc4py
levelLinearSolver = LinearSolvers.KSP_petsc4py
else:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
linear_solver_options_prefix = 'kappa_'
levelNonlinearSolverConvergenceTest = 'r'#'rits'
linearSolverConvergenceTest = 'r'#'rits'
tolFac = 0.0
linTolFac =0.0
l_atol_res = 0.001*ct.kappa_nl_atol_res
nl_atol_res = ct.kappa_nl_atol_res
useEisenstatWalker = True
maxNonlinearIts = 50
maxLineSearches = 0
auxiliaryVariables = ct.domain.auxiliaryVariables['kappa']
| erdc-cm/air-water-vv | 2d/benchmarks/wavesloshing/kappa_n.py | Python | mit | 1,853 |
from text import TextTemplate
from generic import GenericTemplate
from button import ButtonTemplate
from quick_replies import add_quick_reply
from attachment import AttachmentTemplate
__all__ = [
'TextTemplate',
'GenericTemplate',
'ButtonTemplate',
'add_quick_reply',
'AttachmentTemplate'
] | mayukh18/BlindChat | templates/__init__.py | Python | mit | 311 |
'''
Reference
Methods, Structures and Documentation adapted from.
https://msdn.microsoft.com/en-us/library/windows/desktop \
/ms705945%28v=vs.85%29.aspx
'''
from ctypes import *
from ctypes.wintypes import *
from sys import exit
def customresize(array, new_size):
return (array._type_*new_size).from_address(addressof(array))
wlanapi = windll.LoadLibrary('wlanapi.dll')
class GUID(Structure):
_fields_ = [
('Data1', c_ulong),
('Data2', c_ushort),
('Data3', c_ushort),
('Data4', c_ubyte*8),
]
# The WLAN_INTERFACE_STATE enumerated type indicates the state of an interface.
WLAN_INTERFACE_STATE = c_uint
(wlan_interface_state_not_ready,
wlan_interface_state_connected,
wlan_interface_state_ad_hoc_network_formed,
wlan_interface_state_disconnecting,
wlan_interface_state_disconnected,
wlan_interface_state_associating,
wlan_interface_state_discovering,
wlan_interface_state_authenticating) = map(WLAN_INTERFACE_STATE,
xrange(0, 8))
class WLAN_INTERFACE_INFO(Structure):
'''
    The WLAN_INTERFACE_INFO structure contains information about a wireless
    LAN interface.
'''
_fields_ = [
("InterfaceGuid", GUID),
("strInterfaceDescription", c_wchar * 256),
("isState", WLAN_INTERFACE_STATE)
]
class WLAN_INTERFACE_INFO_LIST(Structure):
'''
The WLAN_INTERFACE_INFO_LIST structure contains an array of NIC interface
information.
'''
_fields_ = [
("NumberOfItems", DWORD),
("Index", DWORD),
("InterfaceInfo", WLAN_INTERFACE_INFO * 1)
]
DOT11_MAC_ADDRESS = c_ubyte*6
WLAN_MAX_PHY_TYPE_NUMBER = 0x8
DOT11_SSID_MAX_LENGTH = 32
WLAN_REASON_CODE = DWORD
DOT11_BSS_TYPE = c_uint
(dot11_BSS_type_infrastructure,
dot11_BSS_type_independent,
dot11_BSS_type_any) = map(DOT11_BSS_TYPE, xrange(1, 4))
# The DOT11_PHY_TYPE enumeration defines an 802.11 PHY and media type.
DOT11_PHY_TYPE = c_uint
dot11_phy_type_unknown = 0
dot11_phy_type_any = 0
dot11_phy_type_fhss = 1
dot11_phy_type_dsss = 2
dot11_phy_type_irbaseband = 3
dot11_phy_type_ofdm = 4
dot11_phy_type_hrdsss = 5
dot11_phy_type_erp = 6
dot11_phy_type_ht = 7
dot11_phy_type_IHV_start = 0x80000000
dot11_phy_type_IHV_end = 0xffffffff
# The DOT11_AUTH_ALGORITHM enumerated type defines a wireless
# LAN authentication algorithm.
DOT11_AUTH_ALGORITHM = c_uint
DOT11_AUTH_ALGO_80211_OPEN = 1
DOT11_AUTH_ALGO_80211_SHARED_KEY = 2
DOT11_AUTH_ALGO_WPA = 3
DOT11_AUTH_ALGO_WPA_PSK = 4
DOT11_AUTH_ALGO_WPA_NONE = 5
DOT11_AUTH_ALGO_RSNA = 6
DOT11_AUTH_ALGO_RSNA_PSK = 7
DOT11_AUTH_ALGO_IHV_START = 0x80000000
DOT11_AUTH_ALGO_IHV_END = 0xffffffff
# The DOT11_CIPHER_ALGORITHM enumerated type defines a cipher
# algorithm for data encryption and decryption.
DOT11_CIPHER_ALGORITHM = c_uint
DOT11_CIPHER_ALGO_NONE = 0x00
DOT11_CIPHER_ALGO_WEP40 = 0x01
DOT11_CIPHER_ALGO_TKIP = 0x02
DOT11_CIPHER_ALGO_CCMP = 0x04
DOT11_CIPHER_ALGO_WEP104 = 0x05
DOT11_CIPHER_ALGO_WPA_USE_GROUP = 0x100
DOT11_CIPHER_ALGO_RSN_USE_GROUP = 0x100
DOT11_CIPHER_ALGO_WEP = 0x101
DOT11_CIPHER_ALGO_IHV_START = 0x80000000
DOT11_CIPHER_ALGO_IHV_END = 0xffffffff
class DOT11_SSID(Structure):
'''
A DOT11_SSID structure contains the SSID of an interface
'''
_fields_ = [
("SSIDLength", c_ulong),
("SSID", c_char * DOT11_SSID_MAX_LENGTH)
]
# Enumerated type to define the code of connection.
WLAN_CONNECTION_MODE = c_uint
(wlan_connection_mode_profile,
wlan_connection_mode_temporary_profile,
wlan_connection_mode_discovery_secure,
wlan_connection_mode_discovery_unsecure,
wlan_connection_mode_auto,
wlan_connection_mode_invalid) = map(WLAN_CONNECTION_MODE, xrange(0, 6))
class NDIS_OBJECT_HEADER(Structure):
'''
This Structure packages the object type, version, and size information
that is required in many NDIS (Netword Driver interface Specification)
Structures.
'''
_fields_ = [
("Type", c_char),
("Revision", c_char),
("Size", c_ushort)]
class DOT11_BSSID_LIST(Structure):
'''
The DOT11_BSSID_LIST structure contains a list of basic service set (BSS)
identifiers.
'''
_fields_ = [
("Header", NDIS_OBJECT_HEADER),
("uNumOfEntries", ULONG),
("uTotalNumOfEntries", ULONG),
("BSSIDs", DOT11_MAC_ADDRESS*1)]
class WLAN_CONNECTION_PARAMETERS(Structure):
'''
    The WLAN_CONNECTION_PARAMETERS structure specifies the parameters used
    by the WlanConnect function.
'''
_fields_ = [
("wlanConnectionMode", WLAN_CONNECTION_MODE),
("strProfile", LPCWSTR),
("pDot11Ssid", POINTER(DOT11_SSID)),
("pDesiredBssidList", POINTER(DOT11_BSSID_LIST)),
("dot11BssType", DOT11_BSS_TYPE),
("dwFlags", DWORD)]
# The `WlanConnect` attempts to connect to a specific network.
WlanConnect = wlanapi.WlanConnect
WlanConnect.argtypes = (HANDLE,
POINTER(GUID),
POINTER(WLAN_CONNECTION_PARAMETERS),
c_void_p)
WlanConnect.restype = DWORD
# The `WlanDisconnect` method disconnects an interface from its
# current network.
WlanDisconnect = wlanapi.WlanDisconnect
WlanDisconnect.argtypes = (HANDLE,
POINTER(GUID),
c_void_p)
WlanDisconnect.restype = DWORD
# Opens a connection to the server.
WlanOpenHandle = wlanapi.WlanOpenHandle
WlanOpenHandle.argtypes = (DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE))
WlanOpenHandle.restype = DWORD
# The WlanCloseHandle method closes the connection to the server.
WlanCloseHandle = wlanapi.WlanCloseHandle
WlanCloseHandle.argtypes = (HANDLE, c_void_p)
WlanCloseHandle.restype = DWORD
class WLAN_AVAILABLE_NETWORK(Structure):
'''
    The WLAN_AVAILABLE_NETWORK structure contains information about an
    available wireless network.
'''
_fields_ = [
("ProfileName", c_wchar * 256),
("dot11Ssid", DOT11_SSID),
("dot11BssType", DOT11_BSS_TYPE),
("NumberOfBssids", c_ulong),
("NetworkConnectable", c_bool),
("wlanNotConnectableReason", WLAN_REASON_CODE),
("NumberOfPhyTypes", c_ulong),
("dot11PhyTypes", DOT11_PHY_TYPE * WLAN_MAX_PHY_TYPE_NUMBER),
("MorePhyTypes", c_bool),
("wlanSignalQuality", c_ulong),
("SecurityEnabled", c_bool),
("dot11DefaultAuthAlgorithm", DOT11_AUTH_ALGORITHM),
("dot11DefaultCipherAlgorithm", DOT11_CIPHER_ALGORITHM),
("Flags", DWORD),
("Reserved", DWORD)]
class WLAN_AVAILABLE_NETWORK_LIST(Structure):
'''
    The WLAN_AVAILABLE_NETWORK_LIST structure contains an array of
    information about available wireless networks.
'''
_fields_ = [
("NumberOfItems", DWORD),
("Index", DWORD),
("Network", WLAN_AVAILABLE_NETWORK * 1)]
# The WlanEnumInterfaces function enumerates all of the wireless LAN interfaces
# currently enabled on the local computer.
WlanEnumInterfaces = wlanapi.WlanEnumInterfaces
WlanEnumInterfaces.argtypes = (HANDLE,
c_void_p,
POINTER(POINTER(WLAN_INTERFACE_INFO_LIST)))
WlanEnumInterfaces.restype = DWORD
# The WlanGetAvailableNetworkList function retrieves the list of available
# networks on a wireless LAN interface.
WlanGetAvailableNetworkList = wlanapi.WlanGetAvailableNetworkList
WlanGetAvailableNetworkList.argtypes = (HANDLE,
POINTER(GUID),
DWORD,
c_void_p,
POINTER(POINTER(
WLAN_AVAILABLE_NETWORK_LIST)))
WlanGetAvailableNetworkList.restype = DWORD
# The WlanFreeMemory function frees memory. Any memory returned from Native
# Wifi functions must be freed.
WlanFreeMemory = wlanapi.WlanFreeMemory
WlanFreeMemory.argtypes = [c_void_p]
available = None
_dict = {}
# Private methods.
def _connect(network, parameters):
'''
Attempts to connect to a specific network.
'''
global _dict
wireless_interface = _dict[network]
c_params = parameters
wcp = WLAN_CONNECTION_PARAMETERS()
connection_mode = parameters['connection_mode']
wcp.wlanConnectionMode = WLAN_CONNECTION_MODE(connection_mode)
if connection_mode == 0 or connection_mode == 1:
        wcp.strProfile = LPCWSTR(parameters["profile"])
else:
        wcp.strProfile = None
dot11Ssid = DOT11_SSID()
try:
dot11Ssid.SSID = parameters["ssid"]
dot11Ssid.SSIDLength = len(parameters["ssid"])
except:
dot11Ssid.SSID = network
dot11Ssid.SSIDLength = len(network)
wcp.pDot11Ssid = pointer(dot11Ssid)
dot11bssid = DOT11_BSSID_LIST()
bssid = parameters["bssidList"]
dot11bssid.Header = bssid['Header']
dot11bssid.uNumOfEntries = bssid['uNumOfEntries']
dot11bssid.uTotalNumOfEntries = bssid['uTotalNumOfEntries']
dot11bssid.BSSIDs = bssid['BSSIDs']
    wcp.pDesiredBssidList = pointer(dot11bssid)
bssType = parameters["bssType"]
wcp.dot11BssType = DOT11_BSS_TYPE(bssType)
wcp.dwFlags = DWORD(parameters["flags"])
NegotiatedVersion = DWORD()
ClientHandle = HANDLE()
wlan = WlanOpenHandle(1,
None,
byref(NegotiatedVersion),
byref(ClientHandle))
if wlan:
exit(FormatError(wlan))
pInterfaceList = pointer(WLAN_INTERFACE_INFO_LIST())
wlan = WlanEnumInterfaces(ClientHandle, None, byref(pInterfaceList))
if wlan:
exit(FormatError(wlan))
try:
wlan = WlanConnect(ClientHandle,
wireless_interface,
wcp,
None)
if wlan:
exit(FormatError(wlan))
WlanCloseHandle(ClientHandle)
finally:
WlanFreeMemory(pInterfaceList)
def _disconnect():
'''
To disconnect an interface form the current network.
'''
NegotiatedVersion = DWORD()
ClientHandle = HANDLE()
wlan = WlanOpenHandle(1,
None,
byref(NegotiatedVersion),
byref(ClientHandle))
if wlan:
exit(FormatError(wlan))
pInterfaceList = pointer(WLAN_INTERFACE_INFO_LIST())
wlan = WlanEnumInterfaces(ClientHandle, None, byref(pInterfaceList))
if wlan:
exit(FormatError(wlan))
try:
ifaces = customresize(pInterfaceList.contents.InterfaceInfo,
pInterfaceList.contents.NumberOfItems)
# find each available network for each interface
for iface in ifaces:
wlan = WlanDisconnect(ClientHandle,
byref(iface.InterfaceGuid),
None)
if wlan:
exit(FormatError(wlan))
WlanCloseHandle(ClientHandle)
finally:
WlanFreeMemory(pInterfaceList)
return get_available_wifi()
def _start_scanning():
'''
Private method for scanning and returns the available devices.
'''
global available
global wireless_interfaces
NegotiatedVersion = DWORD()
ClientHandle = HANDLE()
wlan = WlanOpenHandle(1,
None,
byref(NegotiatedVersion),
byref(ClientHandle))
if wlan:
exit(FormatError(wlan))
# find all wireless network interfaces
pInterfaceList = pointer(WLAN_INTERFACE_INFO_LIST())
wlan = WlanEnumInterfaces(ClientHandle, None, byref(pInterfaceList))
if wlan:
exit(FormatError(wlan))
try:
ifaces = customresize(pInterfaceList.contents.InterfaceInfo,
pInterfaceList.contents.NumberOfItems)
# find each available network for each interface
wireless_interfaces = ifaces
for iface in ifaces:
pAvailableNetworkList = pointer(WLAN_AVAILABLE_NETWORK_LIST())
wlan = WlanGetAvailableNetworkList(ClientHandle,
byref(iface.InterfaceGuid),
0,
None,
byref(pAvailableNetworkList))
if wlan:
exit(FormatError(wlan))
try:
avail_net_list = pAvailableNetworkList.contents
networks = customresize(avail_net_list.Network,
avail_net_list.NumberOfItems)
# Assigning the value of networks to the global variable
# `available`, so it could be used in other methods.
available = networks
_make_dict()
wlan = WlanDisconnect(ClientHandle,
byref(iface.InterfaceGuid),
None)
if wlan:
exit(FormatError(wlan))
WlanCloseHandle(ClientHandle)
finally:
WlanFreeMemory(pAvailableNetworkList)
finally:
WlanFreeMemory(pInterfaceList)
return get_available_wifi()
def _get_network_info(name):
'''
    returns the information of the selected network as a dict.
'''
global available
global _dict
net = _dict[name]
dot11BssType = net.dot11BssType
dot11DefaultAuthAlgorithm = net.dot11DefaultAuthAlgorithm
dot11DefaultCipherAlgorithm = net.dot11DefaultCipherAlgorithm
dot11PhyTypes = net.dot11PhyTypes[0]
dot11Ssid = net.dot11Ssid
wlanNotConnectableReason = net.wlanNotConnectableReason
wlanSignalQuality = net.wlanSignalQuality
return {"dot11BssType": dot11BssType,
"dot11DefaultAuthAlgorithm": dot11DefaultAuthAlgorithm,
"dot11DefaultCipherAlgorithm": dot11DefaultCipherAlgorithm,
"dot11PhyTypes": dot11PhyTypes,
"SSID": dot11Ssid.SSID,
"SSIDLength": dot11Ssid.SSIDLength,
"wlanNotConnectableReason": wlanNotConnectableReason,
"wlanSignalQuality": wlanSignalQuality}
def _make_dict():
'''
Prepares a dict so it could store network information.
'''
global available
global _dict
_dict = {}
for network in available:
_dict[str(network.dot11Ssid.SSID)] = network
def _get_available_wifi():
'''
returns the available wifi networks.
'''
global _dict
return _dict
def _is_enabled():
'''
    Reason for returning true is explained in wifi facade.
    /plyer/facades/wifi.py
'''
return True
# public methods.
def is_enabled():
'''
calls private method `_is_enabled` and returns the result.
'''
return _is_enabled()
def connect(network, parameters):
'''
    Connect to a network with the given parameters.
'''
_connect(network=network, parameters=parameters)
def disconnect():
'''
Disconnect from a network.
'''
_disconnect()
def start_scanning():
'''
    Start scanning for available wifi networks.
'''
return _start_scanning()
def get_network_info(name):
'''
return the wifi network info.
'''
return _get_network_info(name=name)
def get_available_wifi():
'''
    return the available wifi networks.
'''
return _get_available_wifi()
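# Hedged usage sketch (added for illustration; not part of the original
# module). It only exercises the public helpers defined above; the printed
# keys come from the dict returned by `_get_network_info`.
if __name__ == '__main__':
    if is_enabled():
        networks = start_scanning()
        print(list(networks))
        for name in networks:
            info = get_network_info(name)
            print(info['SSID'], info['wlanSignalQuality'])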
| johnbolia/plyer | plyer/platforms/win/libs/wifi_defs.py | Python | mit | 15,497 |
# -*- coding: utf8 -*-
# # #
# Downscale PCMDI AR5 data to a pre-processed climatology
# extent, resolution, reference system
#
# Author: Michael Lindgren ([email protected])
# # #
import rasterio, os, copy
import numpy as np
import pandas as pd
import geopandas as gpd
import xarray as xr
from downscale import utils
class DeltaDownscale( object ):
def __init__( self, baseline, clim_begin, clim_end, historical, future=None,
downscaling_operation='add', mask=None, mask_value=0, ncpus=32,
src_crs={'init':'epsg:4326'}, src_nodata=-9999.0, dst_nodata=None,
post_downscale_function=None, varname=None, modelname=None, anom=False,
resample_type='bilinear', fix_clim=False, interp=False, find_bounds=False,
aoi_mask=None, *args, **kwargs ):
'''
simple delta downscaling
Arguments:
----------
baseline = []
clim_begin = []
clim_end = []
historical = []
future = []
...MORE...
Returns:
--------
'''
self.historical = historical
self.future = future
self.baseline = baseline
self.clim_begin = clim_begin
self.clim_end = clim_end
self.downscaling_operation = downscaling_operation
self.level = self.historical.level
self.level_name = self.historical.level_name
self.mask = mask
self.mask_value = mask_value
self.ncpus = ncpus
self.varname = varname
self.modelname = modelname
self.anom = anom
self.resample_type = resample_type
self.affine = self.historical._calc_affine()
self.src_crs = src_crs
self.src_nodata = src_nodata
self.dst_nodata = dst_nodata
self.post_downscale_function = post_downscale_function
self.fix_clim = fix_clim
self.interp = interp
self.find_bounds = find_bounds
self.aoi_mask = aoi_mask
self.utils = utils
# interpolate across space GCLL/PCLL args
self._rotated = False
self._lonpc = None
# empty attributes to calculate
self.anomalies = None
self.climatology = None
self.ds = None
self._concat_nc() # make a self.ds variable...
# fix pr climatologies if desired
if fix_clim == True:
print( 'fixing high/low values -- {}...'.format( self.varname ) )
self.interp = True # force True
if self.aoi_mask is not None: # hairy
mask = self.aoi_mask.mask
else:
mask = None
self._calc_climatolgy()
print( 'climmin:{}'.format( np.nanmin( self.climatology.data ) ) )
print( 'climmax:{}'.format( np.nanmax( self.climatology.data ) ) )
self._fix_clim( aoi_mask=mask, find_bounds=self.find_bounds )
# interpolate clims across space
self._interp_na_fix_clim()
# if there are still values <0.5 set them to 0.5
climatology = self.climatology.data
climatology[ climatology < 0.5 ] = 0.5
self.climatology.data = climatology
del climatology
# fix the ds values -- will be interped below...
self._fix_ds( aoi_mask=mask, find_bounds=self.find_bounds )
print( 'dsmin:{}'.format( np.nanmin( self.ds.data ) ) )
print( 'dsmax:{}'.format( np.nanmax( self.ds.data ) ) )
if self.interp == True:
print( 'running interpolation across NAs -- base resolution' )
self.interp_na( )
if fix_clim == True:
# if there are still values <0.5 set them to 0.5
dat = self.ds.data
dat[ dat < 0.5 ] = 0.5
self.ds.data = dat
del dat
# calculate climatology if fix_clim == False
if self.fix_clim == False:
self._calc_climatolgy()
# calculate anomalies with the new climatology values
self._calc_anomalies()
def _concat_nc( self ):
if self.historical and self.future:
ds = xr.concat([ self.historical.ds, self.future.ds ], dim='time' )
else:
ds = self.historical.ds
self.ds = ds[ self.historical.variable ]
# if self.level:
# # levidx, = np.where( ds[ self.level_name ] == self.level )
# # ds = ds[ :, levidx[0], ... ]
# ds = ds[ :, self.level, ... ] # LEVEL is the index number of the level to use...
# self.ds = ds
def _calc_climatolgy( self ):
'''slice / aggregate to climatology using mean'''
try:
climatology = self.ds.sel( time=slice( self.clim_begin, self.clim_end ) )
self.climatology = climatology.groupby( 'time.month' ).mean( 'time' )
except Exception:
raise AttributeError( 'non-overlapping climatology period and series' )
def _calc_anomalies( self ):
''' calculate simple absolute or relative anomalies depending on variable '''
if self.downscaling_operation == 'add':
anomalies = self.ds.groupby( 'time.month' ) - self.climatology
elif self.downscaling_operation == 'mult':
anomalies = self.ds.groupby( 'time.month' ) / self.climatology
else:
			raise NameError( '_calc_anomalies (ar5): value of downscaling_operation must be "add" or "mult" ' )
# slice back to times we want
if self.historical != None and self.future != None:
self.anomalies = anomalies.sel( time=self.future.ds.time )
else:
self.anomalies = anomalies.sel( time=self.historical.ds.time )
def _fix_clim( self, aoi_mask, find_bounds=False ):
''' fix values in precip data '''
print( '_fix_clim' )
if find_bounds == True:
print('bounds')
bound_mask = find_boundary( self.climatology[ 0, ... ].data )
for idx in range( self.climatology.shape[0] ):
arr = self.climatology[ idx, ... ].data
arr = correct_boundary( arr, bound_mask, aoi_mask=aoi_mask )
self.climatology[ idx, ... ].data = correct_inner( arr, bound_mask, aoi_mask=aoi_mask )
elif find_bounds == False:
for idx in range( self.climatology.shape[0] ):
arr = self.climatology[ idx, ... ].data
self.climatology[ idx, ... ].data = correct_values( arr, aoi_mask=aoi_mask )
else:
			raise ValueError( 'find_bounds arg is boolean only' )
def _fix_ds( self, aoi_mask, find_bounds=False ):
''' fix high/low values in precip data '''
print( '_fix_ds ' )
print( self.ds.shape[0] )
if find_bounds == True:
bound_mask = find_boundary( self.ds[ 0, ... ].data )
for idx in range( self.ds.shape[0] ):
arr = self.ds[ idx, ... ].data
arr = correct_boundary( arr, bound_mask, aoi_mask=aoi_mask )
self.ds[ idx, ... ].data = correct_inner( arr, bound_mask, aoi_mask=aoi_mask )
elif find_bounds == False:
for idx in range( self.ds.shape[0] ):
arr = self.ds[ idx, ... ].data
self.ds[ idx, ... ].data = correct_values( arr, aoi_mask=aoi_mask )
else:
			raise ValueError( 'find_bounds arg is boolean only' )
@staticmethod
def wrap( d ):
''' simple wrapper around utils.xyz_to_grid for mp_map '''
x = np.array( d['x'] )
y = np.array( d['y'] )
z = np.array( d['z'] )
xi, yi = d['grid']
return utils.xyz_to_grid( x, y, z, (xi,yi), interp='linear' )
def interp_na( self ):
'''
		interpolate across NA cells of self.ds using utils.xyz_to_grid,
		run in parallel across the time slices; updates self.ds.data
		in place and returns 1.
'''
from copy import copy
import pandas as pd
import numpy as np
from pathos.mp_map import mp_map
# remove the darn scientific notation
np.set_printoptions( suppress=True )
output_dtype = np.float32
# if 0-360 leave it alone
if ( self.ds.lon > 200.0 ).any() == True:
dat, lons = np.array(self.ds.data), np.array(self.ds.lon)
self._lonpc = lons
else:
# greenwich-centered rotate to 0-360 for interpolation across pacific
dat, lons = self.utils.rotate( np.array(self.ds.values), np.array(self.ds.lon), to_pacific=True )
self._rotated = True # update the rotated attribute
self._lonpc = lons
# mesh the lons and lats and unravel them to 1-D
xi, yi = np.meshgrid( self._lonpc, self.ds.lat.data )
lo, la = [ i.ravel() for i in (xi,yi) ]
# setup args for multiprocessing
df_list = [ pd.DataFrame({ 'x':lo, 'y':la, 'z':d.ravel() }).dropna( axis=0, how='any' ) for d in dat ]
args = [ {'x':np.array(df['x']), 'y':np.array(df['y']), 'z':np.array(df['z']), \
'grid':(xi,yi), 'method':self.historical.method, 'output_dtype':output_dtype } for df in df_list ]
print( 'processing interpolation to convex hull in parallel using {} cpus.'.format( self.ncpus ) )
dat_list = mp_map( self.wrap, args, nproc=self.ncpus )
dat_list = [ np.array(i) for i in dat_list ] # drop the output mask
dat = np.array( dat_list )
lons = self._lonpc
if self._rotated == True: # rotate it back
dat, lons = self.utils.rotate( dat, lons, to_pacific=False )
# place back into a new xarray.Dataset object for further processing
# self.ds = self.ds.update( { self.historical.variable:( ['time','lat','lon'], dat ) } )
self.ds.data = dat
print( 'ds interpolated updated into self.ds' )
return 1
def _interp_na_fix_clim( self ):
'''
		interpolate across NA cells of self.climatology using utils.xyz_to_grid,
		run in parallel across the monthly climatology slices; updates
		self.climatology.data in place and returns 1.
'''
from copy import copy
import pandas as pd
import numpy as np
from pathos.mp_map import mp_map
# remove the darn scientific notation
np.set_printoptions( suppress=True )
output_dtype = np.float32
# if 0-360 leave it alone
if ( self.ds.lon > 200.0 ).any() == True:
dat, lons = self.climatology.data, self.ds.lon
self._lonpc = lons
self._rotated = False
else:
# greenwich-centered rotate to 0-360 for interpolation across pacific
dat, lons = self.utils.rotate( np.array(self.climatology.values), np.array(self.ds.lon), to_pacific=True )
self._rotated = True # update the rotated attribute
self._lonpc = lons
# mesh the lons and lats and unravel them to 1-D
xi, yi = np.meshgrid( self._lonpc, self.ds.lat.data )
lo, la = [ i.ravel() for i in (xi,yi) ]
# setup args for multiprocessing
df_list = [ pd.DataFrame({ 'x':lo, 'y':la, 'z':d.ravel() }).dropna( axis=0, how='any' ) for d in dat ]
# df_list = [ df[ df.z > 0.5 ] for df in df_list ]
args = [ {'x':np.array(df['x']), 'y':np.array(df['y']), 'z':np.array(df['z']), \
'grid':(xi,yi), 'method':self.historical.method, 'output_dtype':output_dtype } for df in df_list ]
print( 'processing interpolation to convex hull in parallel using {} cpus. -- CLIMATOLOGY'.format( self.ncpus ) )
dat_list = mp_map( self.wrap, args, nproc=self.ncpus )
dat_list = [ np.array(i) for i in dat_list ] # drop the output mask
dat = np.array( dat_list )
# # add back only the cells that had NANs before and now have data
# dat[ np.isnan( dat )] = new_dat[ np.isnan( dat )]
# # set low vals to 0.5mm (the minimum acceptable value)
# dat[ (~np.isnan(dat)) and (dat < 0.5) ] = 0.5
lons = self._lonpc
if self._rotated == True: # rotate it back
dat, lons = self.utils.rotate( dat, lons, to_pacific=False )
self._rotated = False # reset it now that its back
# place back into a new xarray.Dataset object for further processing
# self.ds = self.ds.update( { self.historical.variable:( ['time','lat','lon'], dat ) } )
self.climatology.data = dat
print( 'ds interpolated updated into self.ds' )
return 1
def downscale( self, output_dir, prefix=None ):
import affine
from affine import Affine
import itertools
from functools import partial
from pathos.mp_map import mp_map
operation_switch = { 'add':self.utils.add, 'mult':self.utils.mult }
def two_digit_month( x ):
''' make 1 digit month a standard 2-digit for output filenames '''
month = str( x )
if len(month) == 1:
month = '0'+month
return month
time_suffix = [ '_'.join([two_digit_month( t.month ), str(t.year)]) for t in self.anomalies.time.to_pandas() ]
# handle missing variable / model names
if self.varname != None:
variable = self.varname
elif self.historical.variable != None:
variable = self.historical.variable
else:
variable = 'variable'
if self.modelname != None:
model = self.modelname
elif self.historical.model != None:
model = self.historical.model
else:
model = 'model'
# handle situations where project and model are the same and therefore project is passed as None (i.e. CRU)
if self.historical.project is not None:
output_filenames = [ os.path.join( output_dir, '_'.join([variable, self.historical.metric, self.historical.units, \
self.historical.project, model, self.historical.scenario, ts]) + '.tif') for ts in time_suffix ]
else:
output_filenames = [ os.path.join( output_dir, '_'.join([variable, self.historical.metric, self.historical.units,
model, self.historical.scenario, ts]) + '.tif') for ts in time_suffix ]
# if there is a specific name prefix, use it
if prefix != None:
output_filenames = [ os.path.join( output_dir, '_'.join([prefix, ts]) + '.tif' ) for ts in time_suffix ]
# rotate to pacific-centered
if ( self.anomalies.lon.data > 200.0 ).any() == True:
dat, lons = ( np.array(self.anomalies), np.array(self.anomalies.lon) )
self.anomalies_rot = dat
src_transform = self.historical.transform_from_latlon( self.historical.ds.lat, lons )
# print( 'anomalies NOT rotated!' )
else:
dat, lons = self.utils.shiftgrid( 0., np.array(self.anomalies), np.array(self.anomalies.lon) )
self.anomalies_rot = dat
src_transform = self.historical.transform_from_latlon( np.array(self.historical.ds.lat), np.array(lons) )
print( src_transform )
# print( 'anomalies rotated!' )
		# run and output -- this can fail if there is an incomplete number of monthly baseline rasters
rstlist = self.baseline.repeat( n=self.anomalies_rot.shape[0] / 12 ) # months
if isinstance( self.anomalies_rot, xr.Dataset ):
self.anomalies_rot = self.anomalies_rot[ self.historical.variable ].data
elif isinstance( self.anomalies_rot, xr.DataArray ):
self.anomalies_rot = self.anomalies_rot.data
else:
self.anomalies_rot = self.anomalies_rot
args = zip( self.anomalies_rot, rstlist, output_filenames )
args = [{'anom':i, 'base':j, 'output_filename':k,\
'downscaling_operation':self.downscaling_operation, \
'post_downscale_function':self.post_downscale_function,\
'mask':self.mask, 'mask_value':self.mask_value } for i,j,k in args ]
# partial and wrapper
f = partial( self.utils.interp_ds, src_crs=self.src_crs, src_nodata=self.src_nodata, \
dst_nodata=self.dst_nodata, src_transform=src_transform, resample_type=self.resample_type )
run = partial( self.utils._run_ds, f=f, operation_switch=operation_switch, anom=self.anom, mask_value=self.mask_value )
# run it
out = mp_map( run, args, nproc=self.ncpus )
return output_dir
# # # # # # # # # NEW FILL Dataset FOR A SPECIFIC SNAP ISSUE WITH pre DATA from CRU
# # # # # # # # # # and pr DATA from CMIP5 / CRU
def find_boundary( arr ):
'''
return a mask of the boundary limit of DATA cells (overlays edge DATA not NA)
	this is especially useful if the data are land-only, as in the CRU TS3.x data.
'''
from skimage.segmentation import find_boundaries
bool_arr = np.copy( arr )
ind = np.isnan( bool_arr )
bool_arr[ ~ind ] = 1
bool_arr[ ind ] = 0
return find_boundaries( bool_arr, mode='inner' )
def calc_percentile( arr, aoi_mask, percentile=95, fill_value=0, nodata=None ):
'''
calculate the percentile value potentially over a masked domain,
and avoiding nodata and np.nan AND return the nearest actual value to
the np.nanpercentile( arr, percentile )
arr = [numpy.ndarray] 2D array
aoi_mask = [numpy.ndarray] 2D mask array of 0 (nomask) or 1 (mask)
'''
if aoi_mask is not None:
# mask the background
arr = arr[ (aoi_mask != fill_value) ]
if nodata is not None:
arr = arr[ arr != nodata ]
upperthresh = np.nanpercentile( arr, percentile )
idx = (np.abs(arr - upperthresh)).argmin()
return arr[ idx ]
def correct_boundary( arr, bound_mask, aoi_mask, percentile=95, fill_value=0 ):
''' correct the boundary pixels with non-acceptable values '''
	upperthresh = calc_percentile( arr, aoi_mask, percentile, fill_value )
# drop any masks
arr = np.array( arr )
ind = np.where( bound_mask == True )
vals = arr[ ind ]
vals[ vals < 0.5 ] = 0.5
vals[ vals > upperthresh ] = upperthresh
arr[ ind ] = vals
return arr
def correct_inner( arr, bound_mask, aoi_mask, percentile=95, fill_value=0 ):
''' correct the inner pixels with non-acceptable values '''
	upperthresh = calc_percentile( arr, aoi_mask, percentile, fill_value )
# drop any masks
arr = np.array( arr )
	ind = np.where( (arr > 0) & (bound_mask != True) )
vals = arr[ ind ]
vals[ vals < 0.5 ] = np.nan # set to the out-of-bounds value
vals[ vals > upperthresh ] = upperthresh
arr[ ind ] = vals
return arr
def correct_values( arr, aoi_mask, percentile=95, fill_value=0 ):
''' correct the values for precip -- from @leonawicz'''
	upperthresh = calc_percentile( arr, aoi_mask, percentile, fill_value )
print('upperthresh:{}'.format(upperthresh))
# drop any masks
arr = np.array( arr )
arr[ arr < 0.5 ] = np.nan # set to the out-of-bounds value
arr[ arr > upperthresh ] = upperthresh
return arr
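# Hedged illustration (added; not in the original module): a tiny, self-
# contained exercise of the precip-correction helpers above. The arrays and
# the all-ones AOI mask are made-up values chosen only to show the behaviour
# (lows below 0.5 become NaN, extreme highs are clipped near the 95th
# percentile value); find_boundary additionally requires scikit-image.
if __name__ == '__main__':
	_toy = np.array([[np.nan, np.nan, np.nan],
					[np.nan, 5.0, 2.0],
					[np.nan, 0.1, 4.0]])
	print( find_boundary( _toy ) )
	_vals = np.array([[0.1, 2.0, 60.0],
					[0.3, 5.0, 1.0],
					[1.0, 0.0, 4.0]])
	print( correct_values( _vals.copy(), aoi_mask=np.ones_like(_vals) ) )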
| ua-snap/downscale | downscale/ds.py | Python | mit | 16,630 |
from cardinal.decorators import command
class TestCommandRaisesExceptionPlugin:
def __init__(self):
self.command_calls = []
@command('command')
def command(self, *args):
self.command_calls.append(args)
raise Exception()
def setup():
return TestCommandRaisesExceptionPlugin()
| JohnMaguire/Cardinal | cardinal/fixtures/fake_plugins/command_raises_exception/plugin.py | Python | mit | 320 |
"""
fizzbuzz.py
Author: Dimitri
Credit: Mr. Dennison
Assignment:
Write a program that prints the numbers from 1 to 100. But for
multiples of three print “Fizz” instead of the number and for
the multiples of five print “Buzz”. For numbers which are multiples
of both three and five print “FizzBuzz”.
We will use a variation of this test in which the last number of
the series isn't necessarily 100, and the two numbers being tested
for multiples aren't necessarily three and five. For example, your
program should behave just like this:
How many numbers shall we print? 25
For multiples of what number shall we print 'Fizz'? 3
For multiples of what number shall we print 'Buzz'? 5
1
2
Fizz
4
Buzz
Fizz
7
8
Fizz
Buzz
11
Fizz
13
14
FizzBuzz
16
17
Fizz
19
Buzz
Fizz
22
23
Fizz
Buzz
"""
n = int(input("How many numbers shall we print? "))
f = int(input("For multiples of what number shall we print 'Fizz'? "))
b = int(input("For multiples of what number shall we print 'Buzz'? "))
for x in range(1, n+1):
if (x%f==0 and x%b==0):
print("FizzBuzz")
elif (x%b)==0:
print("Buzz")
elif (x%f)==0:
print("Fizz")
else:
print(x) | HHStudent/fizzbuzz | fizzbuzz.py | Python | mit | 1,184 |
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2021 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import posixpath
from flask import jsonify, request, session
from flask_pluginengine import render_plugin_template
from werkzeug.exceptions import BadRequest, NotFound
from werkzeug.urls import url_parse
from indico.core.config import config
from indico.modules.events.management.controllers import RHManageEventBase
from indico.web.rh import RH
from indico.web.util import jsonify_template
from indico_ursh import _
from indico_ursh.util import is_configured, register_shortcut, request_short_url, strip_end
from indico_ursh.views import WPShortenURLPage
CUSTOM_SHORTCUT_ALPHABET = frozenset('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-')
class RHGetShortURL(RH):
"""Make a request to the URL shortening service"""
@staticmethod
def _resolve_full_url(original_url):
if url_parse(original_url).host:
return original_url
original_url = original_url.lstrip('/')
return posixpath.join(config.BASE_URL, original_url)
@staticmethod
def _check_host(full_url):
if url_parse(full_url).host != url_parse(config.BASE_URL).host:
raise BadRequest('Invalid host for URL shortening service')
def _process(self):
original_url = request.json.get('original_url')
full_url = self._resolve_full_url(original_url)
self._check_host(full_url)
short_url = request_short_url(full_url)
return jsonify(url=short_url)
class RHShortURLPage(RH):
"""Provide a simple page where users can submit a URL to be shortened"""
def _process(self):
if not is_configured():
raise NotFound('Plugin is not configured')
return WPShortenURLPage.render_template('ursh_shortener_page.html')
class RHCustomShortURLPage(RHManageEventBase):
"""Provide a simple page, where users can submit a URL to be shortened"""
def _make_absolute_url(self, url):
return posixpath.join(config.BASE_URL, url[1:]) if url.startswith('/') else url
def _get_error_msg(self, result):
if result['status'] == 409:
return _('Shortcut already exists')
elif result['status'] == 400:
return _('Malformed shortcut')
return result['error'].get('description')
def _process_args(self):
from indico_ursh.plugin import UrshPlugin
super()._process_args()
api_host = url_parse(UrshPlugin.settings.get('api_host'))
self.ursh_host = strip_end(api_host.to_url(), api_host.path[1:]).rstrip('/') + '/'
def _process_GET(self):
original_url = self._make_absolute_url(request.args['original_url'])
return WPShortenURLPage.render_template('ursh_custom_shortener_page.html',
event=self.event,
ursh_host=self.ursh_host,
original_url=original_url,
submitted=False)
def _process_POST(self):
original_url = self._make_absolute_url(request.args['original_url'])
shortcut = request.form['shortcut'].strip()
if not (set(shortcut) <= CUSTOM_SHORTCUT_ALPHABET):
raise BadRequest('Invalid shortcut')
result = register_shortcut(original_url, shortcut, session.user)
if result.get('error'):
kwargs = {'success': False, 'msg': self._get_error_msg(result)}
else:
kwargs = {'success': True, 'shorturl': result['short_url']}
return jsonify_template('ursh_custom_shortener_page.html', render_plugin_template,
event=self.event, ursh_host=self.ursh_host, shortcut=shortcut,
original_url=original_url, submitted=True, **kwargs)
| ThiefMaster/indico-plugins | ursh/indico_ursh/controllers.py | Python | mit | 4,034 |
import decimal
import gc
import itertools
import multiprocessing
import weakref
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.engine import result
from sqlalchemy.engine.processors import to_decimal_processor_factory
from sqlalchemy.orm import aliased
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.session import _sessions
from sqlalchemy.sql import column
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.sql.visitors import replacement_traverse
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from ..orm import _fixtures
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
class ASub(A):
pass
def assert_cycles(expected=0):
def decorate(fn):
def go():
fn() # warmup, configure mappers, caches, etc.
gc_collect()
gc_collect()
gc_collect() # multiple calls seem to matter
# gc.set_debug(gc.DEBUG_COLLECTABLE)
try:
return fn() # run for real
finally:
unreachable = gc_collect()
assert unreachable <= expected
gc_collect()
return go
return decorate
def profile_memory(
maxtimes=250, assert_no_sessions=True, get_num_objects=None
):
def decorate(func):
# run the test N times. if length of gc.get_objects()
# keeps growing, assert false
def get_objects_skipping_sqlite_issue():
# pysqlite keeps adding weakref objects which only
# get reset after 220 iterations. We'd like to keep these
# tests under 50 iterations and ideally about ten, so
# just filter them out so that we get a "flatline" more quickly.
if testing.against("sqlite+pysqlite"):
return [
o
for o in gc.get_objects()
if not isinstance(o, weakref.ref)
]
else:
return gc.get_objects()
def profile(queue, func_args):
# give testing.db a brand new pool and don't
# touch the existing pool, since closing a socket
# in the subprocess can affect the parent
testing.db.pool = testing.db.pool.recreate()
gc_collect()
samples = []
max_ = 0
max_grew_for = 0
success = False
until_maxtimes = 0
try:
while True:
if until_maxtimes >= maxtimes // 5:
break
for x in range(5):
try:
func(*func_args)
except Exception as err:
queue.put(
(
"result",
False,
"Test raised an exception: %r" % err,
)
)
raise
gc_collect()
samples.append(
get_num_objects()
if get_num_objects is not None
else len(get_objects_skipping_sqlite_issue())
)
if assert_no_sessions:
assert len(_sessions) == 0, "%d sessions remain" % (
len(_sessions),
)
# queue.put(('samples', samples))
latest_max = max(samples[-5:])
if latest_max > max_:
queue.put(
(
"status",
"Max grew from %s to %s, max has "
"grown for %s samples"
% (max_, latest_max, max_grew_for),
)
)
max_ = latest_max
max_grew_for += 1
until_maxtimes += 1
continue
else:
queue.put(
(
"status",
"Max remained at %s, %s more attempts left"
% (max_, max_grew_for),
)
)
max_grew_for -= 1
if max_grew_for == 0:
success = True
break
except Exception as err:
queue.put(("result", False, "got exception: %s" % err))
else:
if not success:
queue.put(
(
"result",
False,
"Ran for a total of %d times, memory kept "
"growing: %r" % (maxtimes, samples),
)
)
else:
queue.put(("result", True, "success"))
def run_plain(*func_args):
import queue as _queue
q = _queue.Queue()
profile(q, func_args)
while True:
row = q.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
assert row[1], row[2]
# return run_plain
def run_in_process(*func_args):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=profile, args=(queue, func_args)
)
proc.start()
while True:
row = queue.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
proc.join()
assert row[1], row[2]
return run_in_process
return decorate
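# Illustrative sketch (added; not part of the original test suite): the
# decorator factory above wraps a zero-argument callable, runs it repeatedly
# (in a subprocess) and fails if the sampled gc object counts keep growing:
#
#     @profile_memory(maxtimes=50)
#     def go():
#         do_possibly_leaky_work()   # hypothetical workload under test
#
#     go()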
def assert_no_mappers():
clear_mappers()
gc_collect()
class EnsureZeroed(fixtures.ORMTest):
def setup_test(self):
_sessions.clear()
clear_mappers()
# enable query caching, however make the cache small so that
# the tests don't take too long. issues w/ caching include making
# sure sessions don't get stuck inside of it. However it will
# make tests like test_mapper_reset take a long time because mappers
# are very much a part of what's in the cache.
self.engine = engines.testing_engine(
options={"use_reaper": False, "query_cache_size": 10}
)
@testing.add_to_marker.memory_intensive
class MemUsageTest(EnsureZeroed):
__requires__ = ("cpython", "no_windows")
def test_type_compile(self):
from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect
cast = sa.cast(column("x"), sa.Integer)
@profile_memory()
def go():
dialect = SQLiteDialect()
cast.compile(dialect=dialect)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_init(self):
@profile_memory()
def go():
to_decimal_processor_factory({}, 10)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_process(self):
@profile_memory()
def go():
to_decimal_processor_factory(decimal.Decimal, 10)(1.2)
go()
@testing.requires.cextensions
def test_cycles_in_row(self):
tup = result.result_tuple(["a", "b", "c"])
@profile_memory()
def go():
obj = {"foo": {}}
obj["foo"]["bar"] = obj
row = tup([1, 2, obj])
obj["foo"]["row"] = row
del row
go()
def test_ad_hoc_types(self):
"""test storage of bind processors, result processors
in dialect-wide registry."""
from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy import types
eng = engines.testing_engine()
for args in (
(types.Integer,),
(types.String,),
(types.PickleType,),
(types.Enum, "a", "b", "c"),
(sqlite.DATETIME,),
(postgresql.ENUM, "a", "b", "c"),
(types.Interval,),
(postgresql.INTERVAL,),
(mysql.VARCHAR,),
):
@profile_memory()
def go():
type_ = args[0](*args[1:])
bp = type_._cached_bind_processor(eng.dialect)
rp = type_._cached_result_processor(eng.dialect, 0)
bp, rp # strong reference
go()
assert not eng.dialect._type_memos
@testing.fails()
def test_fixture_failure(self):
class Foo:
pass
stuff = []
@profile_memory(maxtimes=20)
def go():
stuff.extend(Foo() for i in range(100))
go()
@testing.add_to_marker.memory_intensive
class MemUsageWBackendTest(fixtures.MappedTest, EnsureZeroed):
__requires__ = "cpython", "memory_process_intensive", "no_asyncio"
__sparse_backend__ = True
# ensure a pure growing test trips the assertion
@testing.fails_if(lambda: True)
def test_fixture(self):
class Foo:
pass
x = []
@profile_memory(maxtimes=10)
def go():
x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
go()
def test_session(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
)
m2 = self.mapper_registry.map_imperatively(B, table2)
@profile_memory()
def go():
with Session(self.engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
def test_sessionmaker(self):
@profile_memory()
def go():
sessmaker = sessionmaker(bind=self.engine)
sess = sessmaker()
r = sess.execute(select(1))
r.close()
sess.close()
del sess
del sessmaker
go()
@testing.emits_warning("Compiled statement cache for mapper.*")
@testing.emits_warning("Compiled statement cache for lazy loader.*")
@testing.crashes("sqlite", ":memory: connection not suitable here")
def test_orm_many_engines(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
_compiled_cache_size=50,
)
m2 = self.mapper_registry.map_imperatively(
B, table2, _compiled_cache_size=50
)
@profile_memory()
def go():
engine = engines.testing_engine(
options={
"logging_name": "FOO",
"pool_logging_name": "BAR",
"use_reaper": False,
}
)
with Session(engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
engine.dispose()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
@testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData()
wide_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*[Column("col%d" % i, Integer) for i in range(10)],
)
class Wide:
pass
self.mapper_registry.map_imperatively(
Wide, wide_table, _compiled_cache_size=10
)
metadata.create_all(self.engine)
with Session(self.engine) as session:
w1 = Wide()
session.add(w1)
session.commit()
del session
counter = [1]
@profile_memory()
def go():
with Session(self.engine) as session:
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, "col%d" % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.commit()
counter[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.requires.savepoints
def test_savepoints(self):
metadata = MetaData()
some_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
class SomeClass:
pass
self.mapper_registry.map_imperatively(SomeClass, some_table)
metadata.create_all(self.engine)
with Session(self.engine) as session:
target_strings = (
session.connection().dialect.identifier_preparer._strings
)
@profile_memory(
assert_no_sessions=False,
get_num_objects=lambda: len(target_strings),
)
def go():
with Session(self.engine) as session, session.begin():
sc = SomeClass()
session.add(sc)
with session.begin_nested():
session.query(SomeClass).first()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_unicode_warnings(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", Unicode(30)),
)
metadata.create_all(self.engine)
i = [1]
        # the number of times here is cranked way up so that we can see
# pysqlite clearing out its internal buffer and allow
# the test to pass
@testing.emits_warning()
@profile_memory()
def go():
# execute with a non-unicode object. a warning is emitted,
# this warning shouldn't clog up memory.
with self.engine.connect() as conn:
conn.execute(
table1.select().where(table1.c.col2 == "foo%d" % i[0])
)
i[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
def test_warnings_util(self):
counter = itertools.count()
import warnings
warnings.filterwarnings("ignore", "memusage warning.*")
@profile_memory()
def go():
util.warn_limited(
"memusage warning, param1: %s, param2: %s",
(next(counter), next(counter)),
)
go()
def test_mapper_reset(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
@profile_memory()
def go():
self.mapper_registry.map_imperatively(
A,
table1,
properties={"bs": relationship(B, order_by=table2.c.col1)},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_alias_pathing(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid", Integer, ForeignKey("b.id")),
Column("type", String(30)),
)
asub = Table(
"asub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(30)),
)
b = Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
self.mapper_registry.map_imperatively(
A, a, polymorphic_identity="a", polymorphic_on=a.c.type
)
self.mapper_registry.map_imperatively(
ASub, asub, inherits=A, polymorphic_identity="asub"
)
self.mapper_registry.map_imperatively(
B, b, properties={"as_": relationship(A)}
)
metadata.create_all(self.engine)
sess = Session(self.engine)
a1 = ASub(data="a1")
a2 = ASub(data="a2")
a3 = ASub(data="a3")
b1 = B(as_=[a1, a2, a3])
sess.add(b1)
sess.commit()
del sess
# sqlite has a slow enough growth here
# that we have to run it more times to see the
# "dip" again
@profile_memory(maxtimes=120)
def go():
sess = Session(self.engine)
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
del sess
try:
go()
finally:
metadata.drop_all(self.engine)
clear_mappers()
def test_path_registry(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer),
)
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id")),
)
m1 = self.mapper_registry.map_imperatively(
A, a, properties={"bs": relationship(B)}
)
self.mapper_registry.map_imperatively(B, b)
@profile_memory()
def go():
ma = sa.inspect(aliased(A))
m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]
go()
clear_mappers()
def test_with_inheritance(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
ForeignKey("mytable.col1"),
primary_key=True,
test_needs_autoincrement=True,
),
Column("col3", String(30)),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
clear_mappers()
self.mapper_registry.map_imperatively(
A,
table1,
polymorphic_on=table1.c.col2,
polymorphic_identity="a",
)
self.mapper_registry.map_imperatively(
B, table2, inherits=A, polymorphic_identity="b"
)
sess = Session(self.engine, autoflush=False)
a1 = A()
a2 = A()
b1 = B(col3="b1")
b2 = B(col3="b2")
for x in [a1, a2, b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table3 = Table(
"t1tot2",
metadata,
Column("t1", Integer, ForeignKey("mytable.col1")),
Column("t2", Integer, ForeignKey("mytable2.col1")),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, secondary=table3, backref="as", order_by=table3.c.t1
)
},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
b1 = B(col2="b1")
b2 = B(col2="b2")
a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1, a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)
for a in alist:
sess.delete(a)
sess.flush()
# mappers necessarily find themselves in the compiled cache,
# so to allow them to be GC'ed clear out the cache
self.engine.clear_compiled_cache()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_many_discarded_relationships(self):
"""a use case that really isn't supported, nonetheless we can
guard against memleaks here so why not"""
m1 = MetaData()
t1 = Table("t1", m1, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
class T1:
pass
t1_mapper = self.mapper_registry.map_imperatively(T1, t1)
@testing.emits_warning()
@profile_memory()
def go():
class T2:
pass
t2_mapper = self.mapper_registry.map_imperatively(T2, t2)
t1_mapper.add_property("bar", relationship(t2_mapper))
s1 = Session(testing.db)
# this causes the path_registry to be invoked
s1.query(t1_mapper)._compile_context()
go()
# fails on newer versions of pysqlite due to unusual memory behavior
# in pysqlite itself. background at:
# https://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache_deprecated_coercion(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = table2.select()
sess = session()
with testing.expect_deprecated(
"Implicit coercion of SELECT and " "textual SELECT constructs"
):
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = aliased(Bar, table2.select().subquery())
sess = session()
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.add_to_marker.memory_intensive
class CycleTest(_fixtures.FixtureTest):
__requires__ = ("cpython", "no_windows")
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).all()
go()
def test_session_execute_orm(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
stmt = select(User)
s.execute(stmt)
go()
def test_cache_key(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
stmt = select(User)
stmt._generate_cache_key()
go()
def test_proxied_attribute(self):
from sqlalchemy.ext import hybrid
users = self.tables.users
class Foo:
@hybrid.hybrid_property
def user_name(self):
return self.name
self.mapper_registry.map_imperatively(Foo, users)
# unfortunately there's a lot of cycles with an aliased()
# for now, however calling upon clause_element does not seem
# to make it worse which is what this was looking to test
@assert_cycles(69)
def go():
a1 = aliased(Foo)
a1.user_name.__clause_element__()
go()
def test_query_alias(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
u1 = aliased(User)
@assert_cycles()
def go():
s.query(u1).all()
go()
def test_entity_path_w_aliased(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)._path_registry[User.addresses.property]
go()
def test_orm_objects_from_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
def generate():
objects = s.query(User).filter(User.id == 7).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_orm_objects_from_query_w_selectinload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(selectinload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_selectinload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
selectinload(User.addresses)
go()
def test_selectinload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
Load(User).selectinload(User.addresses)
go()
def test_orm_path(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
inspect(User)._path_registry[User.addresses.property][
inspect(Address)
]
go()
def test_joinedload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
joinedload(User.addresses)
go()
def test_joinedload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
l1 = Load(User).joinedload(User.addresses)
l1._generate_cache_key()
go()
def test_orm_objects_from_query_w_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(joinedload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_query_filtered(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).filter(User.id == 7).all()
go()
def test_query_joins(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
# cycles here are due to ClauseElement._cloned_set, others
# as of cache key
@assert_cycles(4)
def go():
s.query(User).join(User.addresses).all()
go()
def test_query_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
s.query(User).options(joinedload(User.addresses)).all()
# cycles here are due to ClauseElement._cloned_set and Load.context,
# others as of cache key. The orm.instances() function now calls
# dispose() on both the context and the compiled state to try
# to reduce these cycles.
@assert_cycles(18)
def go():
generate()
go()
def test_plain_join(self):
users, addresses = self.tables("users", "addresses")
@assert_cycles()
def go():
str(users.join(addresses).compile(testing.db))
go()
def test_plain_join_select(self):
users, addresses = self.tables("users", "addresses")
# cycles here are due to ClauseElement._cloned_set, others
# as of cache key
@assert_cycles(7)
def go():
s = select(users).select_from(users.join(addresses))
state = s._compile_state_factory(s, s.compile(testing.db))
state.froms
go()
def test_orm_join(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
str(orm_join(User, Address, User.addresses).compile(testing.db))
go()
def test_join_via_query_relationship(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(User.addresses)
go()
def test_join_via_query_to_entity(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(Address)
go()
def test_result_fetchone(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.connection(bind_arguments=dict(mapper=User)).execute(
stmt
)
while True:
row = result.fetchone()
if row is None:
break
go()
def test_result_fetchall(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
rows = result.fetchall() # noqa
go()
def test_result_fetchmany(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.partitions(3):
pass
go()
def test_result_fetchmany_unique(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.unique().partitions(3):
pass
go()
def test_core_select_from_orm_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
# ORM query using future select for .statement is adding
# some ORMJoin cycles here during compilation. not worth trying to
# find it
@assert_cycles(4)
def go():
s.execute(stmt)
go()
def test_adapt_statement_replacement_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
replacement_traverse(statement, {}, lambda x: None)
go()
def test_adapt_statement_cloned_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
cloned_traverse(statement, {}, {})
go()
def test_column_adapter_lookup(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
@assert_cycles()
def go():
adapter = sql_util.ColumnAdapter(inspect(u1).selectable)
adapter.columns[User.id]
go()
def test_orm_aliased(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)
go()
@testing.fails()
def test_the_counter(self):
@assert_cycles()
def go():
x = []
x.append(x)
go()
def test_weak_sequence(self):
class Foo:
pass
f = Foo()
@assert_cycles()
def go():
util.WeakSequence([f])
go()
@testing.provide_metadata
def test_optimized_get(self):
Base = declarative_base(metadata=self.metadata)
class Employee(Base):
__tablename__ = "employee"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(10))
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Employee):
__tablename__ = " engineer"
id = Column(ForeignKey("employee.id"), primary_key=True)
engineer_name = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "engineer"}
Base.metadata.create_all(testing.db)
s = Session(testing.db)
s.add(Engineer(engineer_name="wally"))
s.commit()
s.close()
@assert_cycles()
def go():
e1 = s.query(Employee).first()
e1.engineer_name
go()
def test_visit_binary_product(self):
a, b, q, e, f, j, r = [column(chr_) for chr_ in "abqefjr"]
from sqlalchemy import and_, func
from sqlalchemy.sql.util import visit_binary_product
expr = and_((a + b) == q + func.sum(e + f), j == r)
def visit(expr, left, right):
pass
@assert_cycles()
def go():
visit_binary_product(visit, expr)
go()
def test_session_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.close()
go()
def test_session_commit_rollback(self):
# this is enabled by #5074
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.commit()
go()
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.rollback()
go()
def test_session_multi_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
assert s._transaction is None
s.connection()
s.close()
assert s._transaction is None
s.connection()
assert s._transaction is not None
s.close()
go()
| sqlalchemy/sqlalchemy | test/aaa_profiling/test_memusage.py | Python | mit | 45,650 |
# coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os
import sys
import urllib2
import warnings
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import truncnorm, scoreatpercentile
# Neutron star distribution properties (fixed)
bounds_NS = (1.3, 2.) # Msun
mean_NS = 1.4 # Msun
stddev_NS = 0.05 # Msun
# White dwarf mass bounds
bounds_WD = (0.2, 1.44)
# Number of steps to use in numerical integration below
Nintegrate = 4096
def integrand_factor(m2, mf, m1):
""" Compute the factor multiplying p(M_2|θ) in the integral of Equation XXX in the paper """
mtot = m1 + m2
return mtot**(4/3.) * mf**(-1/3.) / m2 / np.sqrt(m2**2 - (mf*mtot**2)**(2/3.)) / 3.
def m2_func(p, mf, m1, bounds_WD, m2s):
mean_WD,stddev_WD,f_NS = p
# White dwarf companion mixture component
lower, upper = bounds_WD
dist_WD = truncnorm((lower - mean_WD) / stddev_WD, (upper - mean_WD) / stddev_WD, loc=mean_WD, scale=stddev_WD)
# Neutron star companion mixture component
lower, upper = bounds_NS
dist_NS = truncnorm((lower - mean_NS) / stddev_NS, (upper - mean_NS) / stddev_NS, loc=mean_NS, scale=stddev_NS)
p_WD = (1-f_NS) * dist_WD.pdf(m2s)
p_NS = f_NS * dist_NS.pdf(m2s)
return p_WD + p_NS
def likelihood(p, mf, m1, bounds_WD):
mean_WD,stddev_WD,f_NS = p
m2s = np.linspace(0., 2., Nintegrate)
dm2 = m2s[1] - m2s[0]
integ_fac = integrand_factor(m2s, mf, m1)
# White dwarf companion mixture component
lower, upper = bounds_WD
dist_WD = truncnorm((lower - mean_WD) / stddev_WD, (upper - mean_WD) / stddev_WD, loc=mean_WD, scale=stddev_WD)
# Neutron star companion mixture component
lower, upper = bounds_NS
dist_NS = truncnorm((lower - mean_NS) / stddev_NS, (upper - mean_NS) / stddev_NS, loc=mean_NS, scale=stddev_NS)
p_WD = (1-f_NS) * dist_WD.pdf(m2s)
p_NS = f_NS * dist_NS.pdf(m2s)
# Zero out when evaluating outside of allowed bounds (normally NaN)
integ_fac[np.isnan(integ_fac)] = 0.
p_WD[np.isnan(p_WD)] = 0.
p_NS[np.isnan(p_NS)] = 0.
# we approximate the integral using the trapezoidal rule
integrand_WD = p_WD * integ_fac
integrand_NS = p_NS * integ_fac
p_WD = dm2/2. * (integrand_WD[0] + np.sum(2*integrand_WD[1:-1], axis=0) + integrand_WD[-1])
p_NS = dm2/2. * (integrand_NS[0] + np.sum(2*integrand_NS[1:-1], axis=0) + integrand_NS[-1])
return np.vstack((p_WD, p_NS))
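# Side note (added for clarity; not in the original script): the manual sums
# above are the composite trapezoidal rule on the uniform grid m2s, so for a
# 1-D integrand they agree with numpy's built-in helper:
#   np.trapz(integrand_WD, dx=dm2) == dm2/2. * (integrand_WD[0]
#       + np.sum(2*integrand_WD[1:-1], axis=0) + integrand_WD[-1])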
def main(m1, mf, nsamples):
file_url = "http://files.figshare.com/1720018/posterior_samples.txt"
cache_path = "data"
local_file = os.path.join(cache_path, "posterior_samples.txt")
if not os.path.exists(cache_path):
os.mkdir(cache_path)
if not os.path.exists(local_file):
print("Posterior sample file doesn't exist locally.")
print("Downloading and caching to: {}".format(os.path.abspath(local_file)))
# download and save
f = urllib2.urlopen(file_url)
with open(local_file, 'w') as f2:
f2.write(f.read())
else:
print("Reading cached file from: {}".format(os.path.abspath(local_file)))
samples = np.genfromtxt(local_file, delimiter=',', names=True)
m2s = np.linspace(0, 2., 50)
p_m2s = np.zeros((nsamples, len(m2s)))
P_NS = np.zeros(nsamples)
for i,p in enumerate(samples[:nsamples]):
p_WD,p_NS = likelihood(p, mf, m1, bounds_WD)[:,0]
P_NS[i] = p_NS / (p_WD + p_NS)
p_m2s[i] = integrand_factor(m2s, mf, m1) * m2_func(p, mf, m1, bounds_WD, m2s)
fig,axes = plt.subplots(2,1,figsize=(10,12))
binw = 3.5*np.std(P_NS) / len(P_NS)**(1/3.)
axes[0].hist(P_NS, bins=np.arange(0.,1.+binw,binw), normed=True)
axes[0].set_xlabel(r"$P_{\rm NS}$")
axes[0].axvline(np.mean(P_NS), alpha=0.5, lw=2., color='g')
axes[0].axvline(scoreatpercentile(P_NS,16), alpha=0.5, lw=2., color='g', linestyle='dashed')
axes[0].axvline(scoreatpercentile(P_NS,84), alpha=0.5, lw=2., color='g', linestyle='dashed')
axes[0].set_xlim(0,max(P_NS)+0.05)
axes[1].errorbar(m2s, np.mean(p_m2s,axis=0), np.std(p_m2s,axis=0),
marker='o', ecolor='#666666')
# for i in np.random.randint(0,nsamples,100):
# axes[1].plot(m2s, p_m2s[i], marker=None, lw=2., color='#666666', alpha=0.25)
# axes[1].plot(m2s, np.mean(p_m2s,axis=0), marker=None, lw=2., color='k')
axes[1].set_xlabel(r"${\rm M}_2 [{\rm M}_\odot]$")
print("Mean P_NS: {:.3f}".format(np.mean(P_NS)))
print("Std. deviation P_NS: {:.3f}".format(np.std(P_NS)))
print("Median P_NS: {:.3f}".format(np.median(P_NS)))
print("16th percentile P_NS: {:.3f}".format(scoreatpercentile(P_NS,16)))
print("84th percentile P_NS: {:.3f}".format(scoreatpercentile(P_NS,84)))
plt.show()
if __name__ == '__main__':
from argparse import ArgumentParser
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("--m1", dest="m1", default=None, required=True,
type=float, help="Mass of the primary.")
parser.add_argument("--mf", dest="mf", default=None, required=True,
type=float, help="Mass function.")
parser.add_argument("--nsamples", dest="nsamples", default=1000,
type=int, help="Number of posterior samples to use.")
args = parser.parse_args()
warnings.simplefilter("ignore", RuntimeWarning)
main(args.m1, args.mf, nsamples=args.nsamples)
| adrn/tilt-shift | scripts/companion.py | Python | mit | 5,554 |
# stripped away features RBH 2018
# negamax, no alphabeta, no TT
import numpy as np
class Cell: # each cell is one of these: empty, x, o
n,e,x,o,chars = 9,0,1,2,'.xo'
def opponent(c): return 3-c
# each cell is 0,1,2
# so number positions == 3**9
# can represent position as 9-digit base_3 number
ttt_states = 19683 # 3**Cell.n
powers_of_3 = np.array( # for converting position to base_3 int
[1, 3, 9, 27, 81, 243, 729, 2187, 6561], dtype=np.int16)
def board_to_int(B):
return sum(B*powers_of_3) # numpy multiplies vectors componentwise
# convert from integer for board position
def base_3( y ):
  assert(y < ttt_states)
L = [0]*Cell.n
for j in range(Cell.n):
y, L[j] = divmod(y,3)
if y==0: break
return np.array( L, dtype = np.int16)
# input-output ################################################
def char_to_cell(c):
return Cell.chars.index(c)
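# ANSI terminal escape sequences: white text, magenta for x stones, green for o stones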
escape_ch = '\033['
colorend = escape_ch + '0m'
textcolor = escape_ch + '0;37m'
stonecolors = (textcolor,\
escape_ch + '0;35m',\
escape_ch + '0;32m',\
textcolor)
def printmenu():
print(' h help menu')
print(' x b2 play x b 2')
print(' o e3 play o e 3')
print(' . a2 erase a 2')
print(' ? solve state')
print(' u undo')
print(' [return] quit')
def showboard(psn):
def paint(s): # s a string
if len(s)>1 and s[0]==' ':
return ' ' + paint(s[1:])
x = Cell.chars.find(s[0])
if x > 0:
return stonecolors[x] + s + colorend
elif s.isalnum():
return textcolor + s + colorend
return s
pretty = '\n '
for c in range(3): # columns
pretty += ' ' + paint(chr(ord('a')+c))
pretty += '\n'
for j in range(3): # rows
pretty += ' ' + paint(str(1+j)) + ' '
for k in range(3): # columns
pretty += ' ' + paint(Cell.chars[psn.brd[rc_to_lcn(j,k)]])
pretty += '\n'
print(pretty)
Win_lines = np.array(( # 8 winning lines, as location triples
(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)
), dtype=np.int8)
##################### board state ########################
# cell indices, or locations:
# 0 1 2
# 3 4 5
# 6 7 8
def rc_to_lcn(r,c):
return r*3 + c
def lcn_to_alphanum(p):
r, c = divmod(p,3)
return 'abc'[c] + '123'[r]
class Position: # ttt board with x,o,e cells
def legal_moves(self):
L = []
for j in range(Cell.n):
if self.brd[j]==Cell.e:
L.append(j)
return L
def has_win(self, z):
win_found = False
for t in Win_lines:
if (self.brd[t[0]] == z and
self.brd[t[1]] == z and
self.brd[t[2]] == z):
return True
return False
def game_over(self):
win_found = False
for z in (Cell.x, Cell.o):
if (self.has_win(z)):
print('\n game_over: ',Cell.chars[z],'wins\n')
return True
return False
def putstone(self, row, col, color):
self.brd[rc_to_lcn(row,col)] = color
def __init__(self, y):
self.brd = base_3(y)
def makemove(self, cmd, H):
parseok, cmd = False, cmd.split()
if len(cmd)==2:
ch = cmd[0][0]
if ch in Cell.chars:
q, n = cmd[1][0], cmd[1][1:]
if q.isalpha() and n.isdigit():
x, y = int(n) - 1, ord(q)-ord('a')
if x>=0 and x < 3 and y>=0 and y < 3:
self.putstone(x, y, char_to_cell(ch))
H.append(rc_to_lcn(x,y)) # add location to history
return
else: print('\n coordinate off board')
print(' ... ? ... sorry ...\n')
def undo(H, brd): # pop last location, erase that cell
if len(H)==0:
print('\n board empty, nothing to undo\n')
else:
lcn = H.pop()
brd[lcn] = Cell.e
####################### alpha-beta negamax search
#def ab_neg(AB, calls, d, psn, ptm, alpha, beta): # ptm: 1/0/-1 win/draw/loss
def negamax(calls, psn, ptm): # ptm: 1/0/-1 win/draw/loss
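  # value of the position for ptm (the player to move): each legal reply is
  # scored recursively for the opponent and negated; the maximum is kept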
calls += 1
if psn.has_win(ptm):
return 1, calls # previous move created win
L = psn.legal_moves()
if len(L) == 0:
return 0, calls # board full, no winner
so_far = -1 # best score so far
for cell in L:
psn.brd[cell] = ptm
nmx, c = negamax(0, psn, opponent(ptm))
so_far = max(so_far,-nmx)
calls += c
psn.brd[cell] = Cell.e # reset brd to original
    # early-abort once a win is found (comment out the next 2 lines to search the full tree)
if so_far == 1: # found a win, cannot improve the result
break
return so_far, calls
def info(p):
L = p.legal_moves()
print(' legal moves', L)
for cell in (Cell.x, Cell.o):
print(' ',Cell.chars[cell], 'negamax',end='')
nmx, c = negamax(0, p, cell)
print(' result','{:2d}'.format(nmx), ' nodes',c)
if p.game_over():
pass
def interact():
p = Position(0)
history = [] # used for erasing, so only need locations
while True:
showboard(p)
cmd = input(' ')
if len(cmd)==0:
print('\n ... adios :)\n')
return
if cmd[0][0]=='h':
printmenu()
elif cmd[0][0]=='?':
info(p)
elif cmd[0][0]=='u':
undo(history, p.brd)
elif (cmd[0][0] in Cell.chars):
p.makemove(cmd, history)
else:
print('\n ???????\n')
printmenu()
interact()
| ryanbhayward/games-puzzles-algorithms | simple/ttt/test/ttt.py | Python | mit | 5,278 |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="scattergl.marker.line", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
colorscale_path=kwargs.pop(
"colorscale_path", "scattergl.marker.line.colorscale"
),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scattergl/marker/line/_color.py | Python | mit | 593 |
"""
WSGI config for djangoblog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoblog.settings")
application = get_wsgi_application()
| Raulios/django-blog | djangoblog/wsgi.py | Python | mit | 397 |
#!/usr/bin/env python3
try:
# for Python 2.x
import StringIO
except:
# for Python 3.x
import io
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
# define data
csv_input = """timestamp,title,reqid
2016-07-23 11:05:08,SVP,2356556-AS
2016-12-12 01:23:33,VP,5567894-AS
2016-09-13 12:43:33,VP,3455673-AS
2016-09-13 19:43:33,EVP,8455673-AS
2016-09-30 11:43:33,VP,9455673-AS
2016-08-02 01:23:33,VP,5698765-AS
2016-04-22 01:23:33,VP,1234556-AS
"""
# load data
try:
# for Python 2.x
f = StringIO.StringIO(csv_input)
except:
# for Python 3.x
f = io.StringIO(csv_input)
reader = csv.reader(f, delimiter=',')
for row in reader:
print('\t'.join(row))
# reset file pointer position to beginning of file
f.seek(0)
# create pandas dataframe
#df = pd.read_csv(io.StringIO(csv_input))
df = pd.read_csv(f)
print(df.head())
print(df.info())
print(df)
df['date'] = pd.DatetimeIndex(df.timestamp).normalize()
print(df)
print(df.index)
#df = df.drop('timestamp',axis=1)
df.drop('timestamp', axis=1, inplace=True)
#df = df.reindex(df.reqid, fill_value=0)
#df = df.reindex(df.reqid, method='bfill')
#print(df)
#print(df.index)
#i = df[((df.title == 'SVP') & (df.reqid == '3455673-AS'))].index
#df.drop(df.index[0],inplace=True)
#df.drop(i,inplace=True)
#i = df.index[0]
#df = df.drop(i)
#print(df)
#print(i)
print(type(df['date'][0]))
#df = df.sort_values(by='date',axis=0,ascending=True)
df.sort_values(by='date',axis=0,ascending=True,inplace=True)
print(df)
df['weekday'] = df['date'].apply( lambda x: x.dayofweek)
# setup date processing
now_string = '2016-10-01 08:01:20'
past_by_days = 30
time_delta = pd.to_timedelta('{} days'.format(past_by_days))
print(time_delta)
#now = pd.tslib.Timestamp('2016-10-01 08:01:20')
now = pd.Timestamp(now_string)
now_norm = now.normalize()
print(now_norm)
now_start = now_norm - time_delta
print(now_start)
# process
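# keep only the rows whose date falls within the last `past_by_days` days ending at `now_norm`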
ddf = df.loc[((df['date'] >= now_start) & (df['date'] <= now_norm))]
print(ddf)
print('number of observations found in filtered df = {}'.format(len(ddf)))
print(len(ddf.columns))
# histogram of number of observations by date
df_grouped_date = df.groupby(['date'])
df_date_count = df_grouped_date['reqid'].aggregate(['count'])
#df_date_count = df_grouped_date.aggregate(['count'])
print(df_date_count)
#exclude_cols = ['title count']
#df_date_count.ix[:, df_date_count.columns.difference(exclude_cols)].plot(kind='bar')
df_date_count.loc[:, df_date_count.columns].plot(kind='bar')
plt.legend(loc='best').get_texts()[0].set_text('Reqs Added Per Day')
file_name = 'myBar'
file_name = re.sub(r'\s+','_',file_name)
plt.savefig(file_name)
plt.show()
| robmarano/nyu-python | course-2/session-7/pandas/df_basics.py | Python | mit | 2,677 |
"""
https://leetcode.com/problems/binary-gap/
https://leetcode.com/submissions/detail/182434177/
https://leetcode.com/submissions/detail/182434882/
"""
class Solution:
def binaryGap(self, N):
"""
:type N: int
:rtype: int
"""
ans = 0
distance = 0
cur = N
while cur & 1 == 0:
cur = cur >> 1
while cur > 0:
cur = cur >> 1
distance += 1
if cur & 1 == 1:
ans = max(distance, ans)
distance = 0
return ans
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
# self.assertEqual(solution.binaryGap(22), 2)
# self.assertEqual(solution.binaryGap(5), 2)
# self.assertEqual(solution.binaryGap(6), 1)
self.assertEqual(solution.binaryGap(8), 0)
if __name__ == '__main__':
unittest.main()
| vivaxy/algorithms | python/problems/binary_gap.py | Python | mit | 927 |
#!/usr/bin/python3
'''
Created on Dec 3, 2016
@author: keithcoleman
This is a simple prime number generator that finds prime numbers within a user defined range. A great script to use with the RSA scripts
'''
def primeNumberFinder(p=1):
while(True):
if primeFilter(p): yield p
p+=1
def primeFilter(n):
    if n<2: return False
    for x in range(2,n):
        if n%x==0: return False
    return True
def main(minNumber,maxNumber):
    for p in primeNumberFinder(minNumber):
        if p>maxNumber: break
        print(p)
if __name__ == '__main__':
print("This program finds prime numbers within a user defined range")
minPrime=input("Please enter the starting value:")
maxPrime=input("Please enter the ending value:")
try:
        main(int(minPrime),int(maxPrime))
except BaseException as e:
print("We have a problem ({})".format(e))
| kcolemanbd/Mathematics | PrimeNumberGenerator.py | Python | mit | 919 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations(object):
"""ExpressRouteCircuitPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitPeering"
"""Gets the specified peering for the express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCircuitPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCircuitPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitPeering"]
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCircuitPeeringListResult"]
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCircuitPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_express_route_circuit_peerings_operations.py | Python | mit | 22,136 |
"""
Door controller for a chicken coop
This is hooked to a power screwdriver and battery system through
the gpio port. The screwdriver is connected to a vertical sliding
door through a string and pulley arrangement. Turning the motor one
way raises the door, the other way lowers it. However, if all the
string is let out, and the motor continues to run, it will start
winding the string back up and will eventually cause the door to
raise back up and jam. We will work around this mechanical bug in
software.
Inputs:
Two sensors: an upper and a lower limit switch.
Two switches: manual up and down switches. (dpdt momentary, likely)
Outputs:
Power: a mosfet, likely will be run on the PWM
pin for speed control
Direction: a dpdt relay. On will be one direction,
off the other.
There are N states:
Open: upper limit switch is closed, power is off
Closed: lower limit switch is closed, power is off
Opening: power is on, dpdt relay is set to up
Closing: power is on, dpdt relay is set to down
Stopped: Neither limit switch is closed, power is off
Error: Door has jammed open when attempting to close.
There are 3 operations:
Up: opens the door.
Down: closes the door.
Stop: Stops the current operation.
Error: power was on, door is closing, and the upper
limit switch has closed. Power should be immediately
turned off.
"""
import RPIO as GPIO
# restrict to localhost.
from RPIO import _RPIO
_RPIO._TCP_SOCKET_HOST = '127.0.0.1'
import time
import json
import threading
import syslog
def dbg(s):
#print s
syslog.syslog(s)
# States
OPEN, CLOSED, OPENING, CLOSING, STOPPED, ERROR, DEAD, ERROR_RECOVERY = \
'open', 'closed', 'opening', 'closing', 'stopped', 'error', 'dead', 'error_recovery'
# Events
UP, DOWN, UPPER, LOWER = 'up', 'down', 'upper', 'lower'
# Power Control
ON, OFF = True, False
outputs = {
'power':24,
'direction':23,
}
inputs = {
'upper':22,
'lower': 18,
'up': 25,
'down': 17,
}
class door(object):
""" state machine to control the door """
def __init__(self, controller, use_thread=True, port=None):
self.controller = controller
self.state = None
self.use_thread = use_thread
self.port = port
self.out_state = dict((k,False) for k in outputs)
self.status_sockets = set()
""" state: {Event:op} """
self.map = { OPEN: { DOWN: self.close, },
CLOSING: { LOWER: self._delay(0.5, self.stop),
UPPER: self.error,
UP: self.stop, },
CLOSED: { UP: self.open, },
OPENING: { UPPER: self.stop,
DOWN: self.stop, },
STOPPED: { UP: self.open,
DOWN: self.close, },
ERROR: { DOWN: self.err_close,
LOWER: self.stop,
UPPER: self.stop, },
ERROR_RECOVERY: {
LOWER: self.stop,
UPPER: self.stop, },
DEAD: {},
None: {}
}
self.commands = {
'open': self.open,
'close': self.close,
'stop': self.stop,
'status': self.status,
'enroll': self.enroll,
}
self.setup()
self.reset_state()
if self.use_thread:
self.run()
def setup(self):
GPIO.setmode(GPIO.BCM)
for pin in outputs.values():
GPIO.setup(pin, GPIO.OUT)
self._power(False)
self._direction(False)
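        # inputs use internal pull-ups; edge callbacks are debounced and re-checked
        # after a short delay so only a sustained high level dispatches an event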
for (name, pin) in inputs.items():
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_interrupt_callback(pin,
self._delay(0.05,
self._ifhigh(getattr(self,name))),
edge='both',
debounce_timeout_ms=100)
if self.port:
GPIO.add_tcp_callback(self.port, self.command_dispatch)
def run(self):
while True:
try:
GPIO.wait_for_interrupts(threaded=self.use_thread)
except IOError:
# interrupted system call
pass
except KeyboardInterrupt:
#ctrl-c
break
def cleanup(self):
"""Call on unload so that the raspberry pi releases its gpio pins"""
GPIO.cleanup()
for pin in inputs.values():
GPIO.del_interrupt_callback(pin)
GPIO.stop_waiting_for_interrupts()
def _delay(self,delay, cb):
""" Empirically determined that we need a delay to be able to read
the value of an input after an edge trigger. This returns a delay
function which then calls the callback """
def fn(*args, **kwargs):
dbg("delay %s" % str(args))
time.sleep(delay)
return cb(*args, **kwargs)
return fn
def _ifhigh(self, cb):
""" Calls the callback is the input pin is high """
def fn(pin, *args, **kwargs):
dbg("ifhigh %s "% (pin))
if GPIO.input(pin):
dbg("pin %s high, calling callback" %pin)
return cb(pin, *args, **kwargs)
dbg("pin %s low, not calling" %pin)
self.notify_enrolled(self.status())
return fn
def reset_state(self):
""" Determines the proper state when the motor is stopped"""
if GPIO.input(inputs['upper']):
self.state = OPEN
elif GPIO.input(inputs['lower']):
self.state = CLOSED
else:
self.state = STOPPED
def noop(self, *args, **kwargs):
""" a dummy function"""
# can't be a lambda because of the function signature
pass
def event_dispatch(self, event, *args):
""" Calls the appropriate operation for the current state and event """
dbg( "event dispatch: %s " % event)
self.map.get(self.state).get(event, self.noop)(*args)
self.notify_enrolled(self.status())
def notify_enrolled(self, status):
""" Notifies all of the listening status connections of the
current status of the system."""
# Undone -- delegate this to a different thread.
# this is not realtime priority, and I don't want to
# delay any of the gpio events
err = []
dbg('Updating %d sockets with status' % len(self.status_sockets))
for sock in self.status_sockets:
try:
sock.sendall("Status\n"+status)
except:
dbg('Error sending to socket: removing')
err.append(sock)
[self.status_sockets.remove(sock) for sock in err]
def command_dispatch(self, socket, msg):
""" calls the appropriate command """
msg = msg.strip().lower()
ret = self.commands.get(msg, self.noop)(**{'socket':socket})
dbg('command dispatch: %s -> %s' %(msg,ret))
if ret == None: return self.response(socket, 'Error')
if ret == True: return self.response(socket, 'Ok')
if ret == False: return self.response(socket, 'Incorrect State')
if ret: return socket.send("Status\n"+ret+'\n')
def response(self, socket, msg):
status = self.status()
try:
socket.sendall(msg + '\n' + self.status() + '\n')
        except Exception as msg:
            # error sending to the socket
            dbg("Exception sending response: %s" % msg)
socket.setblocking(0)
self.notify_enrolled(status)
def permit(self, event):
""" Is the event allowable in this state """
return callable(self.map.get(self.state).get(event, None))
#
# Input Callbacks
#
def upper(self, pin, val=None):
""" Upper limit switch callback """
self.event_dispatch(UPPER)
def lower(self, pin, val=None):
""" Lower limit switch callback """
self.event_dispatch(LOWER)
def up(self, pin, val=None):
""" Up direction command switch """
# undone -- should the other edge for up stop?
self.event_dispatch(UP)
def down(self, pin, val=None):
""" Down direction command switch """
self.event_dispatch(DOWN)
#
# Operations
#
def open(self, *args, **kwargs):
""" initiates opening the door """
dbg('command: open')
if self.permit(UP):
dbg('Opening')
self._direction(UP)
self._power(ON)
self.state = OPENING
self.watchdog(5.0)
return True
return False
def close(self, *args, **kwargs):
""" initiates closing the door """
dbg('command: close')
if self.permit(DOWN):
dbg('Closing')
self._direction(DOWN)
self._power(ON)
self.state = CLOSING
self.watchdog(5.0)
return True
return False
def stop(self, *args, **kwargs):
""" Stops the current operations, shuts down power to the outputs. """
dbg ('stopping')
self._power(OFF)
self._direction(OFF)
self.reset_state()
return True
def error(self, *args, **kwargs):
dbg('ERROR, turning it off')
self._power(OFF)
self._direction(OFF)
self.state = ERROR
def err_close(self, *args, **kwargs):
dbg('Err State, attempting to close')
if self.state == ERROR and GPIO.input(inputs['upper']):
            self.state = ERROR_RECOVERY
self._direction(UP)
self._power(ON)
time.sleep(0.25)
self._power(OFF)
if GPIO.input(inputs['upper']):
# still jammed. We're dead
dbg('Still jammed, dying')
self._direction(OFF)
self.state = DEAD
return False
else:
# Unjammed. give it a shot. Any switch will stop.
dbg('Unjammed, continuing to close by opening')
                self._power(ON)
self.watchdog(10)
return True
return False
#
# Reporting command
#
def enroll(self, socket=None,*args, **kwargs):
"Adds the socket to the status broadcasts"
dbg('Enrolling a socket: %s' %socket)
if socket is not None:
self.status_sockets.add(socket)
socket.setblocking(0)
return True
return False
#
# Power Control Functions
#
def _power(self, val):
""" Control the power to the mosfet, true=on/false=off"""
dbg("setting power %s"%val)
GPIO.output(outputs['power'], not val)
def _direction(self, val):
""" set the direction relay """
dbg('setting direction %s, %s' %(val, val == UP))
GPIO.output(outputs['direction'], val == UP)
#
# Reporting
#
def status(self, *args, **kwargs):
stat = {'state': self.state}
for name,pin in inputs.items():
stat[name] = GPIO.input(pin)
return json.dumps(stat)
#
# Safety
#
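    # if the same operation is still running after `delay` seconds (e.g. a limit
    # switch never triggered), force a stop() to protect the motor and door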
def watchdog(self, delay):
threading.Thread(target=self._watchdog, args=(self.state,delay)).start()
def _watchdog(self, state, delay):
time.sleep(delay)
if self.state == state:
dbg('Watchdog killing operation %s after %s seconds' %(state, delay))
self.stop()
self.notify_enrolled(self.status())
if __name__=='__main__':
d = door(None, False, 8953)
d.run()
d.cleanup()
| wiredfool/coop-door | door/door.py | Python | mit | 12,170 |
#Demeter Interpreter
#./bytecode/lexer/scanner.py
#std import
#custom import
import bytecode.lexer.tokens as tokens
class Scanner:
"""
Class that scans a text to return tokens
"""
SPACE = " "
CMT_MARK = "#"
def __init__(self,src,fname):
self.src = src #source file
self.fname = fname #file name
self.tok = []
return
def get_tokens(self):
x,y = -1,1
word = ''
escaped = inString = inComment = False
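        # escaped:   an unescaped backslash was just read, so the next char is literal
        # inString:  inside a double-quoted string literal
        # inComment: inside a '#' comment (cleared at end of line)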
        for char in self.src:
            x += 1
            was_escaped = escaped   # this char is protected by a preceding backslash
            escaped = False
            #Manage escape: an unescaped backslash protects the next character
            if char == "\\" and not was_escaped and not inComment:
                escaped = True
            #Manage strings: an unescaped quote toggles string mode (outside comments)
            quote = char == '"' and not was_escaped and not inComment
            if quote:
                inString = not inString
            #Manage comments
            if char in [self.CMT_MARK] and not inString:
                inComment = True
            #main
            if char.isalnum() or was_escaped or escaped or inComment or inString or quote:
                word += char
else:
if char.isspace():
if word:
self.tok.append([word,[y,x],self.fname])
else:
if word:
self.tok.append([word,[y,x],self.fname])
if char:
self.tok.append([char,[y,x],self.fname])
word = ""
#Manage comments and coord
if char == "\n":
y += 1;x = -1;inComment = False
return
def run(self):
self.get_tokens()
#Name tokens
return tokens.tokens(self.tok)
| Aine1000/Demeter | bytecode/lexer/scanner.py | Python | mit | 1,852 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from plumbum import cli
from plumbum.lib import six
class TestValidator:
def test_named(self):
class Try(object):
@cli.positional(x=abs, y=str)
def main(selfy, x, y):
pass
assert Try.main.positional == [abs, str]
assert Try.main.positional_varargs == None
def test_position(self):
class Try(object):
@cli.positional(abs, str)
def main(selfy, x, y):
pass
assert Try.main.positional == [abs, str]
assert Try.main.positional_varargs == None
def test_mix(self):
class Try(object):
@cli.positional(abs, str, d=bool)
def main(selfy, x, y, z, d):
pass
assert Try.main.positional == [abs, str, None, bool]
assert Try.main.positional_varargs == None
def test_var(self):
class Try(object):
@cli.positional(abs, str, int)
def main(selfy, x, y, *g):
pass
assert Try.main.positional == [abs, str]
assert Try.main.positional_varargs == int
def test_defaults(self):
class Try(object):
@cli.positional(abs, str)
def main(selfy, x, y = 'hello'):
pass
assert Try.main.positional == [abs, str]
class TestProg:
def test_prog(self, capsys):
class MainValidator(cli.Application):
@cli.positional(int, int, int)
def main(self, myint, myint2, *mylist):
print(repr(myint), myint2, mylist)
_, rc = MainValidator.run(["prog", "1", "2", '3', '4', '5'], exit = False)
assert rc == 0
assert "1 2 (3, 4, 5)" == capsys.readouterr()[0].strip()
def test_failure(self, capsys):
class MainValidator(cli.Application):
@cli.positional(int, int, int)
def main(self, myint, myint2, *mylist):
print(myint, myint2, mylist)
_, rc = MainValidator.run(["prog", "1.2", "2", '3', '4', '5'], exit = False)
assert rc == 2
value = capsys.readouterr()[0].strip()
assert 'int' in value
assert 'not' in value
assert '1.2' in value
def test_defaults(self, capsys):
class MainValidator(cli.Application):
@cli.positional(int, int)
def main(self, myint, myint2=2):
print(repr(myint), repr(myint2))
_, rc = MainValidator.run(["prog", "1"], exit = False)
assert rc == 0
assert "1 2" == capsys.readouterr()[0].strip()
_, rc = MainValidator.run(["prog", "1", "3"], exit = False)
assert rc == 0
assert "1 3" == capsys.readouterr()[0].strip()
| AndydeCleyre/plumbum | tests/test_validate.py | Python | mit | 2,783 |
DEBUG = True # Turns on debugging features in Flask
 | mapposters/server | config/development.py | Python | mit | 51 |
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack
from matplotlib.colors import LogNorm
from matplotlib.ticker import MaxNLocator
from scipy.spatial import Delaunay
import AnniesLasso as tc
RESULTS_PATH = "/data/gaia-eso/arc/rave/results/"
RESULTS_PATH = "../../"
ms_results = Table.read(os.path.join(RESULTS_PATH, "rave-tgas-v43.fits.gz"))
giant_results = Table.read(os.path.join(RESULTS_PATH, "rave-tgas-v42.fits.gz"))
joint_results = Table.read(os.path.join(RESULTS_PATH, "rave-tgas-v46.fits.gz"))
for t in (ms_results, giant_results, joint_results):
if "Name" not in t.dtype.names:
t["Name"] = [each.split("/")[-2] + "_" + each.split("/")[-1].split(".rvsun.")[0] + "_" + each.split(".rvsun.")[1].split("-result")[0].replace("_result.pkl", "") for each in t["FILENAME"]]
t.sort("Name")
assert np.all(ms_results["Name"] == joint_results["Name"])
assert np.all(giant_results["Name"] == joint_results["Name"])
ms_model = tc.load_model(os.path.join(RESULTS_PATH, "rave-tgas-v37.model"))
# HACK MAGIC BEGINS #
ms_model._labelled_set = Table.read(os.path.join(RESULTS_PATH, "rave-tgas-v16b-labelled-set-cut.fits"))
ms_model._labelled_set["TEFF"] = ms_model._labelled_set["EPIC_TEFF"]
ms_model._labelled_set["LOGG"] = ms_model._labelled_set["EPIC_LOGG"]
ms_model._labelled_set["FE_H"] = ms_model._labelled_set["EPIC_FEH"]
# HACK MAGIC ENDS
ms_results["TEFF"] = ms_results["EPIC_TEFF"]
ms_results["LOGG"] = ms_results["EPIC_LOGG"]
ms_results["FE_H"] = ms_results["EPIC_FEH"]
# Plot log(density) of the three models.
K = 3
factor = 3.5
lbdim = 0.2 * factor
trdim = 0.1 * factor
whspace = 0.05
yspace = factor
xspace = factor * K + factor * (K - 1) * whspace + lbdim * (K - 1)
xdim = lbdim + xspace + trdim
ydim = lbdim + yspace + trdim
fig, axes = plt.subplots(1, K, figsize=(xdim, ydim))
fig.subplots_adjust(
left=lbdim/xdim, bottom=lbdim/ydim, right=(xspace + lbdim)/xdim,
top=(yspace + lbdim)/ydim, wspace=whspace, hspace=whspace)
extent = (3000, 8000, 0.5, 5.5)
titles = (
r"${\rm Simple}$ ${\rm model}$",
r"${\rm Main{-}sequence}$ ${\rm model}$",
r"${\rm Giant}$ ${\rm branch}$ ${\rm model}$",
)
all_results = (joint_results, ms_results, giant_results)
for i, (ax, results, title) in enumerate(zip(axes, all_results, titles)):
ax.hexbin(results["TEFF"], results["LOGG"], extent=extent,
norm=LogNorm(), cmap="Blues", gridsize=35, linewidths=0.1,
rasterized=True, edgecolor="#ffffff")
ax.set_xlim(extent[:2][::-1])
ax.set_ylim(extent[2:][::-1])
ax.xaxis.set_major_locator(MaxNLocator(6))
ax.yaxis.set_major_locator(MaxNLocator(6))
ax.set_title(title)
ax.set_xlabel(r"$T_{\rm eff}$ $[{\rm K}]$")
if ax.is_first_col():
ax.set_ylabel(r"$\log{g}$")
else:
ax.set_yticklabels([])
fig.tight_layout()
fig.savefig("test-set-density.pdf", dpi=300)
fig.savefig("test-set-density.png")
# OK, calculate how many sigma the stars are away from the joint model.
# Plot log(density) of the three models.
K = 2
factor = 3.5
lbdim = 0.2 * factor
trdim = 0.1 * factor
whspace = 0.05
xspace = factor
yspace = factor * K + factor * (K - 1) * whspace + lbdim * (K - 1)
xdim = lbdim + xspace + trdim
ydim = lbdim + yspace + trdim
fig, axes = plt.subplots(K, 1, figsize=(xdim, ydim))
fig.subplots_adjust(
left=lbdim/xdim, bottom=lbdim/ydim, right=(xspace + lbdim)/xdim,
top=(yspace + lbdim)/ydim, wspace=whspace, hspace=whspace)
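# differences between the specialised and simple models, scaled by assumed
# per-label uncertainties (x_sigma, y_sigma) so the axes read in units of sigma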
x_mu, y_mu = (0, 0)
x_sigma, y_sigma = (90, 0.15)
x = ((ms_results["TEFF"] - joint_results["TEFF"]) - x_mu)/x_sigma
y = ((ms_results["LOGG"] - joint_results["LOGG"]) - y_mu)/y_sigma
ticks = (-10, -5, 0, 5, 10)
kwds = dict(extent=(-10, 10, -10, 10), gridsize=25, linewidths=0.1,
edgecolor="#000000", cmap="Blues", norm=LogNorm())
axes[0].hexbin(x, y, **kwds)
#axes[0].axhline(0, c="#FFFFFF", linewidth=0.5, linestyle="-")
#axes[0].axvline(0, c="#FFFFFF", linewidth=0.5, linestyle="-")
axes[0].set_xticks(ticks)
axes[0].set_yticks(ticks)
axes[0].set_xlabel(r"$(T_{{\rm eff},ms} - T_{{\rm eff},simple})/\delta_{T_{{\rm eff},ms}}$")
axes[0].set_ylabel(r"$(\log{g}_{ms} - \log{g}_{simple})/\delta_{\log{g},ms}$")
x_mu, y_mu = (0, 0)
x_sigma, y_sigma = (50, 0.15)
x2 = ((giant_results["TEFF"] - joint_results["TEFF"]) - x_mu)/x_sigma
y2 = ((giant_results["LOGG"] - joint_results["LOGG"]) - y_mu)/y_sigma
hexbin = axes[1].hexbin(x2, y2, **kwds)
axes[1].set_xticks(ticks)
axes[1].set_yticks(ticks)
axes[1].set_xlabel(r"$(T_{{\rm eff},giant} - T_{{\rm eff},simple})/\delta_{T_{{\rm eff},giant}}$")
axes[1].set_ylabel(r"$(\log{g}_{giant} - \log{g}_{simple})/\delta_{\log{g},giant}$")
fig.tight_layout()
fig.savefig("joint-model-differences.pdf", dpi=300)
fig.savefig("joint-model-differences.png")
| AnnieJumpCannon/RAVE | article/figures/plot_joint_model_metrics.py | Python | mit | 4,798 |
from collections import defaultdict
import csv
from pathlib import Path
import os
from rdflib import Graph, URIRef, Literal, BNode, RDF, RDFS, OWL
from sqlalchemy import create_engine, inspect, Table, Column
from sqlalchemy.orm.session import sessionmaker
from pyontutils import utils
from pyontutils.config import auth
from typing import Dict, Tuple, List, Union
db_url = os.environ.get('SCICRUNCH_DB_URL_PRODUCTION')
ilx_uri_base = 'http://uri.interlex.org/base'
triple2annotation_bnode = {}
g = Graph()
olr = auth.get_path('ontology-local-repo')
output = olr / 'ttl/generated/neurolex_to_interlex_pmids.ttl'
namespaces = {
'ILX': 'http://uri.interlex.org/base/ilx_',
'definition': 'http://purl.obolibrary.org/obo/IAO_0000115',
'ilxtr': 'http://uri.interlex.org/tgbugs/uris/readable/',
'owl': 'http://www.w3.org/2002/07/owl#',
'PMID': 'https://www.ncbi.nlm.nih.gov/pubmed/',
'NIFSTD': 'http://uri.neuinfo.org/nif/nifstd/',
'BIRNLEX': 'http://uri.neuinfo.org/nif/nifstd/birnlex_',
'UBERON': 'http://purl.obolibrary.org/obo/UBERON_',
'PR': 'http://purl.obolibrary.org/obo/PR_',
}
for prefix, uri in namespaces.items():
g.bind(prefix, uri)
def add_annotation(
subj: URIRef,
pred: URIRef,
obj: Union[Literal, URIRef],
a_p: URIRef ,
a_o: Union[Literal, URIRef],
) -> BNode:
""" Adds annotation to rdflib graph.
    The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
        pred: Entity's predicate anchor to be annotated
        obj: Entity's object anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information.
"""
bnode: BNode = triple2annotation_bnode.get( (subj, pred, obj) )
if not bnode:
a_s: BNode = BNode()
triple2annotation_bnode[ (subj, pred, obj) ]: BNode = a_s
g.add((a_s, RDF.type, OWL.Axiom))
g.add((a_s, OWL.annotatedSource, subj))
g.add((a_s, OWL.annotatedProperty, pred))
g.add((a_s, OWL.annotatedTarget, obj))
else:
a_s: BNode = bnode
g.add( (a_s, a_p, a_o) )
return bnode # In case you have more triples to add
def get_existing_ids():
Session = sessionmaker()
engine = create_engine(db_url)
Session.configure(bind=engine)
session = Session()
sql = f"""
SELECT
tei.id, tei.tid, tei.curie, tei.iri, tei.preferred,
t.ilx, t.type, t.label, t.definition, t.comment
FROM (
SELECT *
FROM terms
GROUP BY terms.ilx
) as t
JOIN term_existing_ids AS tei
ON t.id = tei.tid
""" # groub by will just take the first occurance and drop the rest
sqlobj = session.execute(sql)
return sqlobj
def get_suffix2row():
suffix2row = {}
for row in get_existing_ids():
suffix2row[row.iri.rsplit('/', 1)[-1]] = {
'ilx': row.ilx, 'curie': row.curie, 'iri': row.iri, 'definition':row.definition,
}
return suffix2row
def pmid_fix(string):
def uni_strip(s):
return s.replace('PMID=', '').replace('PMID:', '').replace('.', '').strip()
if ',' in string:
obj = [uni_strip(s) for s in string.split(',')]
elif ';' in string:
obj = [uni_strip(s) for s in string.split(';')]
else:
obj = [uni_strip(string)]
new_obj = []
for o in obj:
if 'PMID' in o:
new_obj.append(o.split('PMID')[-1].strip())
else:
new_obj.append(o)
obj = new_obj
new_obj = []
for o in obj:
try:
if 'pmc' not in o.lower():
int(o)
new_obj.append(o)
except:
count = 0
for e in o:
try:
int(e)
count += 1
except:
pass
if count > 0:
print(o) #p357, D011919 D-- is a literal ID of the mesh annotation
return new_obj
def add_uri(ID):
if 'nlx_' in ID or 'birnlex_' in ID or 'sao' in ID or 'BAMSC' in ID:
return 'http://uri.neuinfo.org/nif/nifstd/' + ID
elif 'UBERON' in ID:
return 'http://purl.obolibrary.org/obo/UBERON_' + ID.split('_')[-1]
else:
print('Warning: ', ID, 'does not have a stored prefix')
return 'http://uri.neuinfo.org/nif/nifstd/' + ID
def main():
with open('/home/tmsincomb/Dropbox/PMID/pmid-dump.tsv', 'r') as csvFile: # FIXME what is this file? where did it come from?
reader = csv.reader(csvFile, delimiter='\t')
old_text = []
for i, row in enumerate(reader):
if i == 0:
header = {colname: col_indx for col_indx, colname in enumerate(row)}
continue
old_text.append(row[header['old_text']])
data = [text for text in old_text
if 'SuperCategory=Resource' not in text and 'Id=\\n' not in text and 'PMID=\\n' not in text]
### PRIMER
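    # each old_text blob is a wiki-style record of '|'-separated 'Key=Value' segments;
    # pull out the Id, PMID and Definition segments for every entry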
id_count, pmid_count, definition_count = 0, 0, 0
total_data = {}
id2def = {}
for d in data:
local_data = {'id':None, 'pmids':set(), 'definition':None}
for segment in d.split('|'):
if 'Id=' == segment[:3]:
id_count += 1
local_data['id'] = segment
if 'PMID=' == segment[:5]:
if 'nlx_12' in segment:
print(segment)
pmid_count += 1
local_data['pmids'].add(segment)
if 'Definition=' == segment[:11]:
definition_count += 1
local_data['definition'] = segment.split('Definition=')[-1].replace('\\n', '')
if local_data['id'] and local_data['pmids'] and local_data['definition']:
total_data[local_data['id']] = local_data['pmids']
id2def[local_data['id']] = local_data['definition']
print(id_count, pmid_count, definition_count)
### Nuances
raw_id2pmids = total_data
total_data = defaultdict(list)
clean_id2def = {}
for exid, pmids in raw_id2pmids.items():
curr_def = id2def[exid]
for hit in exid.split('\\n'):
if 'Id=' in hit:
clean_id = hit.replace('Id=', '').strip()
clean_pmids = []
for hit in [pmid.split('\\n') for pmid in pmids]:
for h in hit:
if 'PMID=' in h:
h = pmid_fix(h)
clean_pmids.extend(h)
if clean_pmids:
total_data[clean_id].extend(clean_pmids)
clean_id2def[clean_id] = curr_def
suffix2pmids = total_data
### Annotation Creation
suffix2row = get_suffix2row()
for _id, pmids in suffix2pmids.items():
row = suffix2row.get(_id)
definition = Literal(clean_id2def[_id])
if row:
for pmid in pmids:
ilx_uri = URIRef('/'.join([ilx_uri_base, row['ilx']]))
g.add((
URIRef(row['iri']),
URIRef('http://uri.interlex.org/tgbugs/uris/readable/literatureCitation'),
URIRef('https://www.ncbi.nlm.nih.gov/pubmed/'+pmid),
))
add_annotation(
URIRef(row['iri']),
URIRef('http://purl.obolibrary.org/obo/IAO_0000115'),
definition,
URIRef('http://uri.interlex.org/tgbugs/uris/readable/literatureCitation'),
URIRef('https://www.ncbi.nlm.nih.gov/pubmed/'+pmid),
)
else:
for pmid in pmids:
add_annotation(
URIRef(add_uri(str(_id))),
URIRef('http://purl.obolibrary.org/obo/IAO_0000115'),
definition,
URIRef('http://uri.interlex.org/tgbugs/uris/readable/literatureCitation'),
URIRef('https://www.ncbi.nlm.nih.gov/pubmed/'+pmid),
)
g.serialize(output, format='nifttl')
if __name__ == '__main__':
main()
| tgbugs/pyontutils | nifstd/nifstd_tools/extracting_pmids_from_neurolex.py | Python | mit | 8,206 |
#!/usr/bin/env python3
"""
When doing gene finding with tools like Augustus it's common to take a file of curated
genes/transcripts and split these into a set for training and another for evaluation.
This script accepts an input GFF3 file and then generates two files, one of genes to
use as training and another as evaluation. Selection of these is (computationally) random.
If the --retain_composition option is used, the script will attempt to make the exon count
distribution remain consisent between the full file, training set and evaluation set rather
than relying on pure random selection to do this.
Run this with the -h option to view other parameters and defaults.
Author: Joshua Orvis
"""
import argparse
import random
from collections import defaultdict
from biocode import gff
def main():
parser = argparse.ArgumentParser( description='Split an annotation GFF3 into training and evaluation sets')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read' )
parser.add_argument('-ot', '--output_training_file', type=str, required=True, help='GFF3 file to be created with the training genes' )
parser.add_argument('-oe', '--output_evaluation_file', type=str, required=True, help='GFF3 file to be created with the evaluation genes' )
parser.add_argument('-ts', '--training_set_size', type=int, required=False, default=200, help='Number of transcripts to select for training' )
parser.add_argument('-es', '--evaluation_set_size', type=int, required=False, default=100, help='Number of transcripts to select for evaluation' )
parser.add_argument('-me', '--max_exon_count', type=int, required=False, help='Skips any mRNAs with more exons than this' )
parser.add_argument('--retain_composition', dest='retain_composition',action='store_true')
parser.add_argument('--no_retain_composition', dest='retain_composition',action='store_false')
parser.set_defaults(retain_composition=False)
args = parser.parse_args()
if args.retain_composition is True:
raise Exception("ERROR: --retain_composition option not yet implemented")
(assemblies, features) = gff.get_gff3_features(args.input_file)
# key: exon count, value = list of mRNA objects with that count
# which of these gets used depends on whether --retain_composition is passed
mRNAs_by_exon_count = defaultdict(lambda: list())
mRNAs = list()
mRNA_count = 0
for asm_id in assemblies:
for gene in assemblies[asm_id].genes():
for mRNA in gene.mRNAs():
exon_count = mRNA.exon_count()
if args.max_exon_count is None or exon_count <= args.max_exon_count:
mRNA_count += 1
if args.retain_composition is True:
mRNAs_by_exon_count[exon_count].append(mRNA)
else:
mRNAs.append(mRNA)
# if you feel like printing a profile
#for exon_count in mRNAs_by_exon_count:
# print("DEBUG: exons:{0}\tcount:{1}".format( exon_count, len(mRNAs_by_exon_count[exon_count]) ) )
# sanity check on the number of available mRNAs
if (args.training_set_size + args.evaluation_set_size) > mRNA_count:
raise Exception("ERROR: acceptable mRNA count ({0}) is less than combined training_set_size ({1}) and evaluation_set_size ({2}) options".format(mRNA_count, args.training_set_size, args.evaluation_set_size) )
training_mRNAs = list()
evaluation_mRNAs = list()
if args.retain_composition is True:
print("DEBUG: retaining composition")
pass
else:
training_mRNAs = random.sample( mRNAs, args.training_set_size )
        unselected_mRNAs = list(set(mRNAs) - set(training_mRNAs))
evaluation_mRNAs = random.sample( unselected_mRNAs, args.evaluation_set_size )
export_mRNAs_to_file(training_mRNAs, args.output_training_file)
export_mRNAs_to_file(evaluation_mRNAs, args.output_evaluation_file)
def export_mRNAs_to_file( mRNAs, f ):
fh = open(f, 'wt')
fh.write("##gff-version 3\n")
mRNA_ids = dict()
genes_to_print = list()
for mRNA in mRNAs:
mRNA_ids[mRNA.id] = 1
if mRNA.parent not in genes_to_print:
genes_to_print.append(mRNA.parent)
for gene in sorted(genes_to_print):
mRNAs_to_keep = []
original_mRNAs = []
for mRNA in gene.mRNAs():
original_mRNAs.append(mRNA)
if mRNA.id in mRNA_ids:
mRNAs_to_keep.append(mRNA)
gene.children['mRNA'] = mRNAs_to_keep
gene.print_as(fh=fh, source='SOURCE', format='gff3')
gene.children['mRNA'] = original_mRNAs
if __name__ == '__main__':
main()
| jorvis/biocode | gff/select_training_and_evaluation_transcripts.py | Python | mit | 4,854 |
# -*- coding: utf-8 -*-
from pgradd.RINGParser.Reader import Read
from rdkit import Chem
import unittest
class TestRINGParser(unittest.TestCase):
def test_molquery(self):
testmol = Chem.MolFromSmiles('CCC')
s = """
fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((1, 0), (0, 1), (1, 2), (2, 1))))
def test_double_triple_bond(self):
testmol = Chem.MolFromSmiles('C=C-C#C')
s = """
fragment a{
C labeled c1
C labeled c2 double bond to c1
C labeled c3 single bond to c2
C labeled c4 triple bond to c3
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1, 2, 3),)))
def test_aromatic_bond(self):
testmol = Chem.MolFromSmiles('c1ccccc1')
s = """
fragment a{
C labeled c1
C labeled c2 aromatic bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (0, 5), (1, 0), (1, 2), (2, 1),
(2, 3), (3, 2), (3, 4), (4, 3), (4, 5),
(5, 4), (5, 0))))
def test_ring_bond1(self):
testmol = Chem.MolFromSmiles('CCC1CCC1')
s = """
fragment a{
C labeled c1
C labeled c2 ring bond to c1
C labeled c3 ring bond to c2
C labeled c4 ring bond to c3
ringbond c4 ring bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((2, 3, 4, 5), (2, 5, 4, 3),
(3, 2, 5, 4), (3, 4, 5, 2),
(4, 3, 2, 5), (4, 5, 2, 3),
(5, 2, 3, 4), (5, 4, 3, 2))))
def test_ring_bond2(self):
testmol = Chem.MolFromSmiles('CCC1CCC1')
s = """
fragment a{
C labeled c1
C labeled c2 ring bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((2, 3), (2, 5), (3, 2), (3, 4), (4, 3),
(4, 5), (5, 4), (5, 2))))
def test_non_ring_bond(self):
testmol = Chem.MolFromSmiles('CCC1CCC1')
s = """
fragment a{
C labeled c1
C labeled c2 nonring bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (1, 0), (1, 2), (2, 1))))
def test_any_bond1(self):
testmol = Chem.MolFromSmiles('CC=CC#C')
s = """
fragment a{
C labeled c1
C labeled c2 any bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (1, 0), (1, 2), (2, 1),
(2, 3), (3, 2), (3, 4), (4, 3))))
def test_any_bond2(self):
testmol = Chem.MolFromSmiles('CC=CC#C')
s = """
fragment a{
C labeled c1
C labeled c2 any bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (1, 0), (1, 2), (2, 1),
(2, 3), (3, 2), (3, 4), (4, 3))))
def test_strong_bond(self):
testmol = Chem.MolFromSmiles('CC=CC#C')
s = """
fragment a{
C labeled c1
C labeled c2 strong bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((1, 2), (2, 1), (3, 4), (4, 3))))
def test_other_bond1(self):
testmol = Chem.MolFromSmiles('C[CH2-]')
s = """
positive fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertEqual(match_index, ())
def test_other_bond2(self):
testmol = Chem.MolFromSmiles('C[CH2+]')
s = """
negative fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertEqual(match_index, ())
def test_other_bond3(self):
testmol = Chem.MolFromSmiles('C=CC')
s = """
olefinic fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((1, 2), (2, 1))))
def test_other_bond4(self):
testmol = Chem.MolFromSmiles('C=CC')
s = """
paraffinic fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertEqual(match_index, ())
def test_other_bond5(self):
testmol = Chem.MolFromSmiles('CCC')
s = """
paraffinic fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (1, 0), (1, 2), (2, 1))))
def test_other_bond6(self):
testmol = Chem.MolFromSmiles('CCC')
s = """
linear fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (1, 0), (1, 2), (2, 1))))
def test_other_bond7(self):
testmol = Chem.MolFromSmiles('CCC')
s = """
cyclic fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertEqual(match_index, ())
def test_other_bond8(self):
testmol = Chem.MolFromSmiles('C1CCC1C')
s = """
cyclic fragment a{
C labeled c1
C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (0, 3), (1, 0), (1, 2),
(2, 1), (2, 3), (3, 2),
(3, 4), (3, 0), (4, 3))))
def test_symbol_atomsuffix(self):
testmol = Chem.MolFromSmiles('CCC')
s = """
fragment a{
$ labeled c1
$ labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (0, 3), (0, 4), (0, 5), (1, 0),
(1, 2), (1, 6), (1, 7), (2, 1), (2, 8),
(2, 9), (2, 10), (3, 0), (4, 0),
(5, 0), (6, 1), (7, 1), (8, 2), (9, 2),
(10, 2))))
def test_other_bond9(self):
testmol = Chem.MolFromSmiles('CCO')
s = """
fragment a{
X labeled c1
X labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (1, 0), (1, 2), (2, 1))))
def test_other_bond10(self):
testmol = Chem.MolFromSmiles('CCS')
s = """
fragment a{
X labeled c1
& labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((1, 2),)))
def test_atom_constraint1(self):
testmol = Chem.MolFromSmiles('CCC')
s = """
fragment a{
C labeled c1
C labeled c2 single bond to c1 {connected to =2 C}
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1), (2, 1))))
def test_atom_constraint2(self):
testmol = Chem.MolFromSmiles('CCC')
s = """
fragment a{
C labeled c1
C labeled c2 single bond to c1 {connected to =1 C}
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((1, 0), (1, 2))))
def test_atom_constraint3(self):
testmol = Chem.MolFromSmiles('CC=C')
s = """
fragment a{
C labeled c1
C labeled c2 single bond to c1 {connected to =1 C with double bond}
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1),)))
def test_atom_constraint4(self):
testmol = Chem.MolFromSmiles('CC=C')
s = """
fragment a{
C labeled c1 {connected to >1 C with any bond}
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((1,),)))
def test_atom_constraint5(self):
testmol = Chem.MolFromSmiles('CC=C')
s = """
fragment a{
C labeled c1 {!connected to >1 C with any bond}
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, ), (2, ))))
def test_atom_constraint6(self):
testmol = Chem.MolFromSmiles('CC1CCC1')
s = """
fragment a{
C labeled c1 {!in ring of size >0}
C labeled c2 single bond to c1 {in ring of size >0}
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1),)))
def test_atom_prefix1(self):
testmol = Chem.MolFromSmiles('CC1CCC1')
s = """
fragment a{
nonringatom C labeled c1
ringatom C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1),)))
def test_atom_prefix2(self):
testmol = Chem.MolFromSmiles('Cc1ccccc1')
s = """
fragment a{
nonaromatic C labeled c1
aromatic C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1),)))
def test_atom_prefix3(self):
testmol = Chem.MolFromSmiles('CC=C')
s = """
fragment a{
C labeled c1
allylic C labeled c2 single bond to c1
}
"""
molquery = Read(s)
match_index = molquery.GetQueryMatches(testmol)
self.assertListEqual(sorted(match_index),
sorted(((0, 1),)))
if __name__ == '__main__':
unittest.main()
| VlachosGroup/VlachosGroupAdditivity | pgradd/tests/test_RINGparser_RDkitwrapper_test.py | Python | mit | 12,902 |
# type: ignore
from unittest import mock
from django.core.exceptions import ValidationError
from django.forms import Textarea
from django.http import HttpRequest
from django.test import TestCase
from appmail.forms import (
EmailTestForm,
JSONWidget,
MultiEmailField,
MultiEmailTemplateField,
)
from appmail.models import AppmailMessage, EmailTemplate
class JSONWidgetTests(TestCase):
def test_format_value(self):
widget = JSONWidget()
self.assertEqual(widget.format_value(None), "{}")
self.assertEqual(widget.format_value(""), "{}")
self.assertEqual(widget.format_value('{"foo": true}'), '{\n "foo": true\n}')
self.assertRaises(TypeError, widget.format_value, {"foo": True})
def test_render(self):
widget = JSONWidget()
textarea = Textarea()
for val in [None, "", '{"foo": true}']:
self.assertEqual(
widget.render("test", val),
textarea.render(
"test", widget.format_value(val), attrs=widget.DEFAULT_ATTRS
),
)
class MultiEmailFieldTests(TestCase):
def test_to_python(self):
form = MultiEmailField()
self.assertEqual(form.to_python(None), [])
self.assertEqual(form.to_python(""), [])
self.assertEqual(form.to_python("[email protected]"), ["[email protected]"])
self.assertEqual(
form.to_python("[email protected] , [email protected]"),
["[email protected]", "[email protected]"],
)
self.assertEqual(form.to_python(["[email protected]"]), ["[email protected]"])
def test_validate(self):
form = MultiEmailField()
form.validate(["[email protected]"])
form.validate(form.to_python("[email protected], [email protected]"))
# single email address fails validation - must be a list
self.assertRaises(ValidationError, form.validate, "[email protected]")
class MultiEmailTemplateFieldTests(TestCase):
@mock.patch.object(EmailTemplate.objects, "filter")
def test_to_python(self, mock_filter):
form = MultiEmailTemplateField()
self.assertEqual(list(form.to_python(None)), list(EmailTemplate.objects.none()))
self.assertEqual(list(form.to_python("")), list(EmailTemplate.objects.none()))
qs = EmailTemplate.objects.none()
self.assertEqual(form.to_python(qs), qs)
form.to_python("1, 2")
mock_filter.assert_called_once_with(pk__in=[1, 2])
class EmailTestFormTests(TestCase):
def test_clean_context(self):
form = EmailTestForm()
form.cleaned_data = {"context": "true"}
self.assertEqual(form.clean_context(), True)
form.cleaned_data["context"] = True
self.assertRaises(ValidationError, form.clean_context)
def test__create_message(self):
form = EmailTestForm()
form.cleaned_data = {
"context": {"foo": "bar"},
"to": ["[email protected]"],
"cc": [],
"bcc": [],
"from_email": "[email protected]",
}
template = EmailTemplate()
email = form._create_message(template)
self.assertEqual(email.from_email, "[email protected]")
self.assertEqual(email.to, ["[email protected]"])
self.assertEqual(email.cc, [])
self.assertEqual(email.bcc, [])
@mock.patch("appmail.forms.messages")
@mock.patch.object(AppmailMessage, "send")
def test_send_emails(self, mock_send, mock_messages):
        template = EmailTemplate()
        template.save()
form = EmailTestForm()
form.cleaned_data = {
"context": {"foo": "bar"},
"to": ["[email protected]"],
"cc": [],
"bcc": [],
"from_email": "[email protected]",
"templates": [template],
}
request = HttpRequest()
form.send_emails(request)
mock_send.assert_called_once()
mock_messages.success.assert_called_once()
# test email failure
mock_send.side_effect = Exception()
form.send_emails(request)
mock_messages.error.assert_called_once()
| yunojuno/django-appmail | tests/test_forms.py | Python | mit | 4,173 |
from collections import defaultdict
import random
def getLcs(string1, string2):
f, p = {}, {}
#Initialize
for i in range(len(string1) + 1):
f[(i, 0)], p[(i, 0)] = 0, -1
for j in range(len(string2) + 1):
f[(0, j)], p[(0, j)] = 0, -1
#dp
maxF = [0, (0, 0)]
for i in range(1, len(string1) + 1):
for j in range(1, len(string2) + 1):
if string1[i - 1] == string2[j - 1]:
f[(i, j)] = f[(i - 1, j - 1)] + 1
p[(i, j)] = 0
elif f[(i - 1, j)] >= f[(i, j - 1)]:
f[(i, j)] = f[(i - 1, j)]
p[(i, j)] = 1
else:
f[(i, j)] = f[(i, j - 1)]
p[(i, j)] = 2
if f[(i, j)] > maxF[0]:
maxF = [f[(i, j)], (i, j)]
#back tracing
lcs = ''
i = maxF[1]
while p[i] != -1:
if p[i] == 0:
lcs += string1[i[0] - 1]
i = (i[0] - 1, i[1] - 1)
elif p[i] == 1:
i = (i[0] - 1, i[1])
else:
i = (i[0], i[1] - 1)
return lcs[::-1]
def weightedSample(candidates, weightFunc, number):
weightedList = [[c, weightFunc(c)] for c in candidates]
result = []
random.seed()
for _ in range(number):
choice = random.random() * sum(element[1] for element in weightedList)
weightSum = 0
for i in range(len(weightedList)):
candidate, weight = weightedList[i]
weightSum += weight
if choice < weightSum:
result.append(candidate)
del weightedList[i]
break
return result
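# Note: weightedSample draws `number` distinct items without replacement; each draw picks one
# of the remaining candidates with probability proportional to weightFunc(candidate).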
def weightedChoice(candidates, weightFunc):
return weightedSample(candidates, weightFunc, 1)[0]
def multiplyArrays(*arrays):
if len(arrays) == 0:
return []
result = [[e] for e in arrays[0]]
for i in range(1, len(arrays)):
newResult = []
for listElement in result:
for element in arrays[i]:
newResult.append(listElement + [element])
result = newResult
return result
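# Illustrative example:
#   multiplyArrays([1, 2], ['a', 'b']) returns [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']],
#   i.e. the cartesian product of the input arrays, with each combination as a list.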
if __name__ == '__main__':
print(getLcs('global', 'printing'))
| Poligun/NihongoWeb | nihongo/algorithm.py | Python | mit | 2,157 |
import datetime
import graphene
from django.db.models import Q, Prefetch
from openstates.data.models import (
Jurisdiction,
Organization,
Person,
Membership,
LegislativeSession,
RunPlan,
)
from utils.geo import coords_to_divisions
from .common import (
OCDBaseNode,
IdentifierNode,
NameNode,
LinkNode,
DjangoConnectionField,
CountableConnectionBase,
)
from .optimization import optimize
def _resolve_suborganizations(root_obj, field_name, classification=None):
"""resolve organizations by classification optionally using the prefetch cache"""
# special case filtering if organizations are prefetched
if classification and field_name in getattr(
root_obj, "_prefetched_objects_cache", []
):
if isinstance(classification, str):
return [
o
for o in root_obj._prefetched_objects_cache[field_name]
if o.classification == classification
]
elif isinstance(classification, (list, tuple)):
return [
o
for o in root_obj._prefetched_objects_cache[field_name]
if o.classification in classification
]
qs = getattr(root_obj, field_name).all()
if isinstance(classification, str):
qs = qs.filter(classification=classification)
elif isinstance(classification, (list, tuple)):
qs = qs.filter(classification__in=classification)
return qs
def _membership_filter(
qs, info, classification=None, prefix=None, current=False, coming_from_person=True
):
today = datetime.date.today().isoformat()
if current:
qs = qs.filter(
Q(start_date="") | Q(start_date__lte=today),
Q(end_date="") | Q(end_date__gte=today),
)
else:
qs = qs.filter(
Q(start_date__gte=today) | (Q(end_date__lte=today) & ~Q(end_date=""))
)
if classification:
qs = qs.filter(organization__classification__in=classification)
related = [".post", ".post.division"]
if coming_from_person:
related.append(".organization")
else:
related.append(".person")
# if we're getting a membership we're probably going to need org/post
qs = optimize(qs, info, None, related, prefix=prefix)
return qs
class OfficeNode(graphene.ObjectType):
classification = graphene.String()
address = graphene.String()
voice = graphene.String()
fax = graphene.String()
display_name = graphene.String()
class ContactDetailNode(graphene.ObjectType):
type = graphene.String()
value = graphene.String()
note = graphene.String()
label = graphene.String()
class OrganizationNode(OCDBaseNode):
name = graphene.String()
image = graphene.String()
# jurisdiction left out for now since traversing up can lead to query explosion
classification = graphene.String()
founding_date = graphene.String()
dissolution_date = graphene.String()
jurisdiction_id = graphene.String()
# self-referential relationship
parent = graphene.Field("graphapi.core.OrganizationNode")
children = DjangoConnectionField(
"graphapi.core.OrganizationConnection", classification=graphene.String()
)
current_memberships = graphene.List("graphapi.core.MembershipNode")
# related objects
identifiers = graphene.List(IdentifierNode)
other_names = graphene.List(NameNode)
links = graphene.List(LinkNode)
sources = graphene.List(LinkNode)
def resolve_children(
self, info, classification=None, first=None, last=None, before=None, after=None
):
return _resolve_suborganizations(self, "children", classification)
def resolve_current_memberships(self, info):
if hasattr(self, "current_memberships"):
return self.current_memberships
else:
return _membership_filter(
self.memberships, info, None, current=True, coming_from_person=False
)
class DivisionNode(OCDBaseNode):
name = graphene.String()
redirect = graphene.Field("graphapi.core.DivisionNode")
country = graphene.String()
class PostNode(OCDBaseNode):
label = graphene.String()
role = graphene.String()
division = graphene.Field(DivisionNode)
start_date = graphene.String()
end_date = graphene.String()
maximum_memberships = graphene.Int()
# organization excluded from this direction
class PersonNode(OCDBaseNode):
name = graphene.String()
sort_name = graphene.String()
family_name = graphene.String()
given_name = graphene.String()
image = graphene.String()
# not used: gender, summary, national_identity, biography
birth_date = graphene.String()
death_date = graphene.String()
primary_party = graphene.String()
email = graphene.String()
# related objects
identifiers = graphene.List(IdentifierNode)
other_names = graphene.List(NameNode)
links = graphene.List(LinkNode)
sources = graphene.List(LinkNode)
contact_details = graphene.List(ContactDetailNode)
offices = graphene.List(OfficeNode)
# special attributes
current_memberships = graphene.List(
"graphapi.core.MembershipNode", classification=graphene.List(graphene.String)
)
old_memberships = graphene.List(
"graphapi.core.MembershipNode", classification=graphene.List(graphene.String)
)
votes = DjangoConnectionField("graphapi.legislative.BillVoteConnection")
def resolve_identifiers(self, info):
return self.identifiers.all()
def resolve_other_names(self, info):
return self.other_names.all()
def resolve_links(self, info):
return self.links.all()
def resolve_sources(self, info):
return self.sources.all()
def resolve_contact_details(self, info):
contact_details = []
# contact detail shim for backwards compatibility
for office in self.offices.all():
for key in ("fax", "voice", "address"):
if value := getattr(office, key):
contact_details.append(
dict(value=value, type=key, note=office.display_name)
)
# email shim for backwards compatibility
if self.email:
contact_details.append(
dict(value=self.email, type="email", note="Capitol Office")
)
return contact_details
def resolve_offices(self, info):
return self.offices.all()
def resolve_current_memberships(self, info, classification=None):
if hasattr(self, "current_memberships"):
if classification:
return [
m
for m in self.current_memberships
if m.organization.classification in classification
]
return self.current_memberships
else:
return _membership_filter(
self.memberships, info, classification, current=True
)
def resolve_old_memberships(self, info, classification=None):
if hasattr(self, "old_memberships"):
if classification:
return [
m
for m in self.old_memberships
if m.organization.classification in classification
]
return self.old_memberships
else:
return _membership_filter(
self.memberships, info, classification, current=False
)
def resolve_votes(self, info):
return self.votes.all()
class MembershipNode(OCDBaseNode):
organization = graphene.Field(OrganizationNode)
person = graphene.Field(PersonNode)
person_name = graphene.String()
post = graphene.Field(PostNode)
# on_behalf_of (not used?)
label = graphene.String()
role = graphene.String()
start_date = graphene.String()
end_date = graphene.String()
class LegislativeSessionNode(graphene.ObjectType):
jurisdiction = graphene.Field("graphapi.core.JurisdictionNode")
identifier = graphene.String()
name = graphene.String()
classification = graphene.String()
start_date = graphene.String()
end_date = graphene.String()
class LegislativeSessionConnection(graphene.relay.Connection):
class Meta:
node = LegislativeSessionNode
class OrganizationConnection(CountableConnectionBase):
class Meta:
node = OrganizationNode
max_items = 100
class JurisdictionNode(graphene.ObjectType):
id = graphene.String()
name = graphene.String()
url = graphene.String()
classification = graphene.String()
feature_flags = graphene.List(graphene.String)
last_scraped_at = graphene.String()
legislative_sessions = DjangoConnectionField(LegislativeSessionConnection)
organizations = DjangoConnectionField(
OrganizationConnection, classification=graphene.List(graphene.String)
)
def resolve_legislative_sessions(
self, info, first=None, last=None, before=None, after=None
):
return self.legislative_sessions.all()
def resolve_organizations(
self, info, first=None, last=None, before=None, after=None, classification=None
):
return _resolve_suborganizations(self, "organizations", classification)
def resolve_last_scraped_at(self, info):
try:
return self.runs.filter(success=True).latest("end_time").end_time
except RunPlan.DoesNotExist:
return None
class JurisdictionConnection(graphene.relay.Connection):
class Meta:
node = JurisdictionNode
class PersonConnection(CountableConnectionBase):
class Meta:
node = PersonNode
max_items = 100
class CoreQuery:
jurisdictions = DjangoConnectionField(
JurisdictionConnection, classification=graphene.String()
)
jurisdiction = graphene.Field(
JurisdictionNode, id=graphene.String(), name=graphene.String()
)
people = DjangoConnectionField(
PersonConnection,
member_of=graphene.String(),
ever_member_of=graphene.String(),
district=graphene.String(),
division_id=graphene.String(),
name=graphene.String(),
updated_since=graphene.String(),
latitude=graphene.Float(),
longitude=graphene.Float(),
)
person = graphene.Field(PersonNode, id=graphene.ID())
organization = graphene.Field(OrganizationNode, id=graphene.ID())
def resolve_jurisdictions(
self,
info,
classification="state",
first=None,
last=None,
before=None,
after=None,
):
qs = Jurisdiction.objects.filter(classification=classification).order_by("name")
return optimize(
qs,
info,
[
(
".legislativeSessions",
Prefetch(
"legislative_sessions",
LegislativeSession.objects.all().order_by("start_date"),
),
),
".organizations",
".organizations.children",
],
)
def resolve_jurisdiction(self, info, id=None, name=None):
if id:
return Jurisdiction.objects.get(id=id)
if name:
return Jurisdiction.objects.get(name=name)
else:
raise ValueError("Jurisdiction requires id or name")
def resolve_people(
self,
info,
first=None,
last=None,
before=None,
after=None,
member_of=None,
ever_member_of=None,
district=None,
division_id=None,
name=None,
updated_since=None,
latitude=None,
longitude=None,
):
qs = Person.objects.all()
today = datetime.date.today()
if name:
qs = qs.filter(
Q(name__icontains=name) | Q(other_names__name__icontains=name)
)
if division_id:
qs = qs.filter(
Q(memberships__post__division_id=division_id),
Q(memberships__end_date="") | Q(memberships__end_date__gt=today),
)
if member_of:
qs = qs.member_of(member_of, post=district)
if ever_member_of:
qs = qs.member_of(ever_member_of, current_only=False, post=district)
if updated_since:
qs = qs.filter(updated_at__gte=updated_since)
if district and not (member_of or ever_member_of):
raise ValueError(
"'district' parameter requires specifying either "
"'memberOf' or 'everMemberOf'"
)
if latitude and longitude:
try:
latitude = float(latitude)
longitude = float(longitude)
except ValueError:
raise ValueError("invalid lat or lon")
divisions = coords_to_divisions(latitude, longitude)
qs = qs.filter(
Q(memberships__post__division__id__in=divisions),
Q(memberships__end_date="") | Q(memberships__end_date__gt=today),
)
elif latitude or longitude:
raise ValueError("must provide lat & lon together")
qs = optimize(
qs,
info,
[
".identifiers",
".otherNames",
".links",
".sources",
".offices",
(".contactDetails", Prefetch("offices")),
(
".currentMemberships",
Prefetch(
"memberships",
queryset=_membership_filter(
Membership.objects,
info,
prefix=".currentMemberships",
current=True,
),
to_attr="current_memberships",
),
),
(
".oldMemberships",
Prefetch(
"memberships",
queryset=_membership_filter(
Membership.objects,
info,
prefix=".oldMemberships",
current=False,
),
to_attr="old_memberships",
),
),
],
)
return qs
def resolve_person(self, info, id):
return Person.objects.get(pk=id)
def resolve_organization(self, info, id):
return optimize(Organization.objects, info, None, [".parent"]).get(pk=id)
| openstates/openstates.org | graphapi/core.py | Python | mit | 14,832 |
import socket
import pytest
from Pyro5.compatibility import Pyro4
def test_compat_config():
import Pyro4
conf = Pyro4.config.asDict()
assert conf["NS_PORT"] == 9090
Pyro4.config.NS_PORT = 12345
conf = Pyro4.config.asDict()
assert conf["NS_PORT"] == 12345
Pyro4.config.NS_PORT = 9090
def test_compat_layer():
from Pyro4 import naming
from Pyro4 import socketutil
from Pyro4 import util
try:
_ = 1//0
except ZeroDivisionError:
tb = util.getPyroTraceback()
assert len(tb) == 3
assert "Traceback" in tb[0]
assert "zero" in tb[2]
assert 4 == socketutil.getIpVersion("127.0.0.1")
assert 6 == socketutil.getIpVersion("::1")
Pyro4.URI("PYRO:test@localhost:5555")
p = Pyro4.Proxy("PYRO:test@localhost:5555")
Pyro4.BatchProxy(p)
Pyro4.Daemon()
assert socketutil.getIpAddress("localhost", ipVersion=4).startswith("127.0")
if socket.has_ipv6:
try:
assert ":" in socketutil.getIpAddress("localhost", ipVersion=6)
except socket.error as x:
if str(x) != "unable to determine IPV6 address":
raise
assert "127.0.0.1" == socketutil.getIpAddress("127.0.0.1")
assert "::1" == socketutil.getIpAddress("::1")
assert "127.0.0.1" == socketutil.getInterfaceAddress("127.0.0.1")
with pytest.raises(NotImplementedError):
naming.NameServer()
with pytest.raises(NotImplementedError):
_ = p._pyroHmacKey
with pytest.raises(NotImplementedError):
p._pyroHmacKey = b"fail"
| irmen/Pyro5 | tests/test_pyro4compat.py | Python | mit | 1,569 |
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="sizesrc", parent_name="histogram2d.hoverlabel.font", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/histogram2d/hoverlabel/font/_sizesrc.py | Python | mit | 427 |
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class SamblasterInstaller(ClusterSetup):
def run(self, nodes, master, user, user_shell, volumes):
for node in nodes:
log.info("Installing samblaster 0.1.20 on %s" % (node.alias))
node.ssh.execute('mkdir -p /opt/software/samblaster')
node.ssh.execute('wget -c -P /opt/software/samblaster https://github.com/GregoryFaust/samblaster/releases/download/v.0.1.20/samblaster-v.0.1.20.tar.gz')
node.ssh.execute('tar xvzf /opt/software/samblaster/samblaster-v.0.1.20.tar.gz -C /opt/software/samblaster')
node.ssh.execute('make -C /opt/software/samblaster/samblaster-v.0.1.20')
node.ssh.execute('mkdir -p /usr/local/Modules/applications/samblaster/;touch /usr/local/Modules/applications/samblaster/0.1.20')
node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/samblaster/0.1.20')
node.ssh.execute('echo "set root /opt/software/samblaster/samblaster-v.0.1.20" >> /usr/local/Modules/applications/samblaster/0.1.20')
node.ssh.execute('echo -e "prepend-path\tPATH\t\$root" >> /usr/local/Modules/applications/samblaster/0.1.20')
| meissnert/StarCluster-Plugins | samblaster_0_1_20.py | Python | mit | 1,150 |
import random
def build_level (hallway):
flag_a = False
flag_b = True
for i in range(66):
hallway.append(random.randint(0,9))
if hallway[i] == 8:
if flag_a == True:
hallway[i] = 0
flag_a = False
else:
flag_a = True
flag_b = False
else:
hallway[i] = 0
return flag_b
def set_level ():
hallway = []
flag = build_level(hallway)
    while flag:
        hallway = []  # discard the previous attempt; build_level fills the list from index 0
        flag = build_level(hallway)
return hallway
for i in range(20):
h = set_level()
print(h)
| cpt-ado/laberinto | nivel.py | Python | mit | 600 |
import click
from contextlib import closing
import os
import requests
def auth_header(data):
return {'Authorization': 'Token {}'.format(data.auth_token)}
def download_with_progressbar(data, url, filename=None, label=None):
with closing(requests.get(url, stream=True)) as rq:
if filename is None:
filename = url.rsplit('/', 1)[-1]
if label is None:
label = filename
chunksize = 1024
totalsize = int(rq.headers.get('Content-Length', '0').strip())
kwargs = {
'show_eta': True,
'show_percent': True,
'show_pos': True,
}
if label:
kwargs['label'] = label
if totalsize > 0:
kwargs['length'] = int(totalsize / chunksize)
with click.progressbar(rq.iter_content(chunksize), **kwargs) as bar:
with open(filename, 'wb') as f:
for buf in bar:
if buf:
f.write(buf)
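# Example usage (illustrative; the URL is hypothetical):
#   download_with_progressbar(data, 'https://example.com/releases/file.bin')
# Note: the `data` argument is not used by this function.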
def file_chunker(filename, chunksize):
with open(filename, 'rb') as f:
while True:
buf = f.read(chunksize)
if buf:
yield buf
else:
break
def upload_with_progressbar(data, url, filename):
chunksize = 1024
totalsize = os.path.getsize(filename)
kwargs = {
'show_eta': True,
'show_percent': True,
'show_pos': True,
'length': int(totalsize / chunksize),
'label': 'Uploading {}'.format(os.path.basename(filename)),
}
with click.progressbar(file_chunker(filename, chunksize), **kwargs) as bar:
for buf in bar:
pass
# requests.post(url, data=bar, stream=True)
| axsemantics/axsemantics-cli | axsemantics_cli/common/transfer.py | Python | mit | 1,718 |
# Time: O(n)
# Space: O(p), p is the number of paths
import collections


class Solution(object):
def pathSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
class Node(object):
def __init__(self, num):
                self.level = num // 100 - 1
                self.i = (num % 100) // 10 - 1
                self.val = num % 10
                self.leaf = True
            def isParent(self, other):
                return self.level == other.level-1 and \
                       self.i == other.i // 2
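        # Illustrative decoding of the three-digit encoding used by this problem:
        # num 113 -> depth 1, position 1, value 3, so Node(113).level == 0,
        # Node(113).i == 0 and Node(113).val == 3.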
if not nums:
return 0
result = 0
q = collections.deque()
dummy = Node(10)
parent = dummy
for num in nums:
child = Node(num)
while not parent.isParent(child):
result += parent.val if parent.leaf else 0
parent = q.popleft()
parent.leaf = False
child.val += parent.val
q.append(child)
while q:
result += q.pop().val
return result
| yiwen-luo/LeetCode | Python/path-sum-iv.py | Python | mit | 1,061 |
# -*- coding: utf-8 -*-
NEXT_CARDS_TO_REVIEW_STUBS = [
{
"cards": [
{
"id": 15726,
"deck": 370,
"fact": 8112,
"ease_factor": None,
"interval": None,
"due_at": None,
"last_ease_factor": None,
"last_interval": None,
"last_due_at": None,
"review_count": 0,
"template": 0,
"expression": "[1] 汽車",
"reading": "|汽車《きしゃ》",
"meaning": "steam locomotive",
},
{
"id": 15760,
"deck": 370,
"fact": 8146,
"ease_factor": None,
"interval": None,
"due_at": None,
"last_ease_factor": None,
"last_interval": None,
"last_due_at": None,
"review_count": 0,
"template": 0,
"expression": "[2] 左手",
"reading": "|左手《ひだりて》",
"meaning": "left hand",
},
],
"interstitial": None,
},
{
"cards": [
{
"id": 15768,
"deck": 370,
"fact": 8154,
"ease_factor": None,
"interval": None,
"due_at": None,
"last_ease_factor": None,
"last_interval": None,
"last_due_at": None,
"review_count": 0,
"template": 0,
"expression": "[3] あせる",
"reading": "あせる",
"meaning": "fret; be impatient",
},
{
"id": 15819,
"deck": 370,
"fact": 8203,
"ease_factor": None,
"interval": None,
"due_at": None,
"last_ease_factor": None,
"last_interval": None,
"last_due_at": None,
"review_count": 0,
"template": 0,
"expression": "[4] おぼろな",
"reading": "おぼろな",
"meaning": "hazy; indistinct",
},
],
"interstitial": {
"more_cards_ready_for_review": True,
"next_new_cards_count": 2,
"early_review_available": False,
},
},
{
"cards": [
{
"id": 15986,
"deck": 370,
"fact": 8369,
"ease_factor": None,
"interval": None,
"due_at": None,
"last_ease_factor": None,
"last_interval": None,
"last_due_at": None,
"review_count": 0,
"template": 3,
"expression": "[5] 背を寄せる",
"reading": "|背《せ》を|寄《よ》せる",
"meaning": "lean one's back (against)",
},
{
"id": 16041,
"deck": 370,
"fact": 8424,
"ease_factor": None,
"interval": None,
"due_at": None,
"last_ease_factor": None,
"last_interval": None,
"last_due_at": None,
"review_count": 0,
"template": 0,
"expression": "[6] 真直ぐに",
"reading": "|真直《まっす》ぐに",
"meaning": "straight",
},
],
"interstitial": {
"more_cards_ready_for_review": True,
"next_new_cards_count": 0,
"early_review_available": True,
},
},
]
| aehlke/manabi | manabi/apps/flashcards/test_stubs.py | Python | mit | 3,873 |
from operator import itemgetter
from math import sqrt
def kdtree(points, axis):
if not points:
return None
median = len(points) // 2
points.sort(key=itemgetter(axis))
axis = (axis + 1) % 2
return [points[median],
kdtree(points[0:median], axis),
kdtree(points[median + 1:], axis)]
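# Each node returned by kdtree is a list [point, left_subtree, right_subtree]; empty subtrees
# are None, and the splitting axis alternates between x (0) and y (1) at each level.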
point_list = [(2, 5), (5, 7), (10, 2), (3, 3), (8, 9), (1, 1)]
tree = kdtree(point_list, 0)
def nnsearch(node, query, max_sqd, axis):
if node is None:
return(node, float("inf"))
point = node[0]
if query[axis] < point[axis]:
nearer = node[1]
further = node[2]
else:
nearer = node[2]
further = node[1]
n1 = nnsearch(nearer, query, max_sqd, (axis + 1) % 2)
nearest = n1[0]
sqd = n1[1]
if sqd < max_sqd:
max_sqd = sqd
d = (point[axis] - query[axis]) ** 2
if d > max_sqd:
return(nearest, sqd)
d = (point[0] - query[0])**2 + (point[1] - query[1])**2
if d < sqd:
nearest = point
sqd = d
max_sqd = sqd
n2 = nnsearch(further, query, max_sqd, (axis + 1) % 2)
if n2[1] < sqd:
nearest = n2[0]
sqd = n2[1]
return(nearest, sqd)
n = nnsearch(tree, (9, 6), float("inf"), 0)
print('nearest:', n[0], 'dist:', sqrt(n[1]))
| o-kei/design-computing-aij | ch4_1/nearest_kdtree2.py | Python | mit | 1,306 |
#!python3
from tkinter import *
from tkinter.messagebox import *
from tkinter.ttk import *
import sympy
simptypes = {
"Simplify": sympy.simplify,
"Expand": sympy.expand,
"Factor": sympy.factor,
"Cancel": sympy.cancel,
"Apart": sympy.apart,
}
class SimpSelect(Frame):
def __init__(self, *args, **kargs):
Frame.__init__(self, *args, **kargs)
Label(self, text="Select simplification method.").pack(side=TOP)
self.var = StringVar()
for type in reversed(sorted(simptypes)):
Radiobutton(self, text=type, value=type, variable=self.var).pack(anchor=NW)
self.var.set("Simplify")
def getType(self):
return self.var.get()
class SimpWidget(Frame):
def __init__(self, *args, **kargs):
Frame. __init__(self, *args, **kargs)
self.__makeWidgets()
def __makeWidgets(self):
Label(self, text="Expression simplification\n", justify="center").pack()
row = Frame(self)
lab = Label(row, text="Enter your expression. ")
self.ent = Entry(row)
row.pack(side=LEFT, expand=YES, fill=X, anchor=N)
self.ent.pack(side=RIGHT, expand=YES, fill=X)
lab.pack(side=LEFT)
Button(self, text="Simplify", command=self.simplify).pack(fill=X)
self.simpselect = SimpSelect(self)
self.simpselect.pack()
def simplify(self):
try:
type = self.simpselect.getType()
meth = simptypes[type]
expr = sympy.sympify(self.ent.get())
res = str(meth(expr))
self.ent.delete(0, END)
self.ent.insert(0, res)
except Exception as e:
showerror("ERROR!", str(e))
if __name__ == '__main__':
root = Tk()
s = SimpWidget(root)
s.pack(expand=YES, fill=BOTH)
root.mainloop()
| BookOwl/pymath | widgets/simpWidget.py | Python | mit | 1,823 |
import urllib, shutil, csv
from time import time
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import summary.summary
def findRoutines(fileName):
for ln in fileName:
url = ln.split(";")[3]
routineName = url.split("/")[-1]
#print routineName
#print url
f = urllib.urlopen(url)
flag = 1
for line in f:
line = line[3:]
#print line
if line.startswith("Arguments"):
break
else:
if line.startswith("\par Purpose:"):
flag = 0
if line.startswith("Arguments"):
flag = 1
if not flag:
index1 = line.find("inverse of a")
if index1 > -1:
routines_inverse_341.append(routineName)
f_inverse_341.write(routineName)
else:
pass
fileName.close()
print "------------- Find 'inverse' routines in v3.4.1 --------------"
###------------ find routines that compute the inverse of a matrix in the new version
###------------ and write them into routines/inverse_341.txt
## find the routines that HAVE the keywords:
f_computational_341_single = open(parentdir+'/sort341/routines/computational_341_single.txt')
f_computational_341_double = open(parentdir+'/sort341/routines/computational_341_double.txt')
f_computational_341_complex = open(parentdir+'/sort341/routines/computational_341_complex.txt')
f_computational_341_complex16 = open(parentdir+'/sort341/routines/computational_341_complex16.txt')
f_inverse_341 = open('./routines/inverse_341.txt', 'w')
routines_inverse_341 = []
start = time()
findRoutines(f_computational_341_single)
findRoutines(f_computational_341_double)
findRoutines(f_computational_341_complex)
findRoutines(f_computational_341_complex16)
elapsed = (time() - start)
print "There are %s routines in the 341 version that provides inverse." % len(routines_inverse_341), elapsed
| LighthouseHPC/lighthouse | src/LAPACK341/computational_inverse/inverse_find_341.py | Python | mit | 2,107 |
# coding:utf-8
import cv2
def Hist(im1, im2):
hist1 = cv2.calcHist([im1], [0], None, [256], [0,256])
hist2 = cv2.calcHist([im2], [0], None, [256], [0,256])
    # compare the similarity of the two histograms (method 0 = correlation, HISTCMP_CORREL)
d = cv2.compareHist(hist1, hist2, 0)
print d
if d > 0.6:
return 1
return 0
| palloc/face_t | backend-api/image_api/authapp/hist.py | Python | mit | 319 |
"""
Django settings for AndroidWEB project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')kgjg0#rdz&3g&md3$ts(mb%=49y)4_qhlx54u+#+iayhw='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'users',
'quizes',
'corsheaders',
'adminactions',
'nested_inlines'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'AndroidWEB.disable.DisableCSRF', #disable CSRF
'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware', #corsheaders
'django.middleware.common.CommonMiddleware', #corsheaders
)
ROOT_URLCONF = 'AndroidWEB.urls'
WSGI_APPLICATION = 'AndroidWEB.wsgi.application'
#django-cors-headers
USE_ETAGS = True
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'PATCH',
'DELETE',
'OPTIONS'
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| amragaey/AndroidWEB | AndroidWEB/settings.py | Python | mit | 2,533 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spanner database sessions API helper."""
from apitools.base.py import encoding
from apitools.base.py import extra_types
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.core import resources
def Create(instance, database):
"""Create a database session."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
ref = resources.REGISTRY.Parse(
database,
params={'instancesId': instance},
collection='spanner.projects.instances.databases')
req = msgs.SpannerProjectsInstancesDatabasesSessionsCreateRequest(
database=ref.RelativeName())
return client.projects_instances_databases_sessions.Create(req)
def Delete(session):
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
req = msgs.SpannerProjectsInstancesDatabasesSessionsDeleteRequest(
name=session.name)
return client.projects_instances_databases_sessions.Delete(req)
def ExecuteSql(session, sql):
"""Execute an SQL command."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
# TODO(b/33482229): remove this workaround
def _ToJson(msg):
return extra_types.JsonProtoEncoder(
extra_types.JsonArray(entries=msg.entry))
def _FromJson(data):
return msgs.ResultSet.RowsValueListEntry(
entry=extra_types.JsonProtoDecoder(data).entries)
encoding.RegisterCustomMessageCodec(
encoder=_ToJson, decoder=_FromJson)(
msgs.ResultSet.RowsValueListEntry)
req = msgs.SpannerProjectsInstancesDatabasesSessionsExecuteSqlRequest(
session=session.name,
executeSqlRequest=msgs.ExecuteSqlRequest(
sql=sql))
resp = client.projects_instances_databases_sessions.ExecuteSql(req)
return resp
| Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/api_lib/spanner/database_sessions.py | Python | mit | 2,404 |
#!/usr/bin/python3
__author__ = 'Przemek Decewicz'
import gzip
import sys
import pkg_resources
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from Bio import SeqIO
from compare_predictions_to_phages import genbank_seqio
from glob import glob
from os import makedirs, path
from PhiSpyModules import log_and_message
from re import sub
from numpy import arange
from subprocess import call
DATA_DIR = pkg_resources.resource_filename('PhiSpyModules', 'data')
TEST_DIR = pkg_resources.resource_filename('PhiSpyModules', 'data/testSets')
if not path.isdir(TEST_DIR): makedirs(TEST_DIR)
def read_genbank(gbkfile, full_analysis=False):
"""
Parses GenBank file's CDSs and groups them into host or phage groups based on the '/is_phage' qualifier.
:param gbkfile: path to GenBank file
    :param full_analysis: whether to read just the taxonomy (False) or all CDSs
:return infile_data: dictionary with taxonomy, bact/phage CDSs lists
"""
log_and_message(f"Reading {gbkfile}.", stderr=True)
infile_data = {
'taxonomy': [],
'bact_cds': [],
'phage_cds': []
}
tax_present = False
for record in genbank_seqio(gbkfile):
# check records until taxonomy information is provided
if not tax_present:
if len(record.annotations['taxonomy']) == 0:
infile_data['taxonomy'] = ['Bacteria']
else:
infile_data['taxonomy'] = check_taxa_names(record.annotations['taxonomy'])
tax_present = True
# get bacteria and phage CDSs nucleotide sequences to make kmers
if full_analysis:
for f in record.features:
if f.type == 'CDS':
dna = str(f.extract(record).seq)
try:
status = f.qualifiers['is_phage'][0]
if status == '1':
infile_data['phage_cds'].append(dna)
else:
infile_data['bact_cds'].append(dna)
except KeyError:
infile_data['bact_cds'].append(dna)
if not tax_present:
log_and_message(f"- WARNING! Taxonomy was missing!!! Assigning to Bacteria.", c="RED", stderr=True)
if full_analysis:
log_and_message(f"- Bact CDSs: {len(infile_data['bact_cds'])}", stderr=True)
log_and_message(f"- Phage CDSs: {len(infile_data['phage_cds'])}", stderr=True)
return infile_data
def check_taxa_names(taxonomy):
"""
    Checks whether taxa names contain any illegal characters.
:param taxonomy: list of taxonomic levels names
:return taxonomy: corrected Taxonomy
"""
ILLEGAL_CHARACTERS = {
" ": "-", # e.g. Mycobacteium tuberculosis comples
"/": "-and-", # e.g. Rhizobium/Agrobacterium group
}
for i in range(len(taxonomy)):
for ic, lc in ILLEGAL_CHARACTERS.items():
taxonomy[i] = sub(ic, lc, taxonomy[i])
return taxonomy
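# Illustrative example: check_taxa_names(["Bacteria", "Rhizobium/Agrobacterium group"])
# returns ["Bacteria", "Rhizobium-and-Agrobacterium-group"] (spaces and slashes replaced).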
def read_groups(groups_file, training_data):
"""
Reads tab-delimited input file with the path to input file to use for training in the first column
or just the file name (if --indir provided, it will be later added to this file path)
and the name of the group to put it afterwards while creating trainingGenome_list.txt.
:param groups_file: path to the groups file
:param training_data: dictionary with groups and infiles
:return training_data: updated dictionary with genomes and infiles
"""
trained_groups = len(training_data['groups'])
with open(groups_file) as inf:
for line in inf:
line = line.strip().split('\t')
training_data['genomes'].add(line[0])
try:
training_data['groups'][line[1]].add(line[0])
except KeyError:
training_data['groups'][line[1]] = set([line[0]])
log_and_message(f"Read {len(training_data['groups']) - trained_groups} new groups.", stderr=True)
return training_data
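# Illustrative groups file content (two tab-delimited columns; the names are hypothetical):
#   Bacillus_subtilis_168.gb        Bacillus
#   Escherichia_coli_K12.gb         Enterobacteria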
def read_kmers(kmerfile):
"""
    Simply reads the input file with kmers, one per line.
:param kmerfile: path to file with kmers to read
:return kmers: set of kmers
"""
with gzip.open(kmerfile, 'rt') as inf:
kmers = {x.strip() for x in inf}
return kmers
def read_training_genomes_list(training_data):
"""
Read trainingGenome_list.txt in PhiSpy's data directory and check currently set
groups and already trained genomes.
:param training_data: dictionary storing groups, genomes and taxonomy information of processed genomes
:return training_data: updated dictionary
"""
with open(path.join(DATA_DIR,'trainingGenome_list.txt')) as infile:
infile.readline() # skip the first line with Generic test set
for line in infile: #pkg_resources.resource_stream('PhiSpyModules', 'data/trainingGenome_list.txt'):
n, group, genomes, genomes_number = line.strip().split('\t') #decode().strip().split('\t')
group = group.rsplit('.txt', 1)[0][9:] #remove .txt and trainSet_
genomes = set(genomes.split(';')) if ';' in genomes else set([genomes])
training_data['groups'][group] = genomes
training_data['genomes'].update(genomes)
log_and_message(f"Read {len(training_data['genomes'])} genomes assigned to {len(training_data['groups'])} groups.", stderr=True)
return training_data
def kmerize_orf(orf, k, t):
"""
Creates kmers of certain size and type from provided sequence.
:param orf: nucleotide sequence
:param k: size of a kmer
:param t: type of a kmer, i.e. 'simple', 'all' or 'codon'
:return kmers: list of kmers
"""
kmers = []
if t == 'simple':
stop = len(orf) - (len(orf) % k)
for i in range(0, stop, k):
kmers.append(orf[i : i + k])
elif t == 'all':
for j in range(0, k):
stop = len(orf) - ((len(orf) - j) % k)
for i in range(j, stop, k):
kmers.append(orf[i : i + k])
elif t == 'codon':
for j in range(0, k, 3):
stop = len(orf) - ((len(orf) - j) % k)
for i in range(j, stop, k):
kmers.append(orf[i : i + k])
return kmers
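# Illustrative example (hypothetical 12 nt sequence), for orf = "ATGAAATTTGGG" and k = 6:
#   kmerize_orf(orf, 6, 'simple') -> ['ATGAAA', 'TTTGGG']              (frame 0 only)
#   kmerize_orf(orf, 6, 'codon')  -> ['ATGAAA', 'TTTGGG', 'AAATTT']    (frames 0 and 3)
#   kmerize_orf(orf, 6, 'all')    -> all seven 6-mers (one per start position, frames 0-5)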
def prepare_taxa_groups(training_data):
"""
    Reads all input files / genomes and creates new groups based on each genome's taxonomy.
:param training_data: currently trained genomes and set groups
:return training_data
"""
current_groups = len(training_data['groups'])
for file_name in training_data['genomes']:
for tax in training_data['taxonomy'][file_name]:
try:
training_data['groups'][tax].add(file_name)
except KeyError:
training_data['groups'][tax] = set([file_name])
log_and_message(f"Created {len(training_data['groups']) - current_groups} new groups based on taxonomy.",stderr=True)
return training_data
def write_kmers_file(file_name, bact_orfs_list, phage_orfs_list, kmer_size, kmers_type):
"""
Calculates host/phage kmers from input file and writes phage-specific kmers to file.
"""
MIN_RATIO = 1.0
# host_kmers = set()
# phage_kmers = set()
kmers_dict = {} # {kmer: [# in host, # in phage]}
kmers_total_count = 0
kmers_host_unique_count = 0
kmers_phage_unique_count = 0
kmers_ratios = {}
kmers_ratios_stats = {round(x, 2): 0 for x in arange(0, 1.01, 0.01)}
kmers_ratios_file = path.join(TEST_DIR, f'{file_name}.kmers_ratios.txt')
kmers_phage_file = path.join(TEST_DIR, f'{file_name}.kmers_phage.gz')
kmers_host_file = path.join(TEST_DIR, f'{file_name}.kmers_host.gz')
# kmrize CDSs
for orf in bact_orfs_list:
for kmer in kmerize_orf(orf, kmer_size, kmers_type):
try:
kmers_dict[kmer][0] += 1
except KeyError:
kmers_dict[kmer] = [1, 0]
kmers_host_unique_count += 1
kmers_total_count += 1
# host_kmers.update(kmerize_orf(i, kmer_size, kmers_type))
for orf in phage_orfs_list:
for kmer in kmerize_orf(orf, kmer_size, kmers_type):
try:
kmers_dict[kmer][1] += 1
except KeyError:
kmers_dict[kmer] = [0, 1]
kmers_phage_unique_count += 1
kmers_total_count += 1
# phage_kmers.update(kmerize_orf(i, kmer_size, kmers_type))
log_and_message(f"Analyzed {kmers_total_count} kmers.", stderr=True)
log_and_message(f"Identified {len(kmers_dict)} unique kmers.", stderr=True)
log_and_message(f"- Bact unique {kmers_host_unique_count} ({kmers_host_unique_count / len(kmers_dict) * 100:.2f}%).", stderr=True)
log_and_message(f"- Phage unique {kmers_phage_unique_count} ({kmers_phage_unique_count / len(kmers_dict) * 100:.2f}%).", stderr=True)
##################
# the below part could be simplified if ratios will not be considered
# it just slightly extends the calculations
##################
# calculate ratios
for kmer, freqs in kmers_dict.items():
ratio = round(freqs[1] / sum(freqs), 2)
kmers_ratios[(ratio, kmer)] = freqs
kmers_ratios_stats[ratio] += 1
del kmers_dict
# write ratios_stats
log_and_message(f"Writing kmers ratios stats.", stderr=True)
with open(kmers_ratios_file, 'w') as outf:
outf.write("Ratio\tNumber of kmers\tPerc of such kmers\tCumulative perc\n")
tot = 0
tot_perc = 0
for x in reversed(arange(0, 1.01, 0.01)):
x = round(x, 2)
tot += kmers_ratios_stats[x]
perc = kmers_ratios_stats[x]/len(kmers_ratios) * 100
tot_perc = tot / len(kmers_ratios) * 100
outf.write(f"{x}\t{kmers_ratios_stats[x]}\t{perc:.3f}%\t{tot_perc:.3f}%\n")
# write unique phage kmers
log_and_message(f"Writing kmers into phage and host kmers files.", stderr=True)
cnt = 0
with gzip.open(kmers_phage_file, 'wt') as out_phage:
with gzip.open(kmers_host_file, 'wt') as out_host:
for ratio, kmer in kmers_ratios.keys():
if ratio >= MIN_RATIO:
cnt += 1
out_phage.write(f"{kmer}\n")
else:
out_host.write(f"{kmer}\n")
log_and_message(f"- Wrote {cnt} kmers with ratios >= {MIN_RATIO}.", stderr=True)
return
def write_training_sets(training_data):
"""
Writes trainSets based on provided groups.
    :param training_data: dictionary with genomes and groups used for making training sets
"""
training_data['groups']['genericAll'] = training_data['genomes']
for i, group in enumerate(sorted(training_data['groups']), 1):
header = False
log_and_message(f"[{i}/{len(training_data['groups'])}] Writing trainSet for {group} ({len(training_data['groups'][group])} genome(s)).", c="PINK", stderr=True)
with open(path.join(DATA_DIR, f"trainSet_{group}.txt"), 'w') as outf:
for genome in training_data['groups'][group]:
with open(path.join(TEST_DIR, f"{genome}.testSet")) as inf:
if header: inf.readline()
outf.write(inf.read())
header = True
training_data['groups'].pop('genericAll')
def write_training_genome_list(training_data):
"""
Writes trainingGenome_list.txt file with currently available training sets.
    :param training_data: dictionary with genomes and groups used for making training sets
"""
with open(path.join(DATA_DIR, 'trainingGenome_list.txt'), 'w') as outf:
outf.write(f"0\ttestSet_genericAll.txt\tGeneric Test Set\t{len(training_data['genomes'])}\n")
for i, group in enumerate(sorted(training_data['groups']), 1):
outf.write(f"{i}\t")
outf.write(f"trainSet_{group}.txt\t")
outf.write(f"{';'.join(training_data['groups'][group])}\t")
outf.write(f"{len(training_data['groups'][group])}\n")
def print_groups(groups):
"""
Prints groups with their genomes.
:param groups: a dictionary with group name as key and genomes list as value
"""
for group, genomes in sorted(groups.items()):
gg = '\n- '.join(genomes)
log_and_message(f"{group}", c="PINK", stderr=True)
log_and_message(f"- {gg}", stderr=True)
def get_file_path(file_name, infiles, indir):
"""
Return path to input file.
:param file_name: GenBank file names
:param infiles: list of input files from user's input directory
:param indir: user's input directory
:return infile: path to file of interest
"""
if file_name in infiles:
# if indicated within directory provided by user
log_and_message(f"File in user's input directory.", stderr=True)
infile = path.join(indir, file_name)
else:
log_and_message(f"File {file_name} not present in user's input directory. If retraing with PhiSpy's default training genomes consider using its test_genbank_files directory.\nIf you want to use just your own data, run the script with --absolute_retrain flag.\nQuiting.", c="RED", stderr=True)
exit(2)
return infile
def main():
args = ArgumentParser(prog = 'make_training_sets.py',
                          description = 'Automates creating new PhiSpy training sets or extending the current ones. By default these will be created in the PhiSpyModules/data directory, so keep that in mind when preparing the groups file.',
epilog = 'Example usage:\npython3 scripts/make_training_sets.py -d test_genbank_files -g test_genbank_files/groups.txt --retrain --use_taxonomy --phmms pVOGs.hmm --threads 4',
formatter_class = RawDescriptionHelpFormatter)
args.add_argument('-d', '--indir',
type = str,
help = 'Path to input directory with GenBank file(s) for training. This path will be added to file names in groups file.')
args.add_argument('-g', '--groups',
type = str,
                      help = 'Path to a file with two tab-delimited columns: file name and group name. If not provided, each file will have its own training set.')
args.add_argument('--use_taxonomy',
action = 'store_true',
help = 'If set, taxonomy from input files will be used to update or create new groups. This is performed after reading groups file.')
args.add_argument('-k', '--kmer_size',
type = int,
                      help = 'The size of required kmers. For the codon approach use a multiple of 3. [Default: 12]',
default = 12)
args.add_argument('-t', '--kmers_type',
type = str,
                      help = 'Approach for creating kmers. Options are: simple (just slicing the sequence from the first position), all (all possible kmers), codon (all possible kmers made with a step of 3 nt so that kmers correspond to translated amino acids). [Default: all]',
default = 'all')
args.add_argument('--phmms',
type = str,
help = 'Phage HMM profile database (like pVOGs) will be mapped against the genome of interest and used as additional feature to identify prophages.')
args.add_argument('--threads',
type = str,
help = 'Number of threads to use while searching with phmms.',
default = '4')
args.add_argument('--retrain',
action = 'store_true',
                      help = 'Set if any of the reference files previously used for training has changed, e.g. the indication of prophage protein features was modified.')
args.add_argument('--absolute_retrain',
action = 'store_true',
                      help = 'If set, ignores the trainingGenome_list file and PhiSpy\'s default reference genomes. This option allows training PhiSpy with files provided just by the user.')
if len(sys.argv[1:]) == 0:
args.print_usage()
args.exit()
try:
args = args.parse_args()
except:
args.exit()
if not args.indir:
log_and_message(f"You have to provide input directory --indir.", c="RED",
stderr=True, stdout=False)
sys.exit(2)
log_and_message("Reading input directory", c="GREEN", stderr=True)
infiles = glob(path.join(args.indir, r'*.gb'))
infiles += glob(path.join(args.indir, r'*.gb[kf]'))
infiles += glob(path.join(args.indir, r'*.gbff'))
infiles += glob(path.join(args.indir, r'*.gb.gz'))
infiles += glob(path.join(args.indir, r'*.gb[kf].gz'))
infiles += glob(path.join(args.indir, r'*.gbff.gz'))
infiles = {path.basename(infile) for infile in infiles}
log_and_message(f"Read {len(infiles)} GenBank files from input directory.", stderr=True)
# read currently available genomes - either by reading trainingGenome_list.txt
log_and_message("Checking currently available training sets.", c="GREEN", stderr=True, stdout=False)
training_data = {
'groups': {},
'genomes': set(),
'taxonomy': {}
}
not_trained = set()
if args.absolute_retrain:
log_and_message(f"Ignoring PhiSpy's trainingGenome_list.txt file and default test GenBank files.Files provided with --indir and/or --groups will overwrite current reference sets.", stderr=True)
args.retrain = True
elif pkg_resources.resource_exists('PhiSpyModules', 'data/trainingGenome_list.txt'):
training_data = read_training_genomes_list(training_data)
else:
log_and_message(f"trainingGenome_list.txt is missing.", c="RED", stderr=True)
log_and_message(f"{len(training_data['groups'])} groups based on trainingGenome_list file:", c="GREEN", stderr=True)
old_training_groups = training_data['groups'].copy()
print_groups(training_data['groups'])
log_and_message(f"Checking which genomes are considered as new.", c="GREEN", stderr=True)
for infile in infiles:
file_name = path.basename(infile)
if file_name not in training_data['genomes']:
not_trained.add(infile)
training_data['genomes'].add(infile)
log_and_message(f"- {file_name}", c="YELLOW", stderr=True)
log_and_message(f"In total there are {len(not_trained)} new genomes.", stderr=True)
# check what new groups were requested
if args.groups:
log_and_message(f"Reading provided groups file.", c="GREEN", stderr=True)
training_data = read_groups(args.groups, training_data)
log_and_message(f"{len(training_data['groups'])} currently considered groups:", c="GREEN", stderr=True)
# make kmers files or read taxonomy information from input file if needed
# Comment:
# In general, all files need to be retrained if:
# (i) there's at least one new reference file or
# (ii) annotation of CDSs with is_phage qualifier has changed in any file
# The change in CDSs marked as phage CDSs triggers the change of phage-specific
    # kmers set and therefore changes the Shannon Score statistics.
log_and_message(f"Checking which genomes need to be read/retrained.", c="GREEN", stderr=True)
for i, file_name in enumerate(sorted(training_data['genomes']), 1):
full_analysis = False
if file_name in not_trained:
log_and_message(f"This file has not been used for training yet: {file_name}", c="RED", stderr=True)
full_analysis = True
elif args.retrain:
log_and_message(f"Retraining file upon user's request.", c="PINK", stderr=True)
full_analysis = True
elif not pkg_resources.resource_exists('PhiSpyModules', f"data/testSets/{file_name}.kmers_phage.gz") or \
not pkg_resources.resource_exists('PhiSpyModules', f"data/testSets/{file_name}.kmers_host.gz") or \
not pkg_resources.resource_exists('PhiSpyModules', f"data/testSets/{file_name}.testSet"):
log_and_message(f"Training files missing for: {file_name}", c="RED", stderr=True)
full_analysis = True
# full_analysis = file_name in not_trained or args.retrain
if full_analysis or args.use_taxonomy:
log_and_message(f"[{i}/{len(training_data['genomes'])}] Reading {file_name}.", c="YELLOW", stderr=True)
infile = get_file_path(file_name, infiles, args.indir)
infile_data = read_genbank(infile, full_analysis)
training_data['taxonomy'][file_name] = infile_data['taxonomy']
if full_analysis:
write_kmers_file(file_name, infile_data['bact_cds'], infile_data['phage_cds'], args.kmer_size, args.kmers_type)
else:
log_and_message("No need to write kmers files.", stderr=True)
else:
log_and_message(f"[{i}/{len(training_data['genomes'])}] Skipping. {file_name} already analyzed.", c="YELLOW", stderr=True)
# use taxonomy information to create/update groups
if args.use_taxonomy:
log_and_message(f"Using taxonomy from input files to create new or update current groups.", c="GREEN", stderr=True)
training_data = prepare_taxa_groups(training_data)
log_and_message(f"{len(training_data['groups'])} considered groups, including taxonomy-based ones:", c="GREEN", stderr=True)
print_groups(training_data['groups'])
    # Check whether there's a point to go further
if len(not_trained) == 0 and training_data['groups'] == old_training_groups:
log_and_message(f"There are 0 new genomes and groups. Quiting.", c="GREEN", stderr=True)
exit(1)
log_and_message("Making phage unique kmers file from all considered genomes: phage_kmers_all_wohost.txt", c="GREEN", stderr=True)
phage_kmers_all_wohost_file = path.join(DATA_DIR, 'phage_kmers_all_wohost.txt')
phage_kmers = set()
for i, file_name in enumerate(sorted(training_data['genomes']), 1):
log_and_message(f"[{i}/{len(training_data['genomes'])}] Reading kmers from {file_name}.", c="YELLOW", stderr=True)
kmers_file = path.join(TEST_DIR, file_name)
phage_kmers.update(read_kmers(kmers_file + '.kmers_phage.gz'))
phage_kmers.difference_update(read_kmers(kmers_file + '.kmers_host.gz'))
log_and_message(f"Writing {len(phage_kmers)} into {phage_kmers_all_wohost_file}.", stderr=True)
with open(phage_kmers_all_wohost_file, 'w') as outf:
outf.write("\n".join(phage_kmers))
retrain_all = False
if len(not_trained) > 0:
log_and_message(f"{len(not_trained)} file{' has' if len(not_trained) == 1 else 's have'} not been analyzed.\nMaking testSets for all genomes using new kmers file.", c="GREEN", stderr=True)
retrain_all = True
elif args.retrain:
log_and_message(f"Making testSets for all genomes upon user's request.", c="GREEN", stderr=True)
retrain_all = True
else:
log_and_message(f"There's no need to make testSets for any genome.", c="GREEN", stderr=True)
if retrain_all:
for i, file_name in enumerate(sorted(training_data['genomes']), 1):
log_and_message(f"[{i}/{len(training_data['genomes'])}] Making testSet for {file_name}.", c="YELLOW", stderr=True)
infile = get_file_path(file_name, infiles, args.indir)
cmd = ['PhiSpy.py', infile,
'-o', TEST_DIR,
'-m', file_name + '.testSet',
# '--kmer_size', str(args.kmer_size), # TODO not supported by PhiSpy yet
'--kmers_type', args.kmers_type]
if args.phmms: cmd.extend(['--phmms', args.phmms, '--threads', args.threads])
log_and_message(f"PhiSpy command: {' '.join(cmd)}", stderr=True)
log_and_message(f"{'PhiSpy start':=^30s}", c="PINK", stderr=True)
call(cmd)
log_and_message(f"{'PhiSpy stop':=^30s}", c="PINK", stderr=True)
log_and_message("Updating training sets based on new groups.", c="GREEN", stderr=True)
write_training_sets(training_data)
# update trainingGenome_list file - this will act as a new groups file
# for genomes available in PhiSpy's data directory
log_and_message("Writing updated trainingGenome_list.", c="GREEN", stderr=True)
write_training_genome_list(training_data)
log_and_message("Done!", c="GREEN", stderr=True)
if __name__ == '__main__':
main()
| linsalrob/PhiSpy | scripts/make_training_sets.py | Python | mit | 24,540 |
import os
import random
import string
class CalcItJobCreateError(IOError):
""" Exception cast if there is an error to create a job"""
pass
def substitute_file(from_file, to_file, substitutions):
""" Substitute contents in from_file with substitutions and
output to to_file using string.Template class
        Raises: IOError if the file to replace from is not found
Arguments:
----------
from_file -- template file to load
to_file -- substituted file
substitutions -- dictionary of substitutions.
"""
with open(from_file, "r") as f_in:
source = string.Template(f_in.read())
with open(to_file, "w") as f_out:
outcome = source.safe_substitute(substitutions)
f_out.write(outcome)
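# Hedged usage sketch for substitute_file(); the template name and keys below are
# assumptions for illustration, not files shipped with CalcIt:
#   substitute_file("job.tmpl", "job.inp", {"NPROC": 8, "MEMORY": "2gb"})
# Because safe_substitute() is used, placeholders missing from the dictionary are
# left untouched in the output instead of raising a KeyError.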
def create_scratch_directory(basename):
""" Creates scratch directory named basename
Arguments:
----------
basename -- name of folder to create
"""
if not os.path.isdir(basename):
os.mkdir(basename)
def only_coordinates(f):
""" Generator to only consider coordinates in
xyz-files.
skips the first two lines
"""
for i, line in enumerate(f):
if i > 1:
yield line
def read_xyz(filename):
""" Reads an xyz-file """
with open(filename, "r") as xyz_file:
for line in only_coordinates(xyz_file):
data = line.split()
yield data[0], list(map(float, data[1:]))
def write_xyz(xyz_filename, include_header=True):
""" Generates an .xyz file output from an .xyz file input """
xyz_data = list(read_xyz(xyz_filename))
s = ""
if include_header:
s += "{0:d}\n".format(len(xyz_data))
s += "\n"
for label, coordinates in xyz_data:
s += "{0:s}{1[0]:20.9f}{1[1]:16.9f}{1[2]:16.9f}\n".format(label, coordinates)
return s[:-1]
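# Hedged usage sketch for the xyz helpers above; "water.xyz" is an assumed file name:
#   for label, coordinates in read_xyz("water.xyz"):
#       print(label, coordinates)      # e.g. ('O', [0.0, 0.0, 0.117])
#   print(write_xyz("water.xyz"))      # re-serializes the same coordinates with a count header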
def directories(from_file):
""" Sets up directories needed internally in CalcIt.
This pertains to especially the share directory
that holds the template files.
Arguments:
----------
from_file -- the basename to use to extract the files
Returns:
--------
dictionary with paths. The following keys are available:
path -- the base path for the executable
bin -- the path for the binary
share -- the path of the share directory
"""
abs_path = os.path.abspath(from_file)
bin_path = os.path.dirname(abs_path)
path = os.path.dirname(bin_path)
share_path = os.path.join(path, 'share')
return {'path': path, 'bin': bin_path, 'share': share_path}
def generate_auth_key(mode):
""" Generates an authentification key either manually
or by specifying "auto"
Arguments:
----------
mode -- the mode used to generate the key.
auto automatically generates a suitable key.
"""
key = mode
if mode == "auto":
key = "calcit-{0:d}".format(random.randint(10000, 30000))
return key
| cstein/calcit | calcit/util.py | Python | mit | 3,045 |
import jsog
import unittest
class TestJSOG(unittest.TestCase):
def test_encode_reference(self):
inner = { "foo": "bar" }
outer = { "inner1": inner, "inner2": inner }
encoded = jsog.encode(outer)
inner1 = encoded['inner1']
inner2 = encoded['inner2']
# one has @id, one has @ref
self.assertNotEqual('@id' in inner1, '@id' in inner2)
self.assertNotEqual('@ref' in inner1, '@ref' in inner2)
if '@id' in inner1:
self.assertEqual(inner1['@id'], inner2['@ref'])
else:
self.assertEqual(inner1['@ref'], inner2['@id'])
def test_decode_reference(self):
JSOGIFIED = '{"@id":"1","foo":"foo","inner1":{"@id":"2","bar":"bar"},"inner2":{"@ref":"2"}}'
parsed = jsog.loads(JSOGIFIED)
inner1 = parsed['inner1']
inner2 = parsed['inner2']
self.assertTrue(inner1 is inner2)
def test_encode_circular(self):
thing = {}
thing['me'] = thing
encoded = jsog.encode(thing)
self.assertTrue(encoded['@id'])
self.assertTrue(encoded['me']['@ref'] == encoded['@id'])
def test_decode_circular(self):
thing = {}
thing['me'] = thing
encoded = jsog.encode(thing)
back = jsog.decode(encoded)
self.assertFalse('@id' in back)
self.assertTrue(back['me'] is back)
def test_encode_null(self):
encoded = jsog.encode(None)
self.assertEqual(encoded, None)
def test_decode_null(self):
decoded = jsog.decode(None)
self.assertEqual(decoded, None)
def test_decode_plain_json(self):
json = { "foo": "bar" }
decoded = jsog.decode(json)
self.assertEqual(json, decoded)
if __name__ == '__main__':
unittest.main() | jsog/jsog-python | test_jsog.py | Python | mit | 1,554 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements input and output processing from Nwchem.
2015/09/21 - Xin Chen ([email protected]):
NwOutput will read new kinds of data:
1. normal hessian matrix. ["hessian"]
2. projected hessian matrix. ["projected_hessian"]
3. normal frequencies. ["normal_frequencies"]
For backward compatibility, the key for accessing the projected frequencies
is still 'frequencies'.
2015/10/12 - Xin Chen
NwOutput will read new kinds of data:
1. forces. ["forces"]
"""
import os
import re
import warnings
from string import Template
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen.analysis.excitation import ExcitationSpectrum
from pymatgen.core.structure import Molecule, Structure
from pymatgen.core.units import Energy, FloatWithUnit
NWCHEM_BASIS_LIBRARY = None
if os.environ.get("NWCHEM_BASIS_LIBRARY"):
NWCHEM_BASIS_LIBRARY = set(os.listdir(os.environ["NWCHEM_BASIS_LIBRARY"]))
class NwTask(MSONable):
"""
Base task for Nwchem.
"""
theories = {
"g3gn": "some description",
"scf": "Hartree-Fock",
"dft": "DFT",
"esp": "ESP",
"sodft": "Spin-Orbit DFT",
"mp2": "MP2 using a semi-direct algorithm",
"direct_mp2": "MP2 using a full-direct algorithm",
"rimp2": "MP2 using the RI approximation",
"ccsd": "Coupled-cluster single and double excitations",
"ccsd(t)": "Coupled-cluster linearized triples approximation",
"ccsd+t(ccsd)": "Fourth order triples contribution",
"mcscf": "Multiconfiguration SCF",
"selci": "Selected CI with perturbation correction",
"md": "Classical molecular dynamics simulation",
"pspw": "Pseudopotential plane-wave DFT for molecules and insulating solids using NWPW",
"band": "Pseudopotential plane-wave DFT for solids using NWPW",
"tce": "Tensor Contraction Engine",
"tddft": "Time Dependent DFT",
}
operations = {
"energy": "Evaluate the single point energy.",
"gradient": "Evaluate the derivative of the energy with respect to nuclear coordinates.",
"optimize": "Minimize the energy by varying the molecular structure.",
"saddle": "Conduct a search for a transition state (or saddle point).",
"hessian": "Compute second derivatives.",
"frequencies": "Compute second derivatives and print out an analysis of molecular vibrations.",
"freq": "Same as frequencies.",
"vscf": "Compute anharmonic contributions to the vibrational modes.",
"property": "Calculate the properties for the wave function.",
"dynamics": "Perform classical molecular dynamics.",
"thermodynamics": "Perform multi-configuration thermodynamic integration using classical MD.",
"": "dummy",
}
def __init__(
self,
charge,
spin_multiplicity,
basis_set,
basis_set_option="cartesian",
title=None,
theory="dft",
operation="optimize",
theory_directives=None,
alternate_directives=None,
):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set used for the task as a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"}.
basis_set_option: cartesian (default) | spherical,
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations and dielectric
constant of 78, you'd supply {'cosmo': {"dielectric": 78}}.
"""
# Basic checks.
if theory.lower() not in NwTask.theories.keys():
raise NwInputError(f"Invalid theory {theory}")
if operation.lower() not in NwTask.operations.keys():
raise NwInputError(f"Invalid operation {operation}")
self.charge = charge
self.spin_multiplicity = spin_multiplicity
self.title = title if title is not None else f"{theory} {operation}"
self.theory = theory
self.basis_set = basis_set or {}
if NWCHEM_BASIS_LIBRARY is not None:
for b in set(self.basis_set.values()):
if re.sub(r"\*", "s", b.lower()) not in NWCHEM_BASIS_LIBRARY:
warnings.warn("Basis set %s not in in NWCHEM_BASIS_LIBRARY" % b)
self.basis_set_option = basis_set_option
self.operation = operation
self.theory_directives = theory_directives or {}
self.alternate_directives = alternate_directives or {}
def __str__(self):
bset_spec = []
for el, bset in sorted(self.basis_set.items(), key=lambda x: x[0]):
bset_spec.append(f' {el} library "{bset}"')
theory_spec = []
if self.theory_directives:
theory_spec.append(f"{self.theory}")
for k in sorted(self.theory_directives.keys()):
theory_spec.append(f" {k} {self.theory_directives[k]}")
theory_spec.append("end")
for k in sorted(self.alternate_directives.keys()):
theory_spec.append(k)
for k2 in sorted(self.alternate_directives[k].keys()):
theory_spec.append(f" {k2} {self.alternate_directives[k][k2]}")
theory_spec.append("end")
t = Template(
"""title "$title"
charge $charge
basis $basis_set_option
$bset_spec
end
$theory_spec
"""
)
output = t.substitute(
title=self.title,
charge=int(self.charge),
spinmult=self.spin_multiplicity,
basis_set_option=self.basis_set_option,
bset_spec="\n".join(bset_spec),
theory_spec="\n".join(theory_spec),
theory=self.theory,
)
if self.operation is not None:
output += f"task {self.theory} {self.operation}"
return output
def as_dict(self):
"""
Returns: MSONable dict.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"title": self.title,
"theory": self.theory,
"operation": self.operation,
"basis_set": self.basis_set,
"basis_set_option": self.basis_set_option,
"theory_directives": self.theory_directives,
"alternate_directives": self.alternate_directives,
}
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
NwTask
"""
return NwTask(
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
title=d["title"],
theory=d["theory"],
operation=d["operation"],
basis_set=d["basis_set"],
basis_set_option=d["basis_set_option"],
theory_directives=d["theory_directives"],
alternate_directives=d["alternate_directives"],
)
@classmethod
def from_molecule(
cls,
mol,
theory,
charge=None,
spin_multiplicity=None,
basis_set="6-31g",
basis_set_option="cartesian",
title=None,
operation="optimize",
theory_directives=None,
alternate_directives=None,
):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
mol: Input molecule
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set to be used as string or a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
same basis set is used for all elements.
basis_set_option: cartesian (default) | spherical,
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations with DFT, you'd supply
{'cosmo': "cosmo"}.
"""
title = title if title is not None else "{} {} {}".format(re.sub(r"\s", "", mol.formula), theory, operation)
charge = charge if charge is not None else mol.charge
nelectrons = -charge + mol.charge + mol.nelectrons # pylint: disable=E1130
if spin_multiplicity is not None:
spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(charge, spin_multiplicity)
)
elif charge == mol.charge:
spin_multiplicity = mol.spin_multiplicity
else:
spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
elements = set(mol.composition.get_el_amt_dict().keys())
if isinstance(basis_set, str):
basis_set = {el: basis_set for el in elements}
basis_set_option = basis_set_option
return NwTask(
charge,
spin_multiplicity,
basis_set,
basis_set_option=basis_set_option,
title=title,
theory=theory,
operation=operation,
theory_directives=theory_directives,
alternate_directives=alternate_directives,
)
@classmethod
def dft_task(cls, mol, xc="b3lyp", **kwargs):
r"""
A class method for quickly creating DFT tasks with optional
cosmo parameter .
Args:
mol: Input molecule
xc: Exchange correlation to use.
\\*\\*kwargs: Any of the other kwargs supported by NwTask. Note the
theory is always "dft" for a dft task.
"""
t = NwTask.from_molecule(mol, theory="dft", **kwargs)
t.theory_directives.update({"xc": xc, "mult": t.spin_multiplicity})
return t
@classmethod
def esp_task(cls, mol, **kwargs):
r"""
A class method for quickly creating ESP tasks with RESP
charge fitting.
Args:
mol: Input molecule
\\*\\*kwargs: Any of the other kwargs supported by NwTask. Note the
theory is always "dft" for a dft task.
"""
return NwTask.from_molecule(mol, theory="esp", **kwargs)
class NwInput(MSONable):
"""
An object representing a Nwchem input file, which is essentially a list
of tasks on a particular molecule.
"""
def __init__(
self,
mol,
tasks,
directives=None,
geometry_options=("units", "angstroms"),
symmetry_options=None,
memory_options=None,
):
"""
Args:
mol: Input molecule. If molecule is a single string, it is used as a
                direct input to the geometry section of the NWChem input
file.
tasks: List of NwTasks.
directives: List of root level directives as tuple. E.g.,
[("start", "water"), ("print", "high")]
geometry_options: Additional list of options to be supplied to the
geometry. E.g., ["units", "angstroms", "noautoz"]. Defaults to
("units", "angstroms").
symmetry_options: Addition list of option to be supplied to the
symmetry. E.g. ["c1"] to turn off the symmetry
memory_options: Memory controlling options. str.
E.g "total 1000 mb stack 400 mb"
"""
self._mol = mol
self.directives = directives if directives is not None else []
self.tasks = tasks
self.geometry_options = geometry_options
self.symmetry_options = symmetry_options
self.memory_options = memory_options
@property
def molecule(self):
"""
        Returns molecule associated with this NwInput.
"""
return self._mol
def __str__(self):
o = []
if self.memory_options:
o.append("memory " + self.memory_options)
for d in self.directives:
o.append(f"{d[0]} {d[1]}")
o.append("geometry " + " ".join(self.geometry_options))
if self.symmetry_options:
o.append(" symmetry " + " ".join(self.symmetry_options))
for site in self._mol:
o.append(f" {site.specie.symbol} {site.x} {site.y} {site.z}")
o.append("end\n")
for t in self.tasks:
o.append(str(t))
o.append("")
return "\n".join(o)
def write_file(self, filename):
"""
Args:
filename (str): Filename
"""
with zopen(filename, "w") as f:
f.write(self.__str__())
def as_dict(self):
"""
Returns: MSONable dict
"""
return {
"mol": self._mol.as_dict(),
"tasks": [t.as_dict() for t in self.tasks],
"directives": [list(t) for t in self.directives],
"geometry_options": list(self.geometry_options),
"symmetry_options": self.symmetry_options,
"memory_options": self.memory_options,
}
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
NwInput
"""
return NwInput(
Molecule.from_dict(d["mol"]),
tasks=[NwTask.from_dict(dt) for dt in d["tasks"]],
directives=[tuple(li) for li in d["directives"]],
geometry_options=d["geometry_options"],
symmetry_options=d["symmetry_options"],
memory_options=d["memory_options"],
)
@classmethod
def from_string(cls, string_input):
"""
Read an NwInput from a string. Currently tested to work with
files generated from this class itself.
Args:
string_input: string_input to parse.
Returns:
NwInput object
"""
directives = []
tasks = []
charge = None
spin_multiplicity = None
title = None
basis_set = None
basis_set_option = None
theory_directives = {}
geom_options = None
symmetry_options = None
memory_options = None
lines = string_input.strip().split("\n")
while len(lines) > 0:
l = lines.pop(0).strip()
if l == "":
continue
toks = l.split()
if toks[0].lower() == "geometry":
geom_options = toks[1:]
l = lines.pop(0).strip()
toks = l.split()
if toks[0].lower() == "symmetry":
symmetry_options = toks[1:]
l = lines.pop(0).strip()
# Parse geometry
species = []
coords = []
while l.lower() != "end":
toks = l.split()
species.append(toks[0])
coords.append([float(i) for i in toks[1:]])
l = lines.pop(0).strip()
mol = Molecule(species, coords)
elif toks[0].lower() == "charge":
charge = int(toks[1])
elif toks[0].lower() == "title":
title = l[5:].strip().strip('"')
elif toks[0].lower() == "basis":
# Parse basis sets
l = lines.pop(0).strip()
basis_set = {}
while l.lower() != "end":
toks = l.split()
basis_set[toks[0]] = toks[-1].strip('"')
l = lines.pop(0).strip()
elif toks[0].lower() in NwTask.theories:
# read the basis_set_option
if len(toks) > 1:
basis_set_option = toks[1]
# Parse theory directives.
theory = toks[0].lower()
l = lines.pop(0).strip()
theory_directives[theory] = {}
while l.lower() != "end":
toks = l.split()
theory_directives[theory][toks[0]] = toks[-1]
if toks[0] == "mult":
spin_multiplicity = float(toks[1])
l = lines.pop(0).strip()
elif toks[0].lower() == "task":
tasks.append(
NwTask(
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
theory=toks[1],
operation=toks[2],
basis_set=basis_set,
basis_set_option=basis_set_option,
theory_directives=theory_directives.get(toks[1]),
)
)
elif toks[0].lower() == "memory":
memory_options = " ".join(toks[1:])
else:
directives.append(l.strip().split())
return NwInput(
mol,
tasks=tasks,
directives=directives,
geometry_options=geom_options,
symmetry_options=symmetry_options,
memory_options=memory_options,
)
@classmethod
def from_file(cls, filename):
"""
Read an NwInput from a file. Currently tested to work with
files generated from this class itself.
Args:
filename: Filename to parse.
Returns:
NwInput object
"""
with zopen(filename) as f:
return cls.from_string(f.read())
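# Hedged usage sketch for NwInput, reusing the hypothetical mol/task from the sketch
# above the NwInput class; "water.nw" is an assumed file name:
#   nwi = NwInput(mol, tasks=[task], directives=[("start", "water")])
#   nwi.write_file("water.nw")
#   same = NwInput.from_file("water.nw")  # round-trips through from_string()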
class NwInputError(Exception):
"""
Error class for NwInput.
"""
pass
class NwOutput:
"""
A Nwchem output file parser. Very basic for now - supports only dft and
only parses energies and geometries. Please note that Nwchem typically
outputs energies in either au or kJ/mol. All energies are converted to
eV in the parser.
"""
def __init__(self, filename):
"""
Args:
filename: Filename to read.
"""
self.filename = filename
with zopen(filename) as f:
data = f.read()
chunks = re.split(r"NWChem Input Module", data)
if re.search(r"CITATION", chunks[-1]):
chunks.pop()
preamble = chunks.pop(0)
self.raw = data
self.job_info = self._parse_preamble(preamble)
self.data = [self._parse_job(c) for c in chunks]
def parse_tddft(self):
"""
Parses TDDFT roots. Adapted from nw_spectrum.py script.
Returns:
{
"singlet": [
{
"energy": float,
"osc_strength: float
}
],
"triplet": [
{
"energy": float
}
]
}
"""
start_tag = "Convergence criterion met"
end_tag = "Excited state energy"
singlet_tag = "singlet excited"
triplet_tag = "triplet excited"
state = "singlet"
inside = False # true when we are inside output block
lines = self.raw.split("\n")
roots = {"singlet": [], "triplet": []}
while lines:
line = lines.pop(0).strip()
if start_tag in line:
inside = True
elif end_tag in line:
inside = False
elif singlet_tag in line:
state = "singlet"
elif triplet_tag in line:
state = "triplet"
elif inside and "Root" in line and "eV" in line:
toks = line.split()
roots[state].append({"energy": float(toks[-2])})
elif inside and "Dipole Oscillator Strength" in line:
osc = float(line.split()[-1])
roots[state][-1]["osc_strength"] = osc
return roots
def get_excitation_spectrum(self, width=0.1, npoints=2000):
"""
Generate an excitation spectra from the singlet roots of TDDFT
calculations.
Args:
width (float): Width for Gaussian smearing.
npoints (int): Number of energy points. More points => smoother
curve.
Returns:
(ExcitationSpectrum) which can be plotted using
pymatgen.vis.plotters.SpectrumPlotter.
"""
roots = self.parse_tddft()
data = roots["singlet"]
en = np.array([d["energy"] for d in data])
osc = np.array([d["osc_strength"] for d in data])
epad = 20.0 * width
emin = en[0] - epad
emax = en[-1] + epad
de = (emax - emin) / npoints
# Use width of at least two grid points
if width < 2 * de:
width = 2 * de
energies = [emin + ie * de for ie in range(npoints)]
cutoff = 20.0 * width
gamma = 0.5 * width
gamma_sqrd = gamma * gamma
de = (energies[-1] - energies[0]) / (len(energies) - 1)
prefac = gamma / np.pi * de
x = []
y = []
for energy in energies:
xx0 = energy - en
stot = osc / (xx0 * xx0 + gamma_sqrd)
t = np.sum(stot[np.abs(xx0) <= cutoff])
x.append(energy)
y.append(t * prefac)
return ExcitationSpectrum(x, y)
@staticmethod
def _parse_preamble(preamble):
info = {}
for l in preamble.split("\n"):
toks = l.split("=")
if len(toks) > 1:
info[toks[0].strip()] = toks[-1].strip()
return info
def __iter__(self):
return self.data.__iter__()
def __getitem__(self, ind):
return self.data[ind]
def __len__(self):
return len(self.data)
@staticmethod
def _parse_job(output):
energy_patt = re.compile(r"Total \w+ energy\s+=\s+([.\-\d]+)")
energy_gas_patt = re.compile(r"gas phase energy\s+=\s+([.\-\d]+)")
energy_sol_patt = re.compile(r"sol phase energy\s+=\s+([.\-\d]+)")
coord_patt = re.compile(r"\d+\s+(\w+)\s+[.\-\d]+\s+([.\-\d]+)\s+" r"([.\-\d]+)\s+([.\-\d]+)")
lat_vector_patt = re.compile(r"a[123]=<\s+([.\-\d]+)\s+" r"([.\-\d]+)\s+([.\-\d]+)\s+>")
corrections_patt = re.compile(r"([\w\-]+ correction to \w+)\s+=" r"\s+([.\-\d]+)")
preamble_patt = re.compile(
r"(No. of atoms|No. of electrons" r"|SCF calculation type|Charge|Spin " r"multiplicity)\s*:\s*(\S+)"
)
force_patt = re.compile(r"\s+(\d+)\s+(\w+)" + 6 * r"\s+([0-9\.\-]+)")
time_patt = re.compile(r"\s+ Task \s+ times \s+ cpu: \s+ ([.\d]+)s .+ ", re.VERBOSE)
error_defs = {
"calculations not reaching convergence": "Bad convergence",
"Calculation failed to converge": "Bad convergence",
"geom_binvr: #indep variables incorrect": "autoz error",
"dft optimize failed": "Geometry optimization failed",
}
def fort2py(x):
return x.replace("D", "e")
def isfloatstring(s):
return s.find(".") == -1
parse_hess = False
parse_proj_hess = False
hessian = None
projected_hessian = None
parse_force = False
all_forces = []
forces = []
data = {}
energies = []
frequencies = None
normal_frequencies = None
corrections = {}
molecules = []
structures = []
species = []
coords = []
lattice = []
errors = []
basis_set = {}
bset_header = []
parse_geom = False
parse_freq = False
parse_bset = False
parse_projected_freq = False
job_type = ""
parse_time = False
time = 0
for l in output.split("\n"):
# pylint: disable=E1136
for e, v in error_defs.items():
if l.find(e) != -1:
errors.append(v)
if parse_time:
m = time_patt.search(l)
if m:
time = m.group(1)
parse_time = False
if parse_geom:
if l.strip() == "Atomic Mass":
if lattice:
structures.append(Structure(lattice, species, coords, coords_are_cartesian=True))
else:
molecules.append(Molecule(species, coords))
species = []
coords = []
lattice = []
parse_geom = False
else:
m = coord_patt.search(l)
if m:
species.append(m.group(1).capitalize())
coords.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
m = lat_vector_patt.search(l)
if m:
lattice.append([float(m.group(1)), float(m.group(2)), float(m.group(3))])
if parse_force:
m = force_patt.search(l)
if m:
forces.extend(map(float, m.groups()[5:]))
elif len(forces) > 0:
all_forces.append(forces)
forces = []
parse_force = False
elif parse_freq:
if len(l.strip()) == 0:
if len(normal_frequencies[-1][1]) == 0:
continue
parse_freq = False
else:
vibs = [float(vib) for vib in l.strip().split()[1:]]
num_vibs = len(vibs)
for mode, dis in zip(normal_frequencies[-num_vibs:], vibs):
mode[1].append(dis)
elif parse_projected_freq:
if len(l.strip()) == 0:
if len(frequencies[-1][1]) == 0:
continue
parse_projected_freq = False
else:
vibs = [float(vib) for vib in l.strip().split()[1:]]
num_vibs = len(vibs)
for mode, dis in zip(frequencies[-num_vibs:], vibs):
mode[1].append(dis)
elif parse_bset:
if l.strip() == "":
parse_bset = False
else:
toks = l.split()
if toks[0] != "Tag" and not re.match(r"-+", toks[0]):
basis_set[toks[0]] = dict(zip(bset_header[1:], toks[1:]))
elif toks[0] == "Tag":
bset_header = toks
bset_header.pop(4)
bset_header = [h.lower() for h in bset_header]
elif parse_hess:
if l.strip() == "":
continue
if len(hessian) > 0 and l.find("----------") != -1:
parse_hess = False
continue
toks = l.strip().split()
if len(toks) > 1:
try:
row = int(toks[0])
except Exception:
continue
if isfloatstring(toks[1]):
continue
vals = [float(fort2py(x)) for x in toks[1:]]
if len(hessian) < row:
hessian.append(vals)
else:
hessian[row - 1].extend(vals)
elif parse_proj_hess:
if l.strip() == "":
continue
nat3 = len(hessian)
toks = l.strip().split()
if len(toks) > 1:
try:
row = int(toks[0])
except Exception:
continue
if isfloatstring(toks[1]):
continue
vals = [float(fort2py(x)) for x in toks[1:]]
if len(projected_hessian) < row:
projected_hessian.append(vals)
else:
projected_hessian[row - 1].extend(vals)
if len(projected_hessian[-1]) == nat3:
parse_proj_hess = False
else:
m = energy_patt.search(l)
if m:
energies.append(Energy(m.group(1), "Ha").to("eV"))
parse_time = True
continue
m = energy_gas_patt.search(l)
if m:
cosmo_scf_energy = energies[-1]
energies[-1] = {}
energies[-1].update({"cosmo scf": cosmo_scf_energy})
energies[-1].update({"gas phase": Energy(m.group(1), "Ha").to("eV")})
m = energy_sol_patt.search(l)
if m:
energies[-1].update({"sol phase": Energy(m.group(1), "Ha").to("eV")})
m = preamble_patt.search(l)
if m:
try:
val = int(m.group(2))
except ValueError:
val = m.group(2)
k = m.group(1).replace("No. of ", "n").replace(" ", "_")
data[k.lower()] = val
elif l.find('Geometry "geometry"') != -1:
parse_geom = True
elif l.find('Summary of "ao basis"') != -1:
parse_bset = True
elif l.find("P.Frequency") != -1:
parse_projected_freq = True
if frequencies is None:
frequencies = []
toks = l.strip().split()[1:]
frequencies.extend([(float(freq), []) for freq in toks])
elif l.find("Frequency") != -1:
toks = l.strip().split()
if len(toks) > 1 and toks[0] == "Frequency":
parse_freq = True
if normal_frequencies is None:
normal_frequencies = []
normal_frequencies.extend([(float(freq), []) for freq in l.strip().split()[1:]])
elif l.find("MASS-WEIGHTED NUCLEAR HESSIAN") != -1:
parse_hess = True
if not hessian:
hessian = []
elif l.find("MASS-WEIGHTED PROJECTED HESSIAN") != -1:
parse_proj_hess = True
if not projected_hessian:
projected_hessian = []
elif l.find("atom coordinates gradient") != -1:
parse_force = True
elif job_type == "" and l.strip().startswith("NWChem"):
job_type = l.strip()
if job_type == "NWChem DFT Module" and "COSMO solvation results" in output:
job_type += " COSMO"
else:
m = corrections_patt.search(l)
if m:
corrections[m.group(1)] = FloatWithUnit(m.group(2), "kJ mol^-1").to("eV atom^-1")
if frequencies:
for freq, mode in frequencies:
mode[:] = zip(*[iter(mode)] * 3)
if normal_frequencies:
for freq, mode in normal_frequencies:
mode[:] = zip(*[iter(mode)] * 3)
if hessian:
n = len(hessian)
for i in range(n):
for j in range(i + 1, n):
hessian[i].append(hessian[j][i])
if projected_hessian:
n = len(projected_hessian)
for i in range(n):
for j in range(i + 1, n):
projected_hessian[i].append(projected_hessian[j][i])
data.update(
{
"job_type": job_type,
"energies": energies,
"corrections": corrections,
"molecules": molecules,
"structures": structures,
"basis_set": basis_set,
"errors": errors,
"has_error": len(errors) > 0,
"frequencies": frequencies,
"normal_frequencies": normal_frequencies,
"hessian": hessian,
"projected_hessian": projected_hessian,
"forces": all_forces,
"task_time": time,
}
)
return data
| vorwerkc/pymatgen | pymatgen/io/nwchem.py | Python | mit | 35,126 |
"""Identify program versions used for analysis, reporting in structured table.
Catalogs the full list of programs used in analysis, enabling reproduction of
results and tracking of provenance in output files.
"""
import os
import contextlib
import subprocess
import sys
import yaml
import toolz as tz
from bcbio import utils
from bcbio.pipeline import config_utils, version
from bcbio.pipeline import datadict as dd
from bcbio.log import logger
_cl_progs = [{"cmd": "bamtofastq", "name": "biobambam",
"args": "--version", "stdout_flag": "This is biobambam version"},
{"cmd": "bamtools", "args": "--version", "stdout_flag": "bamtools"},
{"cmd": "bcftools", "stdout_flag": "Version:"},
{"cmd": "bedtools", "args": "--version", "stdout_flag": "bedtools"},
{"cmd": "bowtie2", "args": "--version", "stdout_flag": "bowtie2-align version"},
{"cmd": "bwa", "stdout_flag": "Version:"},
{"cmd": "chanjo"},
{"cmd": "cutadapt", "args": "--version"},
{"cmd": "fastqc", "args": "--version", "stdout_flag": "FastQC"},
{"cmd": "freebayes", "stdout_flag": "version:"},
{"cmd": "gemini", "args": "--version", "stdout_flag": "gemini "},
{"cmd": "novosort", "paren_flag": "novosort"},
{"cmd": "novoalign", "stdout_flag": "Novoalign"},
{"cmd": "samtools", "stdout_flag": "Version:"},
{"cmd": "qualimap", "args": "-h", "stdout_flag": "QualiMap"},
{"cmd": "vcflib", "has_cl_version": False},
{"cmd": "featurecounts", "args": "-v", "stdout_flag": "featureCounts"}]
_manifest_progs = ["bcbio-variation", "bioconductor-bubbletree", "cufflinks",
"cnvkit", "gatk-framework", "hisat2", "sailfish", "salmon",
"grabix", "htseq", "lumpy-sv", "manta", "metasv", "oncofuse",
"picard", "phylowgs", "platypus-variant",
"rna-star", "rtg-tools", "sambamba", "samblaster", "scalpel", "snpeff", "vardict",
"vardict-java", "varscan", "variant-effect-predictor", "vt", "wham"]
def _broad_versioner(type):
def get_version(config):
from bcbio import broad
try:
runner = broad.runner_from_config(config)
except ValueError:
return ""
if type == "gatk":
return runner.get_gatk_version()
elif type == "mutect":
try:
runner = broad.runner_from_config(config, "mutect")
except ValueError:
return ""
return runner.get_mutect_version()
else:
raise NotImplementedError(type)
return get_version
def jar_versioner(program_name, jar_name):
"""Retrieve version information based on jar file.
"""
def get_version(config):
try:
pdir = config_utils.get_program(program_name, config, "dir")
# not configured
except ValueError:
return ""
jar = os.path.basename(config_utils.get_jar(jar_name, pdir))
for to_remove in [jar_name, ".jar", "-standalone"]:
jar = jar.replace(to_remove, "")
if jar.startswith(("-", ".")):
jar = jar[1:]
if not jar:
logger.warn("Unable to determine version for program '{}' from jar file {}".format(
program_name, config_utils.get_jar(jar_name, pdir)))
return jar
return get_version
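# Illustrative note with assumed values: for program_name="gatk" and
# jar_name="GenomeAnalysisTK", a jar named "GenomeAnalysisTK-3.5.jar" is reduced by the
# replace/strip steps above to "3.5", which is the string get_version() reports.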
def java_versioner(pname, jar_name, **kwargs):
def get_version(config):
try:
pdir = config_utils.get_program(pname, config, "dir")
except ValueError:
return ""
jar = config_utils.get_jar(jar_name, pdir)
kwargs["cmd"] = "java"
kwargs["args"] = "-Xms128m -Xmx256m -jar %s" % jar
return _get_cl_version(kwargs, config)
return get_version
_alt_progs = [{"name": "gatk", "version_fn": _broad_versioner("gatk")},
{"name": "mutect",
"version_fn": _broad_versioner("mutect")}]
def _parse_from_stdoutflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
parts = [p for p in line[line.find(x) + len(x):].split() if p.strip()]
return parts[0].strip()
return ""
def _parse_from_parenflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
return line.split("(")[-1].split(")")[0]
return ""
def _get_cl_version(p, config):
"""Retrieve version of a single commandline program.
"""
if not p.get("has_cl_version", True):
return ""
try:
prog = config_utils.get_program(p["cmd"], config)
except config_utils.CmdNotFound:
localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"])
if os.path.exists(localpy_cmd):
prog = localpy_cmd
else:
return ""
args = p.get("args", "")
cmd = "{prog} {args}"
subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
with contextlib.closing(subp.stdout) as stdout:
if p.get("stdout_flag"):
v = _parse_from_stdoutflag(stdout, p["stdout_flag"])
elif p.get("paren_flag"):
v = _parse_from_parenflag(stdout, p["paren_flag"])
else:
lines = [l.strip() for l in stdout.read().split("\n") if l.strip()]
v = lines[-1]
if v.endswith("."):
v = v[:-1]
return v
def _get_brew_versions():
"""Retrieve versions of tools installed via brew.
"""
from bcbio import install
tooldir = install.get_defaults().get("tooldir")
brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew"
try:
vout = subprocess.check_output([brew_cmd, "list", "--versions"])
except OSError: # brew not installed/used
vout = ""
out = {}
for vstr in vout.split("\n"):
if vstr.strip():
parts = vstr.rstrip().split()
name = parts[0]
v = parts[-1]
out[name] = v
return out
def _get_versions(config=None):
"""Retrieve details on all programs available on the system.
"""
out = [{"program": "bcbio-nextgen",
"version": ("%s-%s" % (version.__version__, version.__git_revision__)
if version.__git_revision__ else version.__version__)}]
manifest_dir = _get_manifest_dir(config)
manifest_vs = _get_versions_manifest(manifest_dir)
if manifest_vs:
out += manifest_vs
else:
assert config is not None, "Need configuration to retrieve from non-manifest installs"
brew_vs = _get_brew_versions()
for p in _cl_progs:
out.append({"program": p["cmd"],
"version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else
_get_cl_version(p, config))})
for p in _alt_progs:
out.append({"program": p["name"],
"version": (brew_vs[p["name"]] if p["name"] in brew_vs else
p["version_fn"](config))})
out.sort(key=lambda x: x["program"])
return out
def _get_manifest_dir(data=None):
"""
get manifest directory from the data dictionary, falling back on alternatives
it prefers, in order:
1. locating it from the bcbio_system.yaml file
2. locating it from the galaxy directory
    3. locating it from the python executable.
it can accept either the data or config dictionary
"""
manifest_dir = None
if data:
bcbio_system = tz.get_in(["config", "bcbio_system"], data, None)
bcbio_system = bcbio_system if bcbio_system else data.get("bcbio_system", None)
if bcbio_system:
sibling_dir = os.path.normpath(os.path.dirname(bcbio_system))
else:
sibling_dir = dd.get_galaxy_dir(data)
if sibling_dir:
manifest_dir = os.path.normpath(os.path.join(sibling_dir, os.pardir,
"manifest"))
if not manifest_dir or not os.path.exists(manifest_dir):
manifest_dir = os.path.join(config_utils.get_base_installdir(), "manifest")
return manifest_dir
def _get_versions_manifest(manifest_dir):
"""Retrieve versions from a pre-existing manifest of installed software.
"""
all_pkgs = _manifest_progs + [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs]
if os.path.exists(manifest_dir):
out = []
for plist in ["toolplus", "brew", "python", "r", "debian", "custom"]:
pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist)
if os.path.exists(pkg_file):
with open(pkg_file) as in_handle:
pkg_info = yaml.safe_load(in_handle)
added = []
for pkg in all_pkgs:
if pkg in pkg_info:
added.append(pkg)
out.append({"program": pkg, "version": pkg_info[pkg]["version"]})
for x in added:
all_pkgs.remove(x)
out.sort(key=lambda x: x["program"])
for pkg in all_pkgs:
out.append({"program": pkg, "version": ""})
return out
def _get_program_file(dirs):
if dirs.get("work"):
base_dir = utils.safe_makedir(os.path.join(dirs["work"], "provenance"))
return os.path.join(base_dir, "programs.txt")
def write_versions(dirs, config=None, is_wrapper=False):
"""Write CSV file with versions used in analysis pipeline.
"""
out_file = _get_program_file(dirs)
if is_wrapper:
assert utils.file_exists(out_file), "Failed to create program versions from VM"
elif out_file is None:
for p in _get_versions(config):
print("{program},{version}".format(**p))
else:
with open(out_file, "w") as out_handle:
for p in _get_versions(config):
out_handle.write("{program},{version}\n".format(**p))
return out_file
def get_version_manifest(name, data=None, required=False):
"""Retrieve a version from the currently installed manifest.
"""
manifest_dir = _get_manifest_dir(data)
manifest_vs = _get_versions_manifest(manifest_dir)
for x in manifest_vs:
if x["program"] == name:
v = x.get("version", "")
if v:
return v
if required:
raise ValueError("Did not find %s in install manifest. Could not check version." % name)
return ""
def add_subparser(subparsers):
"""Add command line option for exporting version information.
"""
parser = subparsers.add_parser("version",
help="Export versions of used software to stdout or a file ")
parser.add_argument("--workdir", help="Directory export programs to in workdir/provenance/programs.txt",
default=None)
def get_version(name, dirs=None, config=None):
"""Retrieve the current version of the given program from cached names.
"""
if dirs:
p = _get_program_file(dirs)
else:
p = config["resources"]["program_versions"]
with open(p) as in_handle:
for line in in_handle:
prog, version = line.rstrip().split(",")
if prog == name and version:
return version
raise KeyError("Version information not found for %s in %s" % (name, p))
| mjafin/bcbio-nextgen | bcbio/provenance/programs.py | Python | mit | 11,554 |
import time
from math import ceil, log10
def get_numbers():
"""
Returns list of numbers in data file
"""
filename = "problem13-data.txt"
text_file = open(filename, "r")
lines = text_file.readlines()
text_file.close()
L = [int(lines[i]) for i in range(len(lines))]
return L
def test():
L = get_numbers()
print(L[0])
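# Worked example of the digit extraction in main(), with small made-up numbers:
# for S = 987654321, ndigits = 9, and keeping 3 leading digits,
# (S - S % 10**(9-3)) // 10**(9-3) = 987; main() applies the same idea with 10 digits.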
def main():
start = time.time()
L = get_numbers()
S = sum(L)
ndigits = int(ceil(log10(S)))
    S_red = (S - S % 10**(ndigits-10)) // 10**(ndigits-10)  # integer division keeps the result an exact int
print(S_red)
end = time.time()
print(end-start)
if __name__ == "__main__":
main()
| hubenjm/Project-Euler | problem13.py | Python | mit | 565 |
# -*- coding: utf-8 -*-
"""
Part of the **Pyception** package.
:Version: 1
:Authors: - Florian Indot
:Contact: [email protected]
:Date: 28.06.2017
:Revision: 3
:Copyright: MIT License
"""
import os
import sys
import matplotlib.pyplot as plt
import lib as pct
from lib import SETTINGS, Level, Repository
from .Experiment import Experiment
class Subject(object):
"""
This class is an experiments container. Its **analyze** and **save**
methods are iterative wrappers over their Experiment method counterpart.
.. seealso:: Experiment
"""
# ------------------------------------------------------------------- VARIABLES
db_file = SETTINGS["db_file"]
repository = Repository(db_file)
# ----------------------------------------------------------------------- MAGIC
def __init__(self, name: str, repo: Repository = None) -> None:
"""
        Class constructor. Initializes important variables.
:param name: The name of the subject.
:type name: str
"""
self.name = name
self.directory = os.path.join(SETTINGS["analytics_dir"], self.name)
self.id = None
self._control = None
self.repository = self.repository if repo is None else repo
self._load()
self.experiments = list()
self.pull()
# --------------------------------------------------------------------- METHODS
def _load(self):
"""
Deferred class constructor, loads important values from the database.
"""
pct.log("Retreiving subject %s description..." % self.name,
Level.DEBUG, linesep="")
data = self.repository.read({'name': self.name}, "subjects")[0]
if not data:
pct.log(" Failed", Level.FAILED)
pct.log("Subject does not exist in database.", Level.WARNING)
return
pct.log(" Done", Level.DONE)
self.id = data["id"]
self._control = data["control"] == 1
def pull(self) -> None:
'''
TODO
'''
pct.log("Retreiving %s experiments descriptions..." % self.name,
Level.DEBUG, linesep="")
experiments = self.repository.read({'subject': self.id}, "experiments")
pct.log(" Done", Level.DONE)
for experiment in experiments:
self.experiments.append(Experiment(experiment["name"], self))
def analyze(self, draw_heatmap: bool = False) -> None:
"""
TODO REMAKE
        Retrieves this subject's experiments from the database, then loops
        over and analyzes each of them.
"""
pct.log("Beginning subject {0} experiments analysis...".format(
self.id
), Level.INFORMATION)
if not self.experiments:
self.pull()
for experiment in self.experiments:
try:
experiment.analyze()
if draw_heatmap and experiment.analyzed:
experiment.make_heatmap()
except Exception as e:
pct.log(e, Level.EXCEPTION)
pct.log("A fatal error occured. Exiting...", Level.ERROR)
sys.exit(-1)
except KeyboardInterrupt as ki:
print("")
pct.log("Keyboard Interrupt. Exiting...", Level.INFORMATION)
sys.exit(0)
pct.log("Experiments analysis completed successfully.",
Level.INFORMATION)
def save(self):
"""
Save every analyzed experiments.
"""
for experiment in self.experiments:
experiment.save()
if experiment.heatmap is not None:
plt.imsave(
os.path.join(experiment.directory, "heatmap.png"),
experiment.heatmap,
cmap='nipy_spectral'
)
fig, img, clrb = experiment.figure()
fig.savefig(
os.path.join(experiment.directory, "heatmap_figure.png")
)
fig.clear()
# ------------------------------------------------------------------ PROPERTIES
@property
def saved_analysis(self):
sa = True
for experiment in self.experiments:
sa = sa and experiment.saved_analysis
return sa
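# Hedged usage sketch; the subject name is an assumption, not project data:
#   subject = Subject("participant_01")
#   subject.analyze(draw_heatmap=True)  # calls Experiment.analyze() for each experiment
#   subject.save()                      # persists the analyses and heatmap figures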
| 3mpr/TobiiLogger | lib/analytics/Subject.py | Python | mit | 4,318 |
"""
Module containing defintions for all sites to test. Should be subclasses
of 'browser_base'.
"""
import os
import imp
sites = {}
for fle in os.listdir(__path__[0]):
name, ext = os.path.splitext(fle)
if ext == '.py' and not name == "__init__":
sites[name] = imp.load_source(name, os.path.join(__path__[0], fle))
| winfried/WTF | sites/__init__.py | Python | mit | 342 |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
from blockext import *
import sphero
__version__ = '0.2.1'
class Sphero:
def __init__(self):
self.robot = sphero.Sphero()
self.robot.connect()
self.name = self.robot.get_bluetooth_info().name
"""def _is_connected(self):
try:
self.robot.get_bluetooth_info()
except:
self.robot = False
if not self.robot:
try:
self.robot.connect()
self.name = self.robot.get_bluetooth_info().name
except:
pass
return bool(self.robot)"""
def _problem(self):
if not self.robot:
return "Your Sphero is not connected"
def _on_reset(self):
self.robot.roll(0,0)
def get_sphero_name(self):
return self.name
def set_sphero_name(self, name):
self.name = name
self.robot.set_device_name(name)
def roll_sphero(self, power, heading):
self.robot.roll(power*2.55, heading)
"""def set_sphero_color(self, r, g, b):
self.robot.set_rgb(r,g,b)"""
descriptor = Descriptor(
name = "Orbotix Sphero",
port = 7575,
blocks = [
Block('roll_sphero', 'command', 'roll Sphero %n percent speed at %n degrees', defaults=[100,0]),
Block('get_sphero_name', 'reporter', 'get Sphero name'),
Block('set_sphero_name', 'command', 'set Sphero name to %s', defaults=['Rob Orb'])
]
)
extension = Extension(Sphero, descriptor)
if __name__ == '__main__':
extension.run_forever(debug=True)
| blockext/sphero | __init__.py | Python | mit | 1,738 |
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class ClientStatus(BaseType):
_soap_tag = 'client_status'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'host_name': str,
'computer_id': str,
'ipaddress_client': str,
'ipaddress_server': str,
'protocol_version': int,
'full_version': str,
'last_registration': str,
'send_state': str,
'receive_state': str,
'status': str,
'port_number': int,
'public_key_valid': int,
'cache_row_id': int},
complex_properties={},
list_properties={},
)
self.host_name = None
self.computer_id = None
self.ipaddress_client = None
self.ipaddress_server = None
self.protocol_version = None
self.full_version = None
self.last_registration = None
self.send_state = None
self.receive_state = None
self.status = None
self.port_number = None
self.public_key_valid = None
self.cache_row_id = None
| tanium/pytan | lib/taniumpy/object_types/client_status.py | Python | mit | 1,389 |
from localisation_core import *
import sys
from pymongo import MongoClient
if len(sys.argv) > 1 and sys.argv[1].lower() not in ['testbb'] :
exit('argument is incorrect')
client = MongoClient()
db = client.twitterdb
nonloc_values = []
if len(sys.argv) == 1:
nonloc_values = getUnloc()
print("Data loaded. Number of nonloc is %d" % len(nonloc_values))
bbox_values = getBbox()
print("and number of bbox is %d" % len(bbox_values))
tweet_content = [value.text for value in tqdm(bbox_values + nonloc_values)]
# flatten the list of lists to 1d array
flatten_content = [item for sublist in tweet_content for item in sublist]
# remove duplicates
content_dict = {w: '' for w in flatten_content}
# enumerate without duplicates
content_enum = {w: idx for idx, w in enumerate(content_dict)}
print("Dictionary is generated. Number of words %d" % len(content_enum))
conjunction_matrix = np.zeros((len(bbox_values + nonloc_values), len(content_enum)), dtype=int)
d = dict()
for idx, tweet in enumerate(tqdm(bbox_values + nonloc_values)):
d[tweet.id] = idx
for w in tweet.text:
conjunction_matrix[idx, content_enum[w]] += 1.
print("Matrix is calculated. Shape is", conjunction_matrix.shape)
#make a threshold for similarity
threshold = 0.9
if len(sys.argv) > 2 and sys.argv[1].lower() == 'testbb':
qualityTesting(bbox_values, sys.argv[2], threshold=threshold, alpha = 0.5, conjunction_matrix=conjunction_matrix, d=d)
exit('testing is completed')
new_col = []
if len(sys.argv) == 1:
new_col = localise_to_bbox(nonloc_values[:100], bbox_values, threshold=threshold, alpha=0.5, conj_m=conjunction_matrix, d=d)
print("We could recognize %f per cent tweets" % (len(new_col) * 100.0 / len(nonloc_values)))
#join recognised values to bbox
bbox_values += new_col | alek-beloff/teamproject | localisation_main.py | Python | mit | 1,787 |
"""Configure tests."""
import logging
import sys
import httpretty
import pytest
import requests.packages.urllib3
@pytest.fixture(autouse=True, scope='session')
def config_httpretty():
"""Configure httpretty global variables."""
httpretty.HTTPretty.allow_net_connect = False
@pytest.fixture(autouse=True, scope='session')
def config_requests():
"""Disable SSL warnings during testing."""
if sys.version_info[:3] < (2, 7, 9):
requests.packages.urllib3.disable_warnings()
logging.getLogger('requests').setLevel(logging.WARNING)
| Robpol86/appveyor-artifacts | tests/conftest.py | Python | mit | 559 |
import os
import argparse
import csv
import random
from .utils import CompletePath
# Get arguments
def get_args():
parser = argparse.ArgumentParser(description="Use the phyluce_align_get_informative_sites output to extract UCE alignments with a certain number of informative sites (or random ones)", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input',required=True,action=CompletePath,default=None,help='The phyluce_align_get_informative_sites screen output as text file')
    parser.add_argument('--output',required=True,action=CompletePath,default=None,help='The name of the output file, which will be a list of the loci (alignments) fulfilling the requirement')
parser.add_argument('--mode',choices=["top","bottom","cutoff","random"],default="top",help='Choose which alignments you want to extract: top = the x most informative alignments bottom = the x least informative alignments cutoff = all alignments with more than x informative sites random = randomly chooses x alignments x is specified with the --threshold flag')
	parser.add_argument('--threshold',type=int,default=15,help='Threshold value: the number of alignments to extract in top, bottom and random mode, or the minimum number of informative sites per alignment in cutoff mode.')
return parser.parse_args()
# Preparation for calling input variables and files
args = get_args()
input = args.input
output = args.output
out_file = output.split("/")[-1]
out_dir = '/'.join(output.split("/")[:-1])
mode = args.mode
threshold = args.threshold
def getkey(string):
locus, misc, values = string.partition('\t')
x, y, values = values.partition('\t')
insites, miscx, miscy = values.partition('\t')
return int(insites)
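# Illustrative example of the expected input format (hypothetical phyluce line):
# getkey('uce-123\t600\t42\t7.0') returns 42, i.e. the third tab-separated column,
# which holds the number of informative sites used for sorting below.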
output_file = open("%s/%s_%s_%s" %(out_dir,mode,threshold,out_file), "wb")
uce_list=csv.writer(output_file)
with open(input) as f:
content = [x.strip('\n') for x in f.readlines()]
header = content[0]
tail = content[-1]
body = content[1:-1]
body = sorted(body, key=getkey)
if mode == 'cutoff':
for line in body:
element = line.split('\t')
if int(element[2]) >= int(threshold):
print(line)
uce_list.writerow([element[0]])
elif mode == 'top':
for line in body[-threshold:]:
element = line.split('\t')
print(line)
uce_list.writerow([element[0]])
elif mode == 'bottom':
for line in body[:threshold]:
element = line.split('\t')
print(line)
uce_list.writerow([element[0]])
elif mode =='random':
random_body = random.sample(body, threshold)
for line in random_body:
element = line.split('\t')
print(line)
uce_list.writerow([element[0]])
| AntonelliLab/seqcap_processor | secapr/extract_alignments_from_phyluce_get_inf_sites_output.py | Python | mit | 2,572 |
from __future__ import print_function
import logging
try:
from PyQt4.QtGui import QPushButton
except ImportError:
from PyQt5.QtWidgets import QPushButton
from pyqtgraph.dockarea import DockArea, Dock
logger = logging.getLogger(__name__)
class ProviderPB(QPushButton):
"""
    Widget to enable/disable the plotter inside the dock area and activate the
    analysis algorithm.
    This class is a demo of how a provider should be written. The developer can
    follow the same guidelines for any type of widget. In this case, a checkable
    PushButton is used to place a BufferPlotter inside the PgDockArea.
"""
def __init__(self, dock_area, plotter, algorithm, **kwargs):
"""
Initializes the class, sets defaults
"""
super(ProviderPB, self).__init__()
self.title = kwargs.get('title', 'Provider')
self.dock_kw = kwargs.get('dock_kw',
{'size': (1, 1), 'closable': False})
self.plot_kw = kwargs.get('plot_kw', {'test': 1})
self.toggled.connect(self.toggled_provider)
self.setCheckable(True)
self.setChecked(False)
self.setText(self.title)
self.da = dock_area
self.plotter = plotter
self.algo = algorithm
# connecting the algorithm to the plotter
self.algo.finished.connect(self.finished_algo)
self.plot_id = None
def toggled_provider(self):
"""
Adds/removes the plotter. If dock area is defined then the plotter is
inserted there, else as a separate QWidget.
"""
if self.plotter is None:
logger.debug('No plotter for provider %s' % self.title)
return
if self.da:
if self.isChecked():
logger.debug('Insert plotter to dock area')
self.plot_id = self.da.insert_dock(self.plotter, self.title,
**self.dock_kw)
else:
logger.debug('Remove plotter from dock area')
self.da.remove_dock(self.plot_id)
self.plot_id = None
else:
if self.isChecked():
self.plotter.show()
else:
self.plotter.close()
def finished_algo(self):
try:
self.plotter.plot(self.algo.data, **self.plot_kw)
except AttributeError as e:
logger.debug('No plotter for [%s]: %s'
% (self.title, e))
    def closed_plotter(self):
logger.debug('Plotter Closed.')
class PgDockArea(DockArea):
"""
The area where the plotters are going to be placed.
    Implements very simple functionality. The only instance variable is the
    dict of active docks, and three methods are defined for inserting into,
    removing from and emptying it. On insertion, the id of the dock holding
    the BufferPlotter is returned to the BufferProvider.
"""
def __init__(self):
super(PgDockArea, self).__init__()
self.active_docks = {}
def insert_dock(self, *args, **kwargs):
"""
Insert a dock to the dock area.
        The widget and a title are expected as positional arguments; the title
        is used to create the Dock, and the widget and this area are passed to
        it as keyword arguments. The Dock is finally added to the active_docks
        dict and its id is returned.
"""
logger.debug('Inserting dock... args:%s, kwargs:%s' % (args, kwargs))
wdgt, title = args
kwargs['widget'] = wdgt
kwargs['area'] = self
d = Dock(title, **kwargs)
d.setParent(self)
self.active_docks[id(d)] = d
self.addDock(dock=d)
return id(d)
def remove_dock(self, identity):
"""
Remove a dock from the dock area.
The only argument here is the id of the dock inside the active_docks
        dict.
"""
        logger.debug('Removing dock with id:%d...' % identity)
# self.active_docks[index].close()
self.active_docks[identity].setParent(None)
del self.active_docks[identity]
def remove_all_docks(self):
"""
Remove all docks from the dock area.
"""
        for identity in list(self.active_docks.keys()):
            self.remove_dock(identity)
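# Minimal wiring sketch (illustrative only, not part of the original module). The
# plotter and algorithm arguments are assumed to follow the interfaces described in
# the ProviderPB docstring: the plotter exposes plot(data, **kw) and the algorithm
# exposes a Qt 'finished' signal and a 'data' attribute.
def _example_wiring(plotter, algorithm):
    """Wire a provider button and a dock area together (hypothetical helper)."""
    area = PgDockArea()
    provider = ProviderPB(area, plotter, algorithm, title='Buffer')
    # Toggling the button now docks/undocks the plotter inside 'area'.
    return area, provider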
| andalexo/pyqttoolbox | pytaap/providers.py | Python | mit | 4,194 |
import commands
import os
import eyed3
# Changing string to unicode
def str_uni(string):
    return unicode(string, 'utf-8')
# Currently not in use , hence commented
# def addslashes(s):
# s = s.replace(' ', '\ ').replace("'", "\'").replace('"', '\"')
# return s
# I used a class here
# This is pretty surprising since I suck at OOPS
# Looks like I want to conquer my Fear
# Carpe Diem
# Also , Fuck you TKT
class Trial(object):
def __init__(self):
self.message = 'is it a constructor'
def show_details(self, audio_file):
print "Artist name is %s" % (audio_file.tag.artist)
print "Album name is %s" % (audio_file.tag.album)
print "Title is %s" % (audio_file.tag.title)
def change_details(self,title=None,Album=None,Artist=None,Track_num=None):
if title:
audio_file.tag.title = str_uni(title)
if Album:
audio_file.tag.album = str_uni(Album)
if Artist:
            audio_file.tag.artist = str_uni(Artist)
audio_file.tag.save()
# Below Link will be useful for images
# http://tuxpool.blogspot.in/2013/02/how-to-store-images-in-mp3-files-using.html
# Official Docs
# http://eyed3.nicfit.net/api/eyed3.id3.html#id1
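# Hedged sketch based on the eyed3 docs linked above (the images.set() signature may
# differ between eyed3 versions); set_cover_art is a hypothetical helper, not part of
# the original script.
def set_cover_art(audio_file, image_path, mime_type='image/jpeg'):
    with open(image_path, 'rb') as img:
        # 3 is the ID3 "front cover" picture type (eyed3.id3.frames.ImageFrame.FRONT_COVER)
        audio_file.tag.images.set(3, img.read(), mime_type)
    audio_file.tag.save()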
# Change method of taking input , use some file - explorer
#music_path = str(raw_input("Enter the path for the Music Directory: "))
music_path = '/home/yashmehrotra/Music/Music/'
os.chdir(music_path)
current_directory = commands.getoutput('pwd')
# Finding the right bash command took 30 mins , it isnt as easy as it seems
mp3_list = commands.getoutput('ls -R | grep ".mp3" ').split('\n')
# List of all the music files in the given directory
# Be a true pythonista and do not use this -- edit - fixed i am a true
# pythonista \n Me Gusta (Why did that troll went out of fashion)
print "Here is a list of all the songs"
for index, song in enumerate(mp3_list, start=1):
print index, song
print "Please enter the song no. you want to edit,press -1 to exit"
user_choice = int(raw_input())
if user_choice != -1:
x = mp3_list[user_choice - 1]
# str1 = 'find ' + current_directory + ' -name "' + x + '*"'
# str1 = 'find %s -name "%s*" ' % (current_directory, x)
str1 = 'find {0} -name "{1}*" '.format(current_directory, x)
# The following comment is no more valid - Improve fucking str1 , have u ever heard of string formatting asshole
# Also finding this command took a lot of time
x = commands.getoutput(str1)
print x
audio_file = eyed3.load(x)
t = Trial()
t.show_details(audio_file)
| yashmehrotra/mp3-manager | trial.py | Python | mit | 2,588 |
"""
Utilities for generating perfect hash functions for integer keys.
This module implements the first fit decreasing method, described in
Gettys01_. It is **not** guaranteed to generate a *minimal* perfect hash,
though by no means is it impossible. See for example:
>>> phash = hash_parameters('+-<>[].,', to_int=ord)
>>> len(phash.slots)
8
>>> phash.slots
('+', ',', '-', '.', '<', '[', '>', ']')
.. _Gettys01: http://www.drdobbs.com/architecture-and-design/generating-perfect-hash-functions/184404506
"""
from .getty import make_hash, make_dict, hash_parameters
__version__ = '2.0.1'
__all__ = ['make_hash', 'make_dict', 'hash_parameters']
| eddieantonio/perfection | perfection/__init__.py | Python | mit | 649 |
import uuid
from sketch_components.utils import combine_styles, update_existing, \
small_camel_case
class Box(object):
def __init__(self):
self.top = None
self.right = None
self.bottom = None
self.left = None
def __repr__(self):
box = {'top': self.top, 'right': self.right, 'bottom': self.bottom,
'left': self.left}
for key in box.keys():
if box[key] is None:
box.pop(key)
return repr(box)
def __nonzero__(self):
if [self.top, self.left, self.bottom, self.right].count(None) == 4:
return False
return True
def __bool__(self):
if [self.top, self.left, self.bottom, self.right].count(None) == 4:
return False
return True
class BoxModel(object):
def __init__(self):
self.position = None
self.width = None
self.height = None
self.padding = Box()
self.border = Box()
self.margin = Box()
def __repr__(self):
box = {'position': self.position, 'width': self.width,
'height': self.height, 'padding': self.padding,
'border': self.border, 'margin': self.margin}
for key in box.keys():
if box[key] is None:
box.pop(key)
elif isinstance(box[key], Box) and not box[key]:
box.pop(key)
return repr(box)
class StyleSheet(object):
def __init__(self, styles, name=None):
self.styles = styles
if name is None:
self.name = 'style{}'.format(str(uuid.uuid4()).replace('-', ''))
else:
if not (name.endswith('style') or name.endswith('Style')):
name = name + 'Style'
name = small_camel_case(name)
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "{styles.%s}" % self.name
def get_css_styles(self):
return self.styles
def update_styles(self, styles):
self.styles = combine_styles(self.styles, styles)
def update_existing(self, styles):
update_existing(self.styles, styles)
def get_valid_css_styles(self):
styles = dict()
for key in self.styles.keys():
if self.styles.get(key) is not None:
styles[key] = self.styles.get(key)
return styles
def get_style(self, key):
return self.styles.get(key, None)
| ibhubs/sketch-components | sketch_components/engines/react/base/components/commons.py | Python | mit | 2,482 |
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
"""
The :mod:`gruvi.http` module implements a HTTP client and server.
The client and server are relatively complete implementations of the HTTP
protocol. Some of the supported features are keepalive, pipelining, chunked
transfers and trailers.
This implementation supports both HTTP/1.0 and HTTP/1.1. The default for the
client is 1.1, and the server will respond with the same version as the client.
Connections are kept alive by default. This means that you need to make sure
you close connections when they are no longer needed, by calling the
appropriate :meth:`~gruvi.Endpoint.close` method.
It is important to clarify how the API exposed by this module uses text and
binary data. Data that is read from or written to the HTTP header, such as the
version string, method, and headers, are text strings (``str`` on Python 3,
``str`` or ``unicode`` on Python 2). However, if the string type is unicode
aware (``str`` on Python 3, ``unicode`` on Python 2), you must make sure that
it only contains code points that are part of ISO-8859-1, which is the default
encoding specified in :rfc:`2616`. Data that is read from or written to HTTP
bodies is always binary. This is done in alignment with the WSGI spec that
requires this.
This module provides a number of APIs. Client-side there is one:
* A :class:`gruvi.Client` based API. You will use :meth:`~HttpClient.connect`
to connect to a server, and then use :meth:`~HttpClient.request` and
:meth:`~HttpClient.getresponse` to interact with it.
The following server-side APIs are available:
* A :class:`gruvi.Server` based API. Incoming HTTP messages are passed to a
message handler that needs to take care of all aspects of HTTP other than
parsing.
* A WSGI API, as described in :pep:`333`.
The server-side API is selected through the *adapter* argument to
:class:`HttpServer` constructor. The default adapter is :class:`WsgiAdapter`,
which implements the WSGI protocol. To use the raw server interface, pass the
identity function (``lambda x: x``).
"""
from __future__ import absolute_import, print_function
import re
import time
import functools
import six
from collections import namedtuple
from . import logging, compat
from .hub import switchpoint
from .util import delegate_method, docfrom
from .protocols import MessageProtocol, ProtocolError
from .stream import Stream
from .endpoints import Client, Server
from .http_ffi import lib, ffi
from six.moves import http_client
__all__ = ['HttpError', 'ParsedUrl', 'parse_url', 'HttpMessage', 'HttpRequest',
'HttpProtocol', 'WsgiAdapter', 'HttpClient', 'HttpServer']
#: Constant indicating a HTTP request.
REQUEST = lib.HTTP_REQUEST
#: Constant indicating a HTTP response.
RESPONSE = lib.HTTP_RESPONSE
# Export some definitions from http.client.
for name in dir(http_client):
value = getattr(http_client, name)
if not name.isupper() or not isinstance(value, int):
continue
if value in http_client.responses:
globals()[name] = value
HTTP_PORT = http_client.HTTP_PORT
HTTPS_PORT = http_client.HTTPS_PORT
responses = http_client.responses
# The "Hop by Hop" headers as defined in RFC 2616. These may not be set by the
# HTTP handler.
hop_by_hop = frozenset(('connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailers',
'transfer-encoding', 'upgrade'))
# Keep a cache of http-parser's HTTP methods numbers -> method strings
_http_methods = {}
for i in range(100):
method = ffi.string(lib.http_method_str(i)).decode('ascii')
if not method.isupper():
break
_http_methods[i] = method
class HttpError(ProtocolError):
"""Exception that is raised in case of HTTP protocol errors."""
# Header parsing
# RFC 2616 section 2.2 grammar definitions:
re_ws = re.compile('([ \t]+)')
re_token = re.compile('([!#$%&\'*+\-.0-9A-Z^_`a-z|~]+)')
re_qstring = re.compile(r'"(([\t !\x23-\x5b\x5d-\xff]|\\[\x00-\x7f])*)"')
re_qpair = re.compile(r'\\([\x00-\x7f])')
re_qvalue = re.compile('[qQ]=(1(\.0{0,3})?|0(\.[0-9]{0,3})?)')
def lookahead(buf, pos):
"""Return the next char at the current buffer position."""
if pos >= len(buf):
return None
return buf[pos]
def accept_ws(buf, pos):
"""Skip whitespace at the current buffer position."""
match = re_ws.match(buf, pos)
if not match:
return None, pos
return buf[match.start(0):match.end(0)], match.end(0)
def accept_lit(char, buf, pos):
"""Accept a literal character at the current buffer position."""
if pos >= len(buf) or buf[pos] != char:
return None, pos
return char, pos+1
def expect_lit(char, buf, pos):
"""Expect a literal character at the current buffer position."""
if pos >= len(buf) or buf[pos] != char:
return None, len(buf)
return char, pos+1
def accept_re(regexp, buf, pos):
"""Accept a regular expression at the current buffer position."""
match = regexp.match(buf, pos)
if not match:
return None, pos
return buf[match.start(1):match.end(1)], match.end(0)
def expect_re(regexp, buf, pos):
"""Require a regular expression at the current buffer position."""
match = regexp.match(buf, pos)
if not match:
return None, len(buf)
return buf[match.start(1):match.end(1)], match.end(0)
def parse_content_type(header):
"""Parse the "Content-Type" header."""
typ = subtyp = None; options = {}
typ, pos = expect_re(re_token, header, 0)
_, pos = expect_lit('/', header, pos)
subtyp, pos = expect_re(re_token, header, pos)
ctype = header[:pos] if subtyp else ''
while pos < len(header):
_, pos = accept_ws(header, pos)
_, pos = expect_lit(';', header, pos)
_, pos = accept_ws(header, pos)
name, pos = expect_re(re_token, header, pos)
_, pos = expect_lit('=', header, pos)
char = lookahead(header, pos)
if char == '"':
value, pos = expect_re(re_qstring, header, pos)
value = re_qpair.sub('\\1', value)
elif char:
value, pos = expect_re(re_token, header, pos)
if name and value is not None:
options[name] = value
return ctype, options
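# Illustrative example: parse_content_type('text/html; charset=ISO-8859-4')
# returns ('text/html', {'charset': 'ISO-8859-4'}).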
def parse_te(header):
"""Parse the "TE" header."""
pos = 0
names = []
while pos < len(header):
name, pos = expect_re(re_token, header, pos)
_, pos = accept_ws(header, pos)
_, pos = accept_lit(';', header, pos)
_, pos = accept_ws(header, pos)
qvalue, pos = accept_re(re_qvalue, header, pos)
if name:
names.append((name, qvalue))
_, pos = accept_ws(header, pos)
_, pos = expect_lit(',', header, pos)
_, pos = accept_ws(header, pos)
return names
def parse_trailer(header):
"""Parse the "Trailer" header."""
pos = 0
names = []
while pos < len(header):
name, pos = expect_re(re_token, header, pos)
if name:
names.append(name)
_, pos = accept_ws(header, pos)
_, pos = expect_lit(',', header, pos)
_, pos = accept_ws(header, pos)
return names
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
rfc1123_fmt = '%a, %d %b %Y %H:%M:%S GMT'
def rfc1123_date(timestamp=None):
"""Create a RFC1123 style Date header for *timestamp*."""
if timestamp is None:
timestamp = int(time.time())
# The time stamp must be GMT, and cannot be localized.
tm = time.gmtime(timestamp)
s = rfc1123_fmt.replace('%a', weekdays[tm.tm_wday]) \
.replace('%b', months[tm.tm_mon-1])
return time.strftime(s, tm)
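# Illustrative example: rfc1123_date(0) returns 'Thu, 01 Jan 1970 00:00:00 GMT'.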
# URL parser (using http-parser). Unlike Python's urlsplit() this doesn't do
# relative URLs. This is a benefit IMHO, because that allows us to recognize
# absolute URLs with a missing schema ('www.example.com') and not mistake them
# for the path component of a relative URL. To parse relative URLs you need to
# first turn them into origin-form or absolute-form.
default_ports = {'http': 80, 'https': 443, 'ws': 80, 'wss': 443}
ssl_protocols = frozenset(('https', 'wss'))
url_field_names = ('scheme', 'host', 'path', 'query', 'fragment', 'port', 'userinfo')
url_field_indices = tuple((getattr(lib, 'UF_{}'.format(name.replace('scheme', 'schema')).upper())
for name in url_field_names))
class ParsedUrl(namedtuple('_ParsedUrl', url_field_names)):
"""A :func:`~collections.namedtuple` with the following fields:
``scheme``, ``host``, ``port``, ``path``, ``query``, ``fragment`` and
``userinfo``.
In addition to the tuple fields the following properties are defined:
"""
__slots__ = ()
@classmethod
def from_parser(cls, parser, url):
values = []
for ix in url_field_indices:
if parser.field_set & (1 << ix):
fd = parser.field_data[ix]
values.append(url[fd.off:fd.off+fd.len])
else:
values.append('')
return cls(*values)
@property
def addr(self):
"""Address tuple that can be used with :func:`~gruvi.create_connection`."""
port = self.port
if port:
port = int(port)
else:
port = default_ports.get(self.scheme or 'http')
return (self.host, port)
@property
def ssl(self):
"""Whether the scheme requires SSL/TLS."""
return self.scheme in ssl_protocols
@property
def target(self):
"""The "target" i.e. local part of the URL, consisting of the path and query."""
target = self.path or '/'
if self.query:
target = '{}?{}'.format(target, self.query)
return target
ParsedUrl.__new__.__doc__ = ''
ParsedUrl.__new__.__defaults__ = ('',) * len(url_field_names)
def parse_url(url, default_scheme='http', is_connect=False):
"""Parse an URL and return its components.
The *default_scheme* argument specifies the scheme in case URL is
an otherwise valid absolute URL but with a missing scheme.
The *is_connect* argument must be set to ``True`` if the URL was requested
with the HTTP CONNECT method. These URLs have a different form and need to
be parsed differently.
The result is a :class:`ParsedUrl` containing the URL components.
"""
# If this is not in origin-form, authority-form or asterisk-form and no
# scheme is present, assume it's in absolute-form with a missing scheme.
# See RFC7230 section 5.3.
if url[:1] not in '*/' and not is_connect and '://' not in url:
url = '{}://{}'.format(default_scheme, url)
burl = s2b(url)
parser = ffi.new('struct http_parser_url *')
lib.http_parser_url_init(parser)
res = lib.http_parser_parse_url(ffi.from_buffer(burl), len(burl), is_connect, parser)
if res != 0:
raise ValueError('invalid URL')
parsed = ParsedUrl.from_parser(parser, url)
return parsed
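# Illustrative example:
#   url = parse_url('http://www.example.com/search?q=gruvi')
#   url.host -> 'www.example.com', url.path -> '/search', url.query -> 'q=gruvi'
#   url.addr -> ('www.example.com', 80), url.target -> '/search?q=gruvi'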
# String conversions. Note that ISO-8859-1 is the default encoding for HTTP
# headers.
def s2b(s):
"""Convert a string *s* to bytes in the ISO-8859-1 encoding."""
if type(s) is not bytes:
s = s.encode('iso-8859-1')
return s
def ba2s(ba):
"""Convert a byte-array to a "str" type."""
if six.PY3:
return ba.decode('iso-8859-1')
else:
return bytes(ba)
def cd2s(cd):
"""Convert a cffi cdata('char *') to a str."""
s = ffi.string(cd)
if six.PY3:
s = s.decode('iso-8859-1')
return s
def get_header(headers, name, default=None):
"""Return the value of header *name*.
The *headers* argument must be a list of ``(name, value)`` tuples. If the
header is found its associated value is returned, otherwise *default* is
returned. Header names are matched case insensitively.
"""
name = name.lower()
for header in headers:
if header[0].lower() == name:
return header[1]
return default
def remove_headers(headers, name):
"""Remove all headers with name *name*.
The list is modified in-place and the updated list is returned.
"""
i = 0
name = name.lower()
for j in range(len(headers)):
if headers[j][0].lower() != name:
if i != j:
headers[i] = headers[j]
i += 1
del headers[i:]
return headers
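# Illustrative example: with headers = [('Content-Type', 'text/plain'), ('X-Foo', '1')],
# get_header(headers, 'content-type') returns 'text/plain', and
# remove_headers(headers, 'x-foo') leaves [('Content-Type', 'text/plain')].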
def create_chunk(buf):
"""Create a chunk for the HTTP "chunked" transfer encoding."""
chunk = bytearray()
chunk.extend(s2b('{:X}\r\n'.format(len(buf))))
chunk.extend(s2b(buf))
chunk.extend(b'\r\n')
return chunk
def create_chunked_body_end(trailers=None):
"""Create the ending that terminates a chunked body."""
ending = bytearray()
ending.extend(b'0\r\n')
if trailers:
for name, value in trailers:
ending.extend(s2b('{}: {}\r\n'.format(name, value)))
ending.extend(b'\r\n')
return ending
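# Illustrative example: create_chunk(b'hello') yields b'5\r\nhello\r\n', and
# create_chunked_body_end() without trailers yields b'0\r\n\r\n'.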
def create_request(version, method, url, headers):
"""Create a HTTP request header."""
    # According to my measurements using b''.join is faster than constructing a
# bytearray.
message = []
message.append(s2b('{} {} HTTP/{}\r\n'.format(method, url, version)))
for name, value in headers:
message.append(s2b('{}: {}\r\n'.format(name, value)))
message.append(b'\r\n')
return b''.join(message)
def create_response(version, status, headers):
"""Create a HTTP response header."""
message = []
message.append(s2b('HTTP/{} {}\r\n'.format(version, status)))
for name, value in headers:
message.append(s2b('{}: {}\r\n'.format(name, value)))
message.append(b'\r\n')
return b''.join(message)
class HttpMessage(object):
"""HTTP message.
Instances of this class are returned by :meth:`HttpClient.getresponse` and
passed as an argument to :class:`HttpServer` message handlers.
"""
def __init__(self):
self._message_type = None
self._version = None
self._status_code = None
self._method = None
self._url = None
self._parsed_url = None
self._headers = []
self._charset = None
self._body = None
self._should_keep_alive = None
@property
def message_type(self):
"""The message type, either :data:`REQUEST` or :data:`RESPONSE`."""
return self._message_type
@property
def version(self):
"""The HTTP version as a string, either ``'1.0'`` or ``'1.1'``."""
return self._version
@property
def status_code(self):
"""The HTTP status code as an integer. Only for response messages."""
return self._status_code
@property
def method(self):
"""The HTTP method as a string. Only for request messages."""
return self._method
@property
def url(self):
"""The URL as a string. Only for request messages."""
return self._url
@property
def parsed_url(self):
"""The parsed URL as a :class:`ParsedUrl` instance."""
return self._parsed_url
@property
def headers(self):
"""The headers as a list of ``(name, value)`` tuples."""
return self._headers
delegate_method(headers, get_header)
@property
def charset(self):
"""The character set as parsed from the "Content-Type" header, if available."""
return self._charset
@property
def body(self):
"""The message body, as a :class:`~gruvi.Stream` instance."""
return self._body
class HttpRequest(object):
"""HTTP client request.
Usually you do not instantiate this class directly, but use the instance
    returned by :meth:`HttpProtocol.request`. You can however start a new
    request yourself by instantiating this class and passing it a protocol instance.
"""
def __init__(self, protocol):
self._protocol = protocol
self._content_length = None
self._chunked = False
self._bytes_written = 0
self._charset = None
@switchpoint
def start_request(self, method, url, headers=None, bodylen=None):
"""Start a new HTTP request.
The optional *headers* argument contains the headers to send. It must
be a sequence of ``(name, value)`` tuples.
The optional *bodylen* parameter is a hint that specifies the length of
the body that will follow. A length of -1 indicates no body, 0 means an
empty body, and a positive number indicates the body size in bytes.
This parameter helps determine whether to use the chunked transfer
encoding. Normally when the body size is known chunked encoding is not used.
"""
self._headers = headers or []
agent = host = clen = trailer = None
# Check the headers provided, and capture some information about the
# request from them.
for name, value in self._headers:
lname = name.lower()
# Only HTTP applications are allowed to set "hop-by-hop" headers.
if lname in hop_by_hop:
raise ValueError('header {} is hop-by-hop'.format(name))
elif lname == 'user-agent':
agent = value
elif lname == 'host':
host = value
elif lname == 'content-length':
clen = int(value)
elif lname == 'trailer':
trailer = parse_trailer(value)
elif lname == 'content-type' and value.startswith('text/'):
ctype, params = parse_content_type(value)
self._charset = params.get('charset')
version = self._protocol._version
# The Host header is mandatory in 1.1. Add it if it's missing.
if host is None and version == '1.1':
self._headers.append(('Host', self._protocol._server_name))
# Identify ourselves.
if agent is None:
self._headers.append(('User-Agent', self._protocol.identifier))
# Check if we need to use chunked encoding due to unknown body size.
if clen is None and bodylen is None:
if version == '1.0':
raise HttpError('body size unknown for HTTP/1.0')
self._chunked = True
self._content_length = clen
# Check if trailers are requested and if so need to switch to chunked.
if trailer:
if version == '1.0':
raise HttpError('cannot support trailers for HTTP/1.0')
if clen is not None:
remove_headers(self._headers, 'Content-Length')
self._chunked = True
self._trailer = trailer
# Add Content-Length if we know the body size and are not using chunked.
if not self._chunked and clen is None and bodylen >= 0:
self._headers.append(('Content-Length', str(bodylen)))
self._content_length = bodylen
# Complete the "Hop by hop" headers.
if version == '1.0':
self._headers.append(('Connection', 'keep-alive'))
elif version == '1.1':
self._headers.append(('Connection', 'te'))
self._headers.append(('TE', 'trailers'))
if self._chunked:
self._headers.append(('Transfer-Encoding', 'chunked'))
# Start the request
self._protocol._requests.append(method)
header = create_request(version, method, url, self._headers)
self._protocol.writer.write(header)
@switchpoint
def write(self, buf):
"""Write *buf* to the request body."""
if not isinstance(buf, six.binary_type):
raise TypeError('buf: must be a bytes instance')
# Be careful not to write zero-length chunks as they indicate the end of a body.
if len(buf) == 0:
return
if self._content_length and self._bytes_written > self._content_length:
raise RuntimeError('wrote too many bytes ({} > {})'
.format(self._bytes_written, self._content_length))
self._bytes_written += len(buf)
if self._chunked:
buf = create_chunk(buf)
self._protocol.writer.write(buf)
@switchpoint
def end_request(self):
"""End the request body."""
if not self._chunked:
return
trailers = [(n, get_header(self._headers, n)) for n in self._trailer] \
if self._trailer else None
ending = create_chunked_body_end(trailers)
self._protocol.writer.write(ending)
class ErrorStream(object):
"""Passed to the WSGI application as environ['wsgi.errors'].
Forwards messages to the Python logging facility.
"""
__slots__ = ['_log']
def __init__(self, log=None):
self._log = log or logging.get_logger()
def flush(self):
pass
def write(self, data):
self._log.error('wsgi.errors: {}', data)
def writelines(self, seq):
for line in seq:
self.write(line)
class WsgiAdapter(object):
"""WSGI Adapter"""
def __init__(self, application):
"""
This class adapts the WSGI callable *application* so that instances of
this class can be used as a message handler in :class:`HttpProtocol`.
"""
self._application = application
self._transport = None
self._protocol = None
self._log = logging.get_logger()
@switchpoint
def send_headers(self):
# Send out the actual headers.
# We need to figure out the transfer encoding of the body that will
# follow the header. Here's what we do:
# - If we know the body length, don't use any TE.
# - Otherwise, if the protocol is HTTP/1.1, use "chunked".
# - Otherwise, close the connection after the body is sent.
clen = get_header(self._headers, 'Content-Length')
version = self._message.version
# Keep the connection alive if the request wanted it kept alive AND we
# can keep it alive because we don't need EOF to signal end of message.
can_chunk = version == '1.1'
can_keep_alive = can_chunk or clen is not None or self._body_len is not None
self._keepalive = self._message._should_keep_alive and can_keep_alive
# The default on HTTP/1.1 is keepalive, on HTTP/1.0 it is to close.
if version == '1.1' and not self._keepalive:
self._headers.append(('Connection', 'close'))
elif version == '1.0' and self._keepalive:
self._headers.append(('Connection', 'keep-alive'))
# Are we using chunked?
self._chunked = can_chunk and clen is None and self._body_len is None
if self._chunked:
self._headers.append(('Transfer-Encoding', 'chunked'))
elif clen is None and self._body_len is not None:
self._headers.append(('Content-Length', str(self._body_len)))
# Trailers..
trailer = get_header(self._headers, 'Trailer')
te = get_header(self._message.headers, 'TE')
if version == '1.1' and trailer is not None and te is not None:
tenames = [e[0].lower() for e in parse_te(te)]
if 'trailers' in tenames:
trailer = parse_trailer(trailer)
else:
remove_headers(self._headers, 'Trailer')
self._trailer = trailer
# Add some informational headers.
server = get_header(self._headers, 'Server')
if server is None:
self._headers.append(('Server', self._protocol.identifier))
date = get_header(self._headers, 'Date')
if date is None:
self._headers.append(('Date', rfc1123_date()))
header = create_response(version, self._status, self._headers)
self._protocol.writer.write(header)
self._headers_sent = True
def start_response(self, status, headers, exc_info=None):
# Callable to be passed to the WSGI application.
if exc_info:
try:
if self._headers_sent:
six.reraise(*exc_info)
finally:
exc_info = None
elif self._status is not None:
raise RuntimeError('response already started')
for name, value in headers:
if name.lower() in hop_by_hop:
raise ValueError('header {} is hop-by-hop'.format(name))
self._status = status
self._headers = headers
return self.write
@switchpoint
def write(self, data):
# Callable passed to the WSGI application by start_response().
if not isinstance(data, bytes):
raise TypeError('data: expecting bytes instance')
# Never write empty chunks as they signal end of body.
if not data:
return
if not self._status:
raise HttpError('WSGI handler did not call start_response()')
if not self._headers_sent:
self.send_headers()
if self._chunked:
data = create_chunk(data)
self._protocol.writer.write(data)
@switchpoint
def end_response(self):
# Finalize a response. This method must be called.
if not self._status:
raise HttpError('WSGI handler did not call start_response()')
if not self._headers_sent:
self._body_len = 0
self.send_headers()
if self._chunked:
trailers = [hd for hd in self._headers if hd[0] in self._trailer] \
if self._trailer else None
ending = create_chunked_body_end(trailers)
self._protocol.writer.write(ending)
if not self._keepalive:
self._transport.close()
@switchpoint
def __call__(self, message, transport, protocol):
# Run the WSGI handler.
self._message = message
if self._transport is None:
self._transport = transport
self._protocol = protocol
self._sockname = transport.get_extra_info('sockname')
self._peername = transport.get_extra_info('peername')
self._status = None
self._headers = None
self._headers_sent = False
self._chunked = False
self._body_len = None
self.create_environ()
self._log.debug('request: {} {}', message.method, message.url)
result = None
try:
result = self._application(self._environ, self.start_response)
# Prevent chunking in this common case:
if isinstance(result, list) and len(result) == 1:
self._body_len = len(result[0])
for chunk in result:
self.write(chunk)
self.end_response()
finally:
if hasattr(result, 'close'):
result.close()
ctype = get_header(self._headers, 'Content-Type', 'unknown')
clen = get_header(self._headers, 'Content-Length', 'unknown')
self._log.debug('response: {} ({}; {} bytes)'.format(self._status, ctype, clen))
def create_environ(self):
# Initialize the environment with per connection variables.
m = self._message
env = self._environ = {}
# CGI variables
env['SCRIPT_NAME'] = ''
if isinstance(self._sockname, tuple):
env['SERVER_NAME'] = self._protocol._server_name or self._sockname[0]
env['SERVER_PORT'] = str(self._sockname[1])
else:
env['SERVER_NAME'] = self._protocol._server_name or self._sockname
env['SERVER_PORT'] = ''
env['SERVER_SOFTWARE'] = self._protocol.identifier
env['SERVER_PROTOCOL'] = 'HTTP/' + m.version
env['REQUEST_METHOD'] = m.method
env['PATH_INFO'] = m.parsed_url[2]
env['QUERY_STRING'] = m.parsed_url[4]
env['REQUEST_URI'] = m.url
for name, value in m.headers:
if name.lower() in hop_by_hop:
continue
name = name.upper().replace('-', '_')
if name not in ('CONTENT_LENGTH', 'CONTENT_TYPE'):
name = 'HTTP_' + name
env[name] = value
# SSL information
sslobj = self._transport.get_extra_info('ssl')
cipherinfo = sslobj.cipher() if sslobj else None
if sslobj and cipherinfo:
env['HTTPS'] = '1'
env['SSL_CIPHER'] = cipherinfo[0]
env['SSL_PROTOCOL'] = cipherinfo[1]
env['SSL_CIPHER_USEKEYSIZE'] = int(cipherinfo[2])
# Support the de-facto X-Forwarded-For and X-Forwarded-Proto headers
# that are added by reverse proxies.
peername = self._transport.get_extra_info('peername')
remote = env.get('HTTP_X_FORWARDED_FOR')
env['REMOTE_ADDR'] = remote if remote else peername[0] \
if isinstance(peername, tuple) else ''
proto = env.get('HTTP_X_FORWARDED_PROTO')
env['REQUEST_SCHEME'] = proto if proto else 'https' if sslobj else 'http'
# WSGI specific variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = env['REQUEST_SCHEME']
env['wsgi.input'] = m.body
env['wsgi.errors'] = ErrorStream(self._log)
env['wsgi.multithread'] = True
env['wsgi.multiprocess'] = True
env['wsgi.run_once'] = False
env['wsgi.charset'] = m.charset
# Gruvi specific variables
env['gruvi.sockname'] = self._sockname
env['gruvi.peername'] = self._peername
class HttpProtocol(MessageProtocol):
"""HTTP protocol implementation."""
identifier = __name__
#: Default HTTP version.
default_version = '1.1'
#: Max header size. The parser keeps the header in memory during parsing.
max_header_size = 65536
#: Max number of body bytes to buffer. Bodies larger than this will cause
#: the transport to be paused until the buffer is below the threshold again.
max_buffer_size = 65536
#: Max number of pipelined requests to keep before pausing the transport.
max_pipeline_size = 10
# In theory, max memory is pipeline_size * (header_size + buffer_size)
def __init__(self, handler=None, server_side=False, server_name=None,
version=None, timeout=None):
"""
The *handler* argument specifies a message handler to handle incoming
HTTP requests. It must be a callable with the signature
``handler(message, transport, protocol)``.
The *server_side* argument specifies whether this is a client or server
side protocol. For server-side protocols, the *server_name* argument
can be used to provide a server name. If not provided, then the socket
name of the listening socket will be used.
"""
super(HttpProtocol, self).__init__(handler, timeout=timeout)
if server_side and handler is None:
raise ValueError('need a handler for server side protocol')
self._handler = handler
self._server_side = server_side
self._server_name = server_name
self._version = self.default_version if version is None else version
if self._version not in ('1.0', '1.1'):
raise ValueError('unsupported HTTP version: {!r}'.format(version))
self._timeout = timeout
self._create_parser()
self._requests = []
self._response = None
self._writer = None
self._message = None
self._error = None
def _create_parser(self):
        # Create a new CFFI http-parser instance.
self._parser = ffi.new('http_parser *')
self._cdata = ffi.new_handle(self) # store in instance to keep alive:
self._parser.data = self._cdata # struct field doesn't take reference
kind = lib.HTTP_REQUEST if self._server_side else lib.HTTP_RESPONSE
lib.http_parser_init(self._parser, kind)
self._urlparser = ffi.new('struct http_parser_url *')
def _append_header_name(self, buf):
# Add a chunk to a header name.
if self._header_value:
# This starts a new header: stash away the previous one.
# The header might be part of the http headers or trailers.
header = (ba2s(self._header_name), ba2s(self._header_value))
self._message.headers.append(header)
# Try to capture the charset for text bodies.
if header[0].lower() == 'content-type':
ctype, params = parse_content_type(header[1])
if ctype.startswith('text/'):
self._message._charset = params.get('charset')
del self._header_name[:]
del self._header_value[:]
self._header_name.extend(buf)
def _append_header_value(self, buf):
# Add a chunk to a header value.
self._header_value.extend(buf)
# Parser callbacks. These are Python methods called by http-parser C code
# via CFFI. Callbacks are run in the hub fiber, and we only do parsing
# here, no handlers are run. For server protocols we stash away the result
# in a queue to be processed in a dispatcher fiber (one per protocol).
#
# Callbacks return 0 for success, 1 for error.
#
# Also note that these are static methods that get a reference to their
# instance via the parser state (parser.data).
@ffi.callback('http_cb')
def on_message_begin(parser):
# http-parser callback: prepare for a new message
self = ffi.from_handle(parser.data)
self._url = bytearray()
self._header = bytearray()
self._header_name = bytearray()
self._header_value = bytearray()
self._header_size = 0
self._message = HttpMessage()
lib.http_parser_url_init(self._urlparser)
return 0
@ffi.callback('http_data_cb')
def on_url(parser, at, length):
# http-parser callback: got a piece of the URL
self = ffi.from_handle(parser.data)
self._header_size += length
if self._header_size > self.max_header_size:
self._error = HttpError('HTTP header too large')
return 1
self._url.extend(ffi.buffer(at, length))
return 0
@ffi.callback('http_data_cb')
def on_header_name(parser, at, length):
# http-parser callback: got a piece of a header name
self = ffi.from_handle(parser.data)
self._header_size += length
if self._header_size > self.max_header_size:
self._error = HttpError('HTTP header too large')
return 1
buf = ffi.buffer(at, length)
self._append_header_name(buf)
return 0
@ffi.callback('http_data_cb')
def on_header_value(parser, at, length):
# http-parser callback: got a piece of a header value
self = ffi.from_handle(parser.data)
self._header_size += length
if self._header_size > self.max_header_size:
self._error = HttpError('HTTP header too large')
return 1
buf = ffi.buffer(at, length)
self._append_header_value(buf)
return 0
@ffi.callback('http_cb')
def on_headers_complete(parser):
# http-parser callback: the HTTP header is complete. This is the point
# where we hand off the message to our consumer. Going forward,
# on_body() will continue to write chunks of the body to message.body.
self = ffi.from_handle(parser.data)
self._append_header_name(b'')
m = self._message
m._message_type = lib.http_message_type(parser)
m._version = '{}.{}'.format(parser.http_major, parser.http_minor)
if self._server_side:
m._method = _http_methods.get(lib.http_method(parser), '<unknown>')
m._url = ba2s(self._url)
res = lib.http_parser_parse_url(ffi.from_buffer(self._url), len(self._url),
m._method == 'CONNECT', self._urlparser)
assert res == 0 # URL was already validated by http-parser
m._parsed_url = ParsedUrl.from_parser(self._urlparser, m._url)
else:
m._status_code = lib.http_status_code(parser)
m._should_keep_alive = lib.http_should_keep_alive(parser)
m._body = Stream(self._transport, 'r')
m._body.buffer.set_buffer_limits(self.max_buffer_size)
# Make the message available on the queue.
self._queue.put_nowait(m)
# Return 1 if this is a response to a HEAD request. This is a hint to
        # the parser that no body will follow. Normally the parser deduces from
        # the headers whether a body will follow (either Content-Length or
# Transfer-Encoding is present). But not so with HEAD, as the response
# is exactly identical to GET but without the body.
if not self._server_side and self._requests and self._requests.pop(0) == 'HEAD':
return 1
return 0
@ffi.callback('http_data_cb')
def on_body(parser, at, length):
# http-parser callback: got a body chunk
self = ffi.from_handle(parser.data)
# StreamBuffer.feed() may pause the transport here if the buffer size is exceeded.
self._message.body.buffer.feed(bytes(ffi.buffer(at, length)))
return 0
@ffi.callback('http_cb')
def on_message_complete(parser):
# http-parser callback: the http request or response ended
# complete any trailers that might be present
self = ffi.from_handle(parser.data)
self._append_header_name(b'')
self._message.body.buffer.feed_eof()
self._maybe_pause_transport()
return 0
# The settings object is shared between all protocol instances.
_settings = ffi.new('http_parser_settings *')
_settings.on_message_begin = on_message_begin
_settings.on_url = on_url
_settings.on_header_field = on_header_name
_settings.on_header_value = on_header_value
_settings.on_headers_complete = on_headers_complete
_settings.on_body = on_body
_settings.on_message_complete = on_message_complete
def connection_made(self, transport):
# Protocol callback
super(HttpProtocol, self).connection_made(transport)
self._transport = transport
self._writer = Stream(transport, 'w')
def data_received(self, data):
# Protocol callback
nbytes = lib.http_parser_execute(self._parser, self._settings, data, len(data))
if nbytes != len(data):
msg = cd2s(lib.http_errno_name(lib.http_errno(self._parser)))
self._log.debug('http_parser_execute(): {}'.format(msg))
self._error = HttpError('parse error: {}'.format(msg))
if self._message:
self._message.body.buffer.feed_error(self._error)
self._transport.close()
def connection_lost(self, exc):
# Protocol callback
        # Feed the EOF to the parser. It will tell us if it was unexpected.
super(HttpProtocol, self).connection_lost(exc)
nbytes = lib.http_parser_execute(self._parser, self._settings, b'', 0)
if nbytes != 0:
msg = cd2s(lib.http_errno_name(lib.http_errno(self._parser)))
self._log.debug('http_parser_execute(): {}'.format(msg))
if exc is None:
exc = HttpError('parse error: {}'.format(msg))
if self._message:
self._message.body.buffer.feed_error(exc)
self._error = exc
@property
def writer(self):
"""A :class:`~gruvi.Stream` instance for writing directly to the
underlying transport."""
return self._writer
@switchpoint
def request(self, method, url, headers=None, body=None):
"""Make a new HTTP request.
The *method* argument is the HTTP method as a string, for example
``'GET'`` or ``'POST'``. The *url* argument specifies the URL.
The optional *headers* argument specifies extra HTTP headers to use in
the request. It must be a sequence of ``(name, value)`` tuples.
The optional *body* argument may be used to include a body in the
request. It must be a ``bytes`` instance, a file-like object opened in
binary mode, or an iterable producing ``bytes`` instances. To send
potentially large bodies, use the file or iterator interfaces. This has
the benefit that only a single chunk is kept in memory at a time.
The response to the request can be obtained by calling the
:meth:`getresponse` method. You may make multiple requests before
reading a response. For every request that you make however, you must
call :meth:`getresponse` exactly once. The remote HTTP implementation
        will send back the responses in the same order as the requests.
        This method will use the "chunked" transfer encoding if there is a body
        and the body size is unknown ahead of time. This happens when the file
        or iterator interface is used in the absence of a "Content-Length"
        header.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise HttpError('not connected')
request = HttpRequest(self)
bodylen = -1 if body is None else \
len(body) if isinstance(body, bytes) else None
request.start_request(method, url, headers, bodylen)
if isinstance(body, bytes):
request.write(body)
elif hasattr(body, 'read'):
while True:
chunk = body.read(4096)
if not chunk:
break
request.write(chunk)
elif hasattr(body, '__iter__'):
for chunk in body:
request.write(chunk)
request.end_request()
@switchpoint
def getresponse(self):
"""Wait for and return a HTTP response.
The return value will be a :class:`HttpMessage`. When this method
returns only the response header has been read. The response body can
be read using :meth:`~gruvi.Stream.read` and similar methods on
the message :attr:`~HttpMessage.body`.
Note that if you use persistent connections (the default), it is
required that you read the entire body of each response. If you don't
then deadlocks may occur.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise HttpError('not connected')
message = self._queue.get(timeout=self._timeout)
if isinstance(message, Exception):
raise compat.saved_exc(message)
return message
class HttpClient(Client):
"""HTTP client."""
def __init__(self, version=None, timeout=None):
"""
The optional *version* argument specifies the HTTP version to use. The
default is :attr:`HttpProtocol.default_version`.
The optional *timeout* argument specifies the timeout for various
network and protocol operations.
"""
protocol_factory = functools.partial(HttpProtocol, version=version)
super(HttpClient, self).__init__(protocol_factory, timeout=timeout)
self._server_name = None
@docfrom(Client.connect)
def connect(self, address, **kwargs):
# Capture the host name that we are connecting to. We need this for
# generating "Host" headers in HTTP/1.1
if self._server_name is None and isinstance(address, tuple):
host, port = address[:2] # len(address) == 4 for IPv6
if port != default_ports['https' if 'ssl' in kwargs else 'http']:
host = '{}:{}'.format(host, port)
self._server_name = host
super(HttpClient, self).connect(address, **kwargs)
self._protocol._server_name = self._server_name
protocol = Client.protocol
delegate_method(protocol, HttpProtocol.request)
delegate_method(protocol, HttpProtocol.getresponse)
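# Minimal client usage sketch (illustrative; the host, port and URL are placeholders):
#
#   client = HttpClient()
#   client.connect(('www.example.com', 80))
#   client.request('GET', '/')
#   response = client.getresponse()
#   body = response.body.read()
#   client.close()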
class HttpServer(Server):
"""HTTP server."""
#: The default adapter to use.
default_adapter = WsgiAdapter
def __init__(self, application, server_name=None, adapter=None, timeout=None):
"""The *application* argument is the web application to expose on this
server. The application is wrapped in *adapter* to create a message
handler as required by :class:`HttpProtocol`. By default the adapter in
:attr:`default_adapter` is used.
The optional *server_name* argument specifies the server name. The
default is to use the host portion of the address passed to
:meth:`~gruvi.Server.listen`. The server name is made available to WSGI
applications as the $SERVER_NAME environment variable.
The optional *timeout* argument specifies the timeout for various
network and protocol operations.
"""
adapter = self.default_adapter if adapter is None else adapter
def handler(*args):
return adapter(application)(*args)
protocol_factory = functools.partial(HttpProtocol, handler,
server_side=True, server_name=server_name)
super(HttpServer, self).__init__(protocol_factory, timeout)
self._server_name = server_name
def connection_made(self, transport, protocol):
if protocol._server_name is None:
protocol._server_name = self._server_name
def listen(self, address, **kwargs):
# Capture the first listen address to provide for a default server name.
if not self._server_name and isinstance(address, tuple):
host, port = address[:2] # len(address) == 4 for IPv6
if port != default_ports['https' if 'ssl' in kwargs else 'http']:
host = '{}:{}'.format(host, port)
self._server_name = host
super(HttpServer, self).listen(address, **kwargs)
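# Minimal WSGI server sketch (illustrative; the address and application are placeholders):
#
#   def hello_app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'Hello, world!\n']
#
#   server = HttpServer(hello_app)
#   server.listen(('localhost', 8080))
#   # ...then run the gruvi hub / event loop as appropriate for the application.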
| swegener/gruvi | lib/gruvi/http.py | Python | mit | 46,281 |
#
from pyon import loads, dumps
from etree import Node, Leaf
htmlTags = ["html", "meta", "header", "body", "table", "td", "tr", "p", "h1", "h2", "h3", "h4", "li", "it"]
moduleDict = globals()
for tag in htmlTags:
def func(*args, _thisTag=tag, **kwargs):
if args:
return Leaf(_thisTag, args[0])
else:
return Node(_thisTag, **kwargs)
func.__name__ = tag
moduleDict[tag] = func
del func
del moduleDict, htmlTags
print(dir())
if __name__ == '__main__':
text = r"""
html(*[
body(*[
h1('Title'),
p('text', class_='a'),
table(*[
tr(*[
td(1), td(2)
]),
tr(*[
td(3), td(4)
]),
tr(*[
td(6), td(7)
]),
])
])
])
"""
print(text)
ob = loads(text)
text = dumps(ob)
print(text)
| intellimath/pyon | examples/etree/html.py | Python | mit | 948 |
# The MIT License (MIT)
#
# Copyright (c) 2015 University of East Anglia, Norwich, UK
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Developed by Geoffrey French in collaboration with Dr. M. Fisher and
# Dr. M. Mackiewicz.
import argparse
import json
from matplotlib import pyplot as plt
import numpy as np
import os
import yaml
from flask import Flask, render_template, request, make_response, send_from_directory
from image_labelling_tool import labelling_tool
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Image labelling tool - Flask app')
parser.add_argument('--slic', action='store_true', help='Use SLIC segmentation to generate initial labels')
parser.add_argument('--readonly', action='store_true', help='Don\'t persist changes to disk')
parser.add_argument('--image_dir')
parser.add_argument('--label_names')
parser.add_argument('--file_ext', type=str, default='png')
args = parser.parse_args()
file_ext = '.{}'.format(args.file_ext)
# `LabelClass` parameters are: symbolic name, human readable name for UI, and RGB colour as list
with open(args.label_names, 'r') as f:
label_names = yaml.load(f)
cmap = plt.get_cmap('Spectral')
colors = [(np.array(cmap(i)[:3]) * 255).astype(np.int32).tolist()
for i in range(1, len(label_names) + 1)]
label_classes = [labelling_tool.LabelClass(name, name, color)
for color, name in zip(colors, label_names)]
img_dir = args.image_dir
if args.slic:
import glob
from skimage.segmentation import slic
for path in glob.glob(os.path.join(img_dir, '*{}'.format(file_ext))):
name = os.path.splitext(path)[0]
out_name = name + '__labels.json'
if os.path.exists(out_name):
print('Label already exits at {}'.format(out_name))
# raise ValueError
continue
print('Segmenting {0}'.format(path))
img = plt.imread(path)
# slic_labels = slic(img, 1000, compactness=20.0)
# slic_labels = slic(img, 1000, slic_zero=True) + 1
slic_labels = slic(img, 1500, slic_zero=True) + 1
print('Converting SLIC labels to vector labels...')
labels = labelling_tool.ImageLabels.from_label_image(slic_labels)
with open(out_name, 'w') as f:
json.dump(labels.labels_json, f)
readonly = args.readonly
# Load in .JPG images from the 'images' directory.
labelled_images = labelling_tool.PersistentLabelledImage.for_directory(
img_dir, image_filename_pattern='*{}'.format(file_ext),
readonly=readonly)
print('Loaded {0} images'.format(len(labelled_images)))
# Generate image IDs list
image_ids = [str(i) for i in range(len(labelled_images))]
# Generate images table mapping image ID to image so we can get an image by ID
images_table = {image_id: img for image_id, img in zip(image_ids, labelled_images)}
# Generate image descriptors list to hand over to the labelling tool
# Each descriptor provides the image ID, the URL and the size
image_descriptors = []
for image_id, img in zip(image_ids, labelled_images):
data, mimetype, width, height = img.data_and_mime_type_and_size()
image_descriptors.append(labelling_tool.image_descriptor(
image_id=image_id, url='/image/{}'.format(image_id),
width=width, height=height
))
app = Flask(__name__, static_folder='image_labelling_tool/static')
config = {
'tools': {
'imageSelector': True,
'labelClassSelector': True,
'drawPolyLabel': True,
'compositeLabel': True,
'deleteLabel': True,
}
}
@app.route('/')
def index():
label_classes_json = [{'name': cls.name, 'human_name': cls.human_name, 'colour': cls.colour} for cls in label_classes]
return render_template('labeller_page.jinja2',
tool_js_urls=labelling_tool.js_file_urls('/static/labelling_tool/'),
label_classes=json.dumps(label_classes_json),
image_descriptors=json.dumps(image_descriptors),
initial_image_index=0,
config=json.dumps(config))
@app.route('/labelling/get_labels/<image_id>')
def get_labels(image_id):
image = images_table[image_id]
labels = image.labels_json
complete = False
label_header = {
'labels': labels,
'image_id': image_id,
'complete': complete
}
r = make_response(json.dumps(label_header))
r.mimetype = 'application/json'
return r
@app.route('/labelling/set_labels', methods=['POST'])
def set_labels():
label_header = json.loads(request.form['labels'])
image_id = label_header['image_id']
complete = label_header['complete']
labels = label_header['labels']
image = images_table[image_id]
image.labels_json = labels
return make_response('')
@app.route('/image/<image_id>')
def get_image(image_id):
image = images_table[image_id]
data, mimetype, width, height = image.data_and_mime_type_and_size()
r = make_response(data)
r.mimetype = mimetype
return r
@app.route('/ext_static/<path:filename>')
def base_static(filename):
return send_from_directory(app.root_path + '/ext_static/', filename)
# app.run(debug=True)
app.run(debug=False)
| yuyu2172/image-labelling-tool | flask_app.py | Python | mit | 6,680 |
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
from config import config
bootstrap = Bootstrap()
mail = Mail()
db = SQLAlchemy()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
db.init_app(app)
# attach routes and error pages
# - routes are stored in app/main/views.py
# - error handlers are stored in app/main/errors.py
# - importing these modules causes the routes/errors handlers
# to be associated with the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
| benosment/cookbook | app/__init__.py | Python | mit | 799 |
from django.conf.urls import patterns, include, url
from tracking.views import *
urlpatterns = patterns(
'',
url(r'^logout/$',LogoutView.as_view(), name="logout"),
url(r'^home/$', IndexView.as_view(), name="index"),
url(r'^add/organization/$', OrganizationView.as_view(), name="add-organization"),
url(r'^add/referring_entity/$', ReferringEntityView.as_view(), name="add-referring-entity"),
url(r'^add/treating_provider/$', TreatingProviderView.as_view(), name="add-treating-provider"),
url(r'^add/patient_visit/$', PatientVisitView.as_view(), name="add-patient-visit"),
url(r'^add/get-patient_visit-view/$', GetPatientVisitReport.as_view(), name="get-patient_visit-view"),
url(r'^patient-visit-history/$', GetPatientVisitHistory.as_view(), name="patient-visit-history"),
url(r'^edit/organization/([0-9]+)/$', edit_organization, name="edit-organization"),
url(r'^edit/referring_entity/([0-9]+)/$', edit_referring_entity, name="edit-referring-entity"),
url(r'^edit/treating_provider/([0-9]+)/$', edit_treating_provider, name="edit-treating-provider"),
url(r'^edit/patient_visit/([0-9]+)/$', edit_patient_visit, name="edit-patient-visit"),
url(r'^delete/patient_visit/([0-9]+)/$', delete_patient_visit, name="delete-patient-visit"),
url(r'^delete/organization/([0-9]+)/$', delete_organization, name="delete-organization"),
url(r'^delete/referring_entity/([0-9]+)/$', delete_referring_entity, name="delete-referring-entity"),
url(r'^delete/treating_provider/([0-9]+)/$', delete_treating_provider, name="delete-treating-provider"),
url(r'^view/organizations/$', OrganizationListView.as_view(), name="view-organizations"),
url(r'^view/referring_entities/$', ReferringEntityListView.as_view(), name="view-referring-entities"),
url(r'^view/treating_provider/$', TreatingProviderListView.as_view(), name="view-treating-providers"),
url(r'^view/patient_visits/$', PatientVisitListView.as_view(), name="view-patient-visits"),
url('', include('social.apps.django_app.urls', namespace='social')),
)
| Heteroskedastic/Dr-referral-tracker | tracking/urls.py | Python | mit | 2,078 |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2013-2014 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""pwkit.tinifile - Dealing with typed ini-format files full of measurements.
Functions:
read
Generate :class:`pwkit.Holder` instances of measurements from an ini-format file.
write
Write :class:`pwkit.Holder` instances of measurements to an ini-format file.
read_stream
Lower-level version; only operates on streams, not path names.
write_stream
Lower-level version; only operates on streams, not path names.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('read_stream read write_stream write').split ()
import six
from . import Holder, inifile, msmt
def _parse_one (old):
new = {}
for name, value in six.iteritems (old.__dict__):
if name == 'section':
new[name] = value
continue
a = name.rsplit (':', 1)
if len (a) == 1:
a.append ('s')
shname, typetag = a
new[shname] = msmt.parsers[typetag] (value)
return Holder (**new)
def read_stream (stream, **kwargs):
for unparsed in inifile.read_stream (stream, **kwargs):
yield _parse_one (unparsed)
def read (stream_or_path, **kwargs):
for unparsed in inifile.read (stream_or_path, **kwargs):
yield _parse_one (unparsed)
def _format_many (holders, defaultsection, extrapos, digest):
# We need to handle defaultsection here, and not just leave it to inifile,
# so that we can get consistent digest computation.
for old in holders:
s = old.get ('section', defaultsection)
if s is None:
raise ValueError ('cannot determine section name for item <%s>' % old)
new = {'section': s}
if digest is not None:
digest.update ('s')
digest.update (s)
for name in sorted (x for x in six.iterkeys (old.__dict__) if x != 'section'):
value = old.get (name)
if value is None:
continue
typetag, ftext, is_imprecise = msmt.fmtinfo (value)
lname = name
if len (typetag):
if is_imprecise and name in extrapos and typetag in ('u', 'f'):
typetag = 'P' + typetag
lname += ':' + typetag
itext = ' # imprecise' if is_imprecise else ''
new[lname] = ftext + itext
if digest is not None:
digest.update ('k')
digest.update (name)
digest.update (typetag)
digest.update ('v')
if is_imprecise:
digest.update ('<impreciseval>')
else:
digest.update (ftext)
yield Holder (**new)
def write_stream (stream, holders, defaultsection=None, extrapos=(), sha1sum=False, **kwargs):
"""`extrapos` is basically a hack for multi-step processing. We have some flux
measurements that are computed from luminosities and distances. The flux
value is therefore an unwrapped Uval, which doesn't retain memory of any
positivity constraint it may have had. Therefore, if we write out such a
value using this routine, we may get something like `fx:u = 1pm1`, and the
next time it's read in we'll get negative fluxes. Fields listed in
`extrapos` will have a "P" constraint added if they are imprecise and
their typetag is just "f" or "u".
"""
if sha1sum:
import hashlib
sha1 = hashlib.sha1 ()
else:
sha1 = None
inifile.write_stream (stream,
_format_many (holders, defaultsection, extrapos, sha1),
defaultsection=defaultsection,
**kwargs)
if sha1sum:
return sha1.digest ()
def write (stream_or_path, holders, defaultsection=None, extrapos=(),
sha1sum=False, **kwargs):
if sha1sum:
import hashlib
sha1 = hashlib.sha1 ()
else:
sha1 = None
inifile.write (stream_or_path,
_format_many (holders, defaultsection, extrapos, sha1),
defaultsection=defaultsection,
**kwargs)
if sha1sum:
return sha1.digest ()
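# Usage sketch (illustrative; the file name, section name and field names below
# are placeholders, and msmt decides how each value is type-tagged on write):
#   from pwkit import Holder, tinifile
#   items = [Holder(section='source1', ra=12.5, note='demo')]
#   tinifile.write('measurements.ini', items)
#   for h in tinifile.read('measurements.ini'):
#       print(h.section, h.ra, h.note)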
| pkgw/pwkit | pwkit/tinifile.py | Python | mit | 4,312 |
# Generated by Django 3.0.8 on 2020-07-12 05:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("places", "0009_auto_20200712_0155"),
]
operations = [
migrations.AlterModelOptions(
name="restaurant", options={"ordering": ["name"]},
),
]
| huangsam/chowist | places/migrations/0010_auto_20200712_0505.py | Python | mit | 336 |
from . import Block
class BlockView(Block):
validation = '^\d{4}$|CAVOK'
name = 'view'
patterns = {
'distance': ['\d{4}$', ('^CAVOK$', '_cavok')],
}
def _cavok(self, c):
        return 'Ceiling And Visibility OKay' # No cloud below 5000ft, no Cn Tc, visibility >= 10km, nosig | tspycher/python-aviationdata | aviationdata/blocks/blockview.py | Python | mit | 306 |
from __future__ import absolute_import
from django.core.serializers.json import DjangoJSONEncoder
import json, sys
def to_json(data, **kw):
if sys.version_info.major < 3:
kw['encoding'] = 'utf-8'
return json.dumps(data, cls=DjangoJSONEncoder, ensure_ascii=False, separators=(',',':'), **kw)
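# Usage sketch (illustrative; DjangoJSONEncoder handles dates and decimals,
# and ensure_ascii=False keeps non-ASCII characters literal):
#   from datetime import date
#   to_json({'name': 'café', 'when': date(2015, 1, 1)})
#   # -> '{"name":"café","when":"2015-01-01"}'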
| futurice/django-jsonmodel | djangojsonmodel/contrib/representation/json.py | Python | mit | 308 |
from conans.model import Generator
from conans.paths import BUILD_INFO_PREMAKE
class PremakeDeps(object):
def __init__(self, deps_cpp_info):
self.include_paths = ",\n".join('"%s"' % p.replace("\\", "/")
for p in deps_cpp_info.include_paths)
self.lib_paths = ",\n".join('"%s"' % p.replace("\\", "/")
for p in deps_cpp_info.lib_paths)
self.bin_paths = ",\n".join('"%s"' % p.replace("\\", "/")
for p in deps_cpp_info.bin_paths)
self.libs = ", ".join('"%s"' % p.replace('"', '\\"') for p in deps_cpp_info.libs)
self.defines = ", ".join('"%s"' % p for p in deps_cpp_info.defines)
self.cxxflags = ", ".join('"%s"' % p for p in deps_cpp_info.cxxflags)
self.cflags = ", ".join('"%s"' % p for p in deps_cpp_info.cflags)
self.sharedlinkflags = ", ".join('"%s"' % p.replace('"', '\\"') for p in deps_cpp_info.sharedlinkflags)
self.exelinkflags = ", ".join('"%s"' % p.replace('"', '\\"') for p in deps_cpp_info.exelinkflags)
self.rootpath = "%s" % deps_cpp_info.rootpath.replace("\\", "/")
class PremakeGenerator(Generator):
@property
def filename(self):
return BUILD_INFO_PREMAKE
@property
def content(self):
deps = PremakeDeps(self.deps_build_info)
template = ('conan_includedirs{dep} = {{{deps.include_paths}}}\n'
'conan_libdirs{dep} = {{{deps.lib_paths}}}\n'
'conan_bindirs{dep} = {{{deps.bin_paths}}}\n'
'conan_libs{dep} = {{{deps.libs}}}\n'
'conan_defines{dep} = {{{deps.defines}}}\n'
'conan_cxxflags{dep} = {{{deps.cxxflags}}}\n'
'conan_cflags{dep} = {{{deps.cflags}}}\n'
'conan_sharedlinkflags{dep} = {{{deps.sharedlinkflags}}}\n'
'conan_exelinkflags{dep} = {{{deps.exelinkflags}}}\n')
sections = ["#!lua"]
sections.extend(
['conan_build_type = "{0}"'.format(str(self.settings.get_safe("build_type"))),
'conan_arch = "{0}"'.format(str(self.settings.get_safe("arch"))),
""]
)
all_flags = template.format(dep="", deps=deps)
sections.append(all_flags)
template_deps = template + 'conan_rootpath{dep} = "{deps.rootpath}"\n'
for dep_name, dep_cpp_info in self.deps_build_info.dependencies:
deps = PremakeDeps(dep_cpp_info)
dep_name = dep_name.replace("-", "_")
dep_flags = template_deps.format(dep="_" + dep_name, deps=deps)
sections.append(dep_flags)
sections.append(
"function conan_basic_setup()\n"
" configurations{conan_build_type}\n"
" architecture(conan_arch)\n"
" includedirs{conan_includedirs}\n"
" libdirs{conan_libdirs}\n"
" links{conan_libs}\n"
" defines{conan_defines}\n"
" bindirs{conan_bindirs}\n"
"end\n")
return "\n".join(sections)
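# Rough shape of the generated file (illustrative; actual values come from the
# resolved settings and dependency graph, and the file name from BUILD_INFO_PREMAKE):
#   #!lua
#   conan_build_type = "Release"
#   conan_arch = "x86_64"
#
#   conan_includedirs = {"/path/to/pkg/include"}
#   conan_libdirs = {"/path/to/pkg/lib"}
#   conan_libs = {"pkg"}
#   ...
#   function conan_basic_setup()
#       configurations{conan_build_type}
#       architecture(conan_arch)
#       ...
#   end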
| memsharded/conan | conans/client/generators/premake.py | Python | mit | 3,149 |
from django.conf.urls.defaults import patterns
from url_images import DjangoImageHandler
resizer = DjangoImageHandler()
urlpatterns = patterns('',
(r'^.*$', resizer),
)
| AndrewIngram/python-url-images | url_images/urls.py | Python | mit | 175 |
# Copyright (C) 2006-2011, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: James Krycka
"""
This module implements the AppPanel class which creates the main panel on top
of the frame of the GUI for the Direct Inversion Reflectometry application. It
updates the menu, tool bar, and status bar, and also builds notebook pages on
the panel.
"""
#==============================================================================
import os
import sys
import wx
# If we are running from an image built by py2exe, keep the frozen environment
# self contained by having matplotlib use a private directory instead of using
# .matplotlib under the user's home directory for storing shared data files
# such as fontList.cache. Note that a Windows installer/uninstaller such as
# Inno Setup should explicitly delete this private directory on uninstall.
if hasattr(sys, 'frozen'):
mplconfigdir = os.path.join(sys.prefix, '.matplotlib')
if not os.path.exists(mplconfigdir):
os.mkdir(mplconfigdir)
os.environ['MPLCONFIGDIR'] = mplconfigdir
import matplotlib
# Disable interactive mode so that plots are only updated on show() or draw().
# Note that the interactive function must be called before selecting a backend
# or importing pyplot, otherwise it will have no effect.
matplotlib.interactive(False)
# Specify the backend to use for plotting and import backend dependent classes.
# Note that this must be done before importing pyplot to have an effect.
matplotlib.use('WXAgg')
from .images import getOpenBitmap
from .simulation_page import SimulationPage
from .inversion_page import InversionPage
from .auxiliary_page import AuxiliaryPage
# Custom colors.
PALE_GREEN = "#C8FFC8"
PALE_BLUE = "#E8E8FF"
#==============================================================================
class AppPanel(wx.Panel):
"""
This class creates the main panel of the frame and builds the GUI for the
application on it.
"""
def __init__(self, frame, id=wx.ID_ANY, style=wx.RAISED_BORDER,
name="AppPanel"
):
# Create a panel on the frame. This will be the only child panel of
# the frame and it inherits its size from the frame which is useful
# during resize operations (as it provides a minimal size to sizers).
wx.Panel.__init__(self, parent=frame, id=id, style=style, name=name)
self.SetBackgroundColour("WHITE")
self.frame = frame
# Modify the menu bar.
self.modify_menubar()
# Modify the tool bar.
self.modify_toolbar()
# Reconfigure the status bar.
self.modify_statusbar([-34, -50, -16])
# Initialize the notebook bar.
self.add_notebookbar()
def modify_menubar(self):
"""Adds items to the menu bar, menus, and menu options."""
frame = self.frame
mb = frame.GetMenuBar()
# Add items to the "File" menu (prepending them in reverse order).
file_menu = mb.GetMenu(0)
file_menu.PrependSeparator()
_item = file_menu.Prepend(wx.ID_ANY, "&Save Model ...",
"Save model parameters to a file")
frame.Bind(wx.EVT_MENU, self.OnSaveModel, _item)
_item = file_menu.Prepend(wx.ID_ANY, "&Load Model ...",
"Load model parameters from a file")
frame.Bind(wx.EVT_MENU, self.OnLoadModel, _item)
# Add a 'Demo' menu to the menu bar and define its options.
demo_menu = wx.Menu()
_item = demo_menu.Append(wx.ID_ANY, "Load &Demo Model 1",
"Load description of sample model 1")
frame.Bind(wx.EVT_MENU, self.OnLoadDemoModel1, _item)
_item = demo_menu.Append(wx.ID_ANY, "Load &Demo Model 2",
"Load description of sample model 2")
frame.Bind(wx.EVT_MENU, self.OnLoadDemoModel2, _item)
_item = demo_menu.Append(wx.ID_ANY, "Load &Demo Model 3",
"Load description of sample model 3")
frame.Bind(wx.EVT_MENU, self.OnLoadDemoModel3, _item)
demo_menu.AppendSeparator()
_item = demo_menu.Append(wx.ID_ANY, "Load &Demo Dataset 1",
"Load reflectivity data files for example 1")
frame.Bind(wx.EVT_MENU, self.OnLoadDemoDataset1, _item)
frame.load_demo_dataset_1_item = _item # handle for hide/show
_item = demo_menu.Append(wx.ID_ANY, "Load &Demo Dataset 2",
"Load reflectivity data files for example 2")
frame.Bind(wx.EVT_MENU, self.OnLoadDemoDataset2, _item)
frame.load_demo_dataset_2_item = _item # handle for hide/show
mb.Insert(1, demo_menu, "&Demo")
def modify_toolbar(self):
"""Populates the tool bar."""
tb = self.frame.GetToolBar()
'''
tb.AddSimpleTool(wx.ID_OPEN, getOpenBitmap(),
"Open Data Files", "Open reflectivity data files")
icon_size = (16, 16)
icon_bitmap = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR,
icon_size)
tb.AddSimpleTool(wx.ID_OPEN, icon_bitmap,
"Open Data Files", "Open reflectivity data files")
'''
tb.Realize()
self.frame.SetToolBar(tb)
def modify_statusbar(self, subbars):
"""Divides the status bar into multiple segments."""
sb = self.frame.GetStatusBar()
sb.SetFieldsCount(len(subbars))
sb.SetStatusWidths(subbars)
def add_notebookbar(self):
"""Creates a notebook bar and a set of tabs, one for each page."""
nb = self.notebook = \
wx.Notebook(self, wx.ID_ANY,
style=wx.NB_TOP|wx.NB_FIXEDWIDTH|wx.NB_NOPAGETHEME)
try:
nb.SetTabSize((100, 20)) # works on Windows but not on Linux
except Exception:
pass
# Create page windows as children of the notebook.
self.page0 = SimulationPage(nb, colour=PALE_GREEN, fignum=0)
self.page1 = InversionPage(nb, colour=PALE_BLUE, fignum=1)
# Add the pages to the notebook with a label to show on the tab.
nb.AddPage(self.page0, "Simulation")
nb.AddPage(self.page1, "Inversion")
# Create test page windows and add them to notebook if requested.
if len(sys.argv) > 1 and '--xtabs' in sys.argv[1:]:
self.page10 = AuxiliaryPage(nb, colour="FIREBRICK", fignum=10)
self.page11 = AuxiliaryPage(nb, colour="BLUE", fignum=11)
self.page12 = AuxiliaryPage(nb, colour="GREEN", fignum=12)
self.page13 = AuxiliaryPage(nb, colour="WHITE", fignum=13)
nb.AddPage(self.page10, "Test 1")
nb.AddPage(self.page11, "Test 2")
nb.AddPage(self.page12, "Test 3")
nb.AddPage(self.page13, "Test 4")
# Put the notebook in a sizer attached to the main panel.
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(nb, 1, wx.EXPAND)
self.SetSizer(sizer)
sizer.Fit(self)
'''
# Sample code to switch windows in notebook tabs
nb.RemovePage(self.page0)
nb.RemovePage(self.page1)
nb.InsertPage(0, self.page1, "Replace 1")
nb.InsertPage(1, self.page0, "Replace 0")
'''
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
# Make sure the first page is the active one.
# Note that SetSelection generates a page change event only if the
# page changes and ChangeSelection does not generate an event. Thus
# we force a page change event so that the status bar is properly set
# on startup.
nb.ChangeSelection(0)
#nb.SendPageChangedEvent(0, 0)
def OnPageChanged(self, event):
"""
Performs page specific save, restore, or update operations when the
user clicks on a notebook tab to change pages or when the program calls
SetSelection. (Note that ChangeSelection does not generate an event.)
"""
### prev_page = self.notebook.GetPage(event.GetOldSelection())
### print("*** OnPageChanged:", event.GetOldSelection(),
### event.GetSelection())
curr_page = self.notebook.GetPage(event.GetSelection())
curr_page.OnActivePage()
event.Skip()
def OnLoadDemoModel1(self, event):
"""Loads Demo Model 1 from a resource file."""
self.page0.OnLoadDemoModel1(event)
self.notebook.SetSelection(0)
def OnLoadDemoModel2(self, event):
"""Loads Demo Model 2 from a resource file."""
self.page0.OnLoadDemoModel2(event)
self.notebook.SetSelection(0)
def OnLoadDemoModel3(self, event):
"""Loads Demo Model 3 from a resource file."""
self.page0.OnLoadDemoModel3(event)
self.notebook.SetSelection(0)
def OnLoadModel(self, event):
"""Loads the Model from a user specified file."""
self.page0.OnLoadModel(event)
self.notebook.SetSelection(0)
def OnSaveModel(self, event):
"""Saves the Model to a user specified file."""
self.page0.OnSaveModel(event)
self.notebook.SetSelection(0)
def OnLoadDemoDataset1(self, event):
"""Loads demo 1 reflectometry data from resource files."""
self.page1.OnLoadDemoDataset1(event)
self.notebook.SetSelection(1)
def OnLoadDemoDataset2(self, event):
"""Loads demo 2 reflectometry data from resource files."""
self.page1.OnLoadDemoDataset2(event)
self.notebook.SetSelection(1)
| reflectometry/direfl | direfl/gui/app_panel.py | Python | mit | 10,749 |
# coding=utf-8
import urllib.request, urllib.parse, urllib.error
import urllib.parse
import re
import time
import datetime
import sys
import random
import json
import codecs
import requests
import logging
import os
from PIL import Image
from io import BytesIO
from bson import json_util
from bs4 import BeautifulSoup
from weibo import Client
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from code_verification import verify_user
from weibo_comments_crawler import WeiboCommentsCrawler
from weibo_login import WeiboLogin
search_domain = 's.weibo.com'
weibo_type = ('hot', 'time')
USER_NAME = '[email protected]'
PASSWD = '5805880'
def save_source(html_content):
'''
this function is used for debugging
'''
file_path = './screenshot/error.html'
with open(file_path, 'w', encoding='utf-8') as f:
f.write(html_content)
return
class WeiboCrawler():
'''
crawl weibo using keywords
'''
def __init__(self, search_key, user_name=USER_NAME, passwd=PASSWD):
# login to sinaweibo
self.driver = webdriver.PhantomJS()
self.wl = WeiboLogin(user_name, passwd, self.driver) # the interface for authorization
if self.wl.login():
logging.info('login successfully')
else:
logging.info('login faied')
sys.exit(1)
self.sk = search_key.strip()
return
def __del__(self):
self.driver.quit()
return
def crawl(self, page_count=1, comments=False):
'''
crawl the weibo using the keywords
page_count: how many pages would be crawled
'''
self.results = []
# get the mids from each result page
pages = list(range(1, page_count+1))
random.shuffle(pages)
for t in ('hot', 'time'):
for i in pages:
url_to_crawl = self.get_search_url(i)
logging.info('crawling page {}:{}'.format(i, url_to_crawl))
self.driver.get(url_to_crawl)
# wait the page loading the content
try:
element = WebDriverWait(self.driver, 5).until(
lambda x: x.find_elements_by_class_name('feed_list')
)
except TimeoutException:
logging.info('there is no weibo content in {}'.format(url_to_crawl))
logging.info('you are considered as a robot')
logging.info(self.driver.current_url)
self.driver.get_screenshot_as_file('./screenshot/error.png')
# let user input the verification code
verify_user(self.driver, 'search')
# break
weibo_list = self.get_weibo_list(self.driver.page_source) # mid is used to crawl the original weibo content, using batch mode
self.results.extend(weibo_list)
# sleep some time to prevent hitting too much
# time.sleep(1)
else: continue
break
# for r in results:
# logging.info_dict(r)
logging.info('total result {}'.format(len(self.results)))
if comments:
logging.info('crawling the comments')
self.crawl_comments()
return
def get_search_url(self, page=1, w_type='hot'):
'''
compose a search url based on page_num and weibo type
'''
# logging.info('generating the url')
url=''
url += 'http://'
url += search_domain
url += '/wb'
url += urllib.parse.quote('/'+self.sk)
url += '&'
url += urllib.parse.urlencode([
('page', page),
('xsort', w_type)
])
return url
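    # Example URL produced by get_search_url above (illustrative), for
    # self.sk = '火影忍者', page=1 and the default w_type='hot':
    #   http://s.weibo.com/wb/%E7%81%AB%E5%BD%B1%E5%BF%8D%E8%80%85&page=1&xsort=hot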
def get_weibo_list(self, content):
'''
parse the weibo content in the current result page
content: the source page of the keywords result
        return: a list of weibo objects
'''
weibo_list = []
soup = BeautifulSoup(content, 'html5lib')
for t in soup.find_all('dl', class_='feed_list'):
if t.has_attr('mid'):
weibo = self.parse_weibo(t)
if weibo:
weibo_list.append(weibo)
logging.info('There are {} weibo on this page'.format(len(weibo_list)))
return weibo_list
def parse_weibo(self, t):
'''
parse weibo object from html
t: the tag object that has weibo content
Return weibo object
'''
weibo = {}
try:
weibo['keywords'] = self.sk.split(' ') #keywords is a list of words
weibo['mid'] = t['mid']
# the user name
weibo['screen_name'] = t.find(name='dt', class_='face').find('a').get('title')
weibo['user_profile'] = t.find(name='dt', class_='face').find('a').get('href')
# the content of weibo
weibo['text'] = t.find(name='dd', class_='content').find('em').get_text().strip()
# the source url of the weibo
weibo['source_url'] = t.find(name='a', class_='date').get('href').strip()
logging.info(weibo['source_url'])
# logging.info(weibo['text'])
# meta data
epoch_length = len(str(int(time.time())))
time_str = t.find('dd', class_='content').find('p', class_='info W_linkb W_textb').find(name='a', class_='date').get('date')[0:epoch_length]
time_now = time.localtime(int(time_str))
weibo['created_at'] = datetime.datetime(*time_now[0:6])
weibo['source'] = t.find('dd', class_='content').find('p', class_='info W_linkb W_textb').find('a', rel='nofollow').string.strip()
pop_str = t.find('dd', class_='content').find('p', class_='info W_linkb W_textb').find('span').get_text().strip().replace('\n', '')
pop_type = {
# key: source representation, value: attr
'赞': 'like_count',
'转发': 'repost_count',
'评论': 'comment_count'
}
for key in list(pop_type.keys()):
pattern = re.compile(r'.*(%s\((\d+)\)).*' % key)
match = pattern.match(pop_str)
if match:
# logging.info match.group(1)
# logging.info match.group(2)
weibo[pop_type[key]] = int(match.group(2))
else:
# logging.info key, 'not found.'
weibo[pop_type[key]] = 0
except Exception as e:
logging.info(e)
return None
# logging.info_dict(weibo)
return weibo
def save(self, dist_dir='result'):
'''
save the search results to file
'''
if dist_dir not in os.listdir(os.curdir):
os.mkdir(dist_dir)
for w in self.results:
file_name = ''.join([
'_'.join([k for k in w['keywords']]),
w['mid']
])
file_name += '.txt'
f = codecs.open(os.path.join(dist_dir, file_name), 'w', 'utf-8')
json.dump(w, f, ensure_ascii = False, default=json_util.default, indent = 2)
# logging.info(w['text'])
logging.info('writed to file {}'.format(file_name))
return
def crawl_comments(self):
'''
crawl the comments after getting all the results and update the results list --> self
'''
client = self.wl.authorize_app()
if client:
for w in self.results:
# logging.info(w['mid'])
w['comments'] = []
crawler = WeiboCommentsCrawler(client, weibo_mid = w['mid'])
r = crawler.crawl()
# filter out the unrelated fields
for c in r:
c.pop('status')
w['comments'].extend(r)
else:
            logging.error('认证失败,不能获取评论列表')  # authorization failed; cannot fetch the comment list
return
def test():
wc = WeiboCrawler('火影忍者', USER_NAME, PASSWD)
wc.crawl(1, comments = True)
wc.save()
# wl = WeiboLogin(USER_NAME, PASSWD, driver)
# c = wl.authorize_app(APP_DATA)
# logging.info c.get('users/show', uid=1282440983)
if __name__ == '__main__':
test()
| KeithYue/weibo-keywords-crawler | weibo_crawler.py | Python | mit | 8,723 |
from schemas import (material)
from os import (environ)
MONGO_URI = environ.get('MONGOLAB_URI')
PUBLIC_METHODS = ['GET']
PUBLIC_ITEM_METHODS = ['GET']
RESOURCE_METHODS = ['GET']
ITEM_METHODS = ['GET']
DOMAIN = {
'materials': material
}
| olivettigroup/synthesis-api | synthesisapi/settings.py | Python | mit | 242 |
# -*- coding: utf8 -*-
import textwrap
# https://docs.python.org/3.6/library/textwrap.html
def wrap(string, max_width):
return textwrap.fill(string, max_width)
if __name__ == '__main__':
string, max_width = input(), int(input())
result = wrap(string, max_width)
print(result)
| ejspeiro/HackerRank | all_domains/python/strings/text_wrap.py | Python | mit | 296 |
"""
@brief test tree node (time=3s)
"""
import sys
import os
import unittest
import warnings
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder
from pymyinstall.installhelper.install_venv_helper import create_virtual_env, run_venv_script
class TestInstallWithDeps(unittest.TestCase):
def test_venv_install_deps(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fold = os.path.dirname(os.path.abspath(__file__))
src_ = os.path.normpath(os.path.abspath(
os.path.join(fold, "..", "..", "src")))
assert os.path.exists(src_)
temp = get_temp_folder(__file__, "temp_venv_install_deps")
if __name__ != "__main__":
warnings.warn("does not work well from a virtual environment")
return
if sys.version_info[0] == 2:
# using nose so previous test is true
warnings.warn(
"does not work well from a virtual environment (Python 2.7)")
return
out = create_virtual_env(temp, fLOG=fLOG)
src_ = src_.replace("\\", "/")
temp = temp.replace("\\", "/")
script = ["import sys",
"sys.path.append('{0}')".format(src_),
"from pymyinstall.packaged import install_module_deps",
"install_module_deps('imbox', temp_folder='{0}', source='2')".format(
temp),
]
file_script = os.path.join(temp, "test_install_deps.py")
with open(file_script, "w") as f:
f.write("\n".join(script))
out = run_venv_script(temp, file_script, fLOG=fLOG, file=True)
if "installing module six" not in out:
raise Exception(out)
if __name__ == "__main__":
unittest.main()
| sdpython/pymyinstall | _unittests/ut_packaged/test_LONG_install_with_deps.py | Python | mit | 1,872 |