| code (string) | repo_name (string) | path (string) | language (string) | license (string) | size (int32) |
|---|---|---|---|---|---|
class amicable():
    def d(self, n):
        """Return the sum of the proper divisors of n."""
        if n == 1:
            return 0
        sum_of_factors = 0
        for i in range(1, int(n**0.5) + 1):
            if n % i == 0:
                sum_of_factors += i
                partner = n // i
                # add the paired divisor, avoiding double-counting a square
                # root and excluding n itself
                if partner != i and partner != n:
                    sum_of_factors += partner
        return sum_of_factors

    def __call__(self, n):
        sum_of_amicable = 0
        for i in range(1, n):
            di = self.d(i)
            # i is amicable when d(d(i)) == i and d(i) != i
            if self.d(di) == i and di != i:
                sum_of_amicable += i
        return sum_of_amicable

def main():
    euler_21 = amicable()
    n = 10000
    print(euler_21(n))

if __name__ == "__main__":
    main()
| higee/project_euler | 21-30/21.py | Python | mit | 860 |
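A quick way to sanity-check the divisor-sum logic above is the classic amicable pair (220, 284); the helper below is an illustrative sketch, not part of the original repository.

```python
# Illustrative check: 220 and 284 should be each other's proper-divisor sums.
def proper_divisor_sum(n):
    return sum(i for i in range(1, n) if n % i == 0)

assert proper_divisor_sum(220) == 284
assert proper_divisor_sum(284) == 220
print("220 and 284 are amicable")
```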
# -*- coding: utf-8 -*-
"""
Django settings for ember_demo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this; the import is attempted even in Dev.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Common(Configuration):
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
'rest_framework',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
# Your stuff: custom apps go here
'core',
'api',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
INSTALLED_APPS += (
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
########## END APP CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
########## END FIXTURE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
########## END EMAIL CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Agconti', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/ember_demo')
########## END DATABASE CONFIGURATION
########## CACHING
# Do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
########## END CACHING
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## End URL Configuration
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
########## END AUTHENTICATION CONFIGURATION
########## Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
########## END Custom user app defaults
########## SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
########## END SLUGLIFIER
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## Your common stuff: Below this line define 3rd party library settings
class Local(Common):
########## DEBUG
DEBUG = values.BooleanValue(True)
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## Mail settings
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
########## End mail settings
########## django-debug-toolbar
MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
########## end django-debug-toolbar
########## Your local stuff: Below this line define 3rd party library settings
class Production(Common):
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## SECRET KEY
SECRET_KEY = values.SecretValue()
########## END SECRET KEY
########## django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
########## end django-secure
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
########## END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
########## STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,
AWS_EXPIREY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
########## END STORAGE CONFIGURATION
########## EMAIL
DEFAULT_FROM_EMAIL = values.Value(
'ember_demo <[email protected]>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[ember_demo] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
########## END TEMPLATE CONFIGURATION
########## CACHING
# Only do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
########## END CACHING
########## Your production stuff: Below this line define 3rd party library settings
| agconti/Ember-Demo | ember_demo/config/settings.py | Python | mit | 14,218 |
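Because these settings are django-configurations classes rather than a plain settings module, one of the `Common` subclasses has to be activated explicitly at startup. The sketch below shows the usual pattern; the module path "config.settings" and the default "Local" configuration are assumptions, not values taken from this repository.

```python
# Illustrative manage.py sketch for django-configurations.
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
    os.environ.setdefault("DJANGO_CONFIGURATION", "Local")  # or "Production"

    # django-configurations ships its own execute_from_command_line
    from configurations.management import execute_from_command_line
    execute_from_command_line(sys.argv)
```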
import sys, struct, array
import SocketServer
# import StringIO as StringIO
# import pygame
p = 0x08d682598db70a889ff1bc7e3e00d602e9fe9e812162d4e3d06954b2ff554a4a21d5f0aab3eae5c49ac1aec7117709cba1b88b79ae9805d28ddb99be07ba05ea219654afe0c8dddac7e73165f3dcd851a3c8a3b6515766321420aff177eaaa7b3da39682d7e773aa863a729706d52e83a1d0e34d69b461c837ed239745d6c50f124e34f4d1d00ad15d6ebabda8c189c7b8b35b5bae7a9cbafc5f09bd506a39bd9d2d9245324f02ff7254fab4ab17f7a165d49e318baeb8effc4e1a3f1251d2ea1ab93f767bd6dcf5567406550ea1f194ef7deb1b2fec8b30520b6777fea1b305593db941f9ad8ce1eba6f77c3a104bd97448ec0c11688c5bf82e85c90234abfc5
q = 0x0f67e886d1a0d1e59a53b4aa831c9bcb39a5d0a8f
g = 0x27d6a1359821e2a758a93f5c06ebb26382a06a4681e7cf44d71aeff2390c87d20ce7cd885fb01fd84ad9d52839a8ae163bfee5d09820fea1a09f814801cb157b2c5bc4636d042fb2ac1a836f33adafd6735826ae1e96c3bfbd04f7df672a14120f6780e8848ff3b3123004654127c9d25843cd54c68c396a410a2f0496e8ebb35b971993dee0f596388911277fce46ff3c5191e7e76262875bb3368724d3a40c852ccc80be4dc82335fb9267c6ff0e20396ae8bb2d51e35f15fbd07fa1b354944c285367ac88763dd00fe6fe0aab5a49faf7bc10f8e90ba376efdc034e9e1cae7e79ac906aed3b513c5f3452dc33eb307ab3d45efe92a31b1cd9a6f52dd5fb09
y = 0x6bff47f5ea736b03c85885b0bd0f1f7fa2a7efef8812c544ab47f4aa3542235f5a298fc778bb9263223c66d149f88d377b1e70a5715e4554776127ffb874e218d7c75a3c6202cc3e2cfb6a5a4cf34e7e8d5428b90b7aa1dbf9a7e965feab029220266ad0dabade6ae09362f6463eea60e3133bb79fc4af511057e31574f4b0f34b848b180fa20da7d9a6d8adedded9819da20b8923073e35f43ca75eeb9a1ab5451c3a5446306f93ef246759f59e65e498032d48aece56f437b4b7179daf3dfa80d6a36c211ed5acdfeaf91a7e8070a49a521f3c2e411a26eeaf8fab697535914982f1f7cda1e1aa1aac602f9606ea326632b4fbabf6b361fe118637e048c482
def bytesToInt(s):
x = 0
for c in s:
x = (x << 8) | ord(c)
return x
def verifySig(r, s, m):
#DSA, straight from Wikipedia
if not 0 < s < q and 0 < r < q:
return False
w = pow(s, q-2, q)
u1 = m*w % q
u2 = r*w % q
v = pow(g, u1, p) * pow(y, u2, p) % p
return (v % q) == r
def superHash(b):
b += '0' * (-len(b) % 2)
h = (len(b) + 1) * (len(b) ^ 42)
x = 88172645463325252
for i, c in enumerate(array.array('H', b)):
x ^= (x<<13) & 0xFFFFFFFFFFFFFFFF
x ^= (x>>7) & 0xFFFFFFFFFFFFFFFF
x ^= (x<<17) & 0xFFFFFFFFFFFFFFFF
h += c * (((i % 7) + 9) ** (i % 25))
if i % 2:
h *= x | i
else:
h += x | i
h &= 0xFFFFFFFFFFFFFFFF
h ^= (len(b) ^ 1) * (len(b) + 42)
h &= 0xFFFFFFFFFFFFFFFF
return h
class HandleCheckin(SocketServer.BaseRequestHandler):
def readStr(self):
req = self.request
prefix = req.recv(2)
if prefix != '\x12\xae':
req.sendall("Incorrect prefix\n")
req.close()
return None
leng = struct.unpack("<I", req.recv(4))[0]
toRead = ""
while len(toRead) < leng:
toRead += req.recv(leng - len(toRead))
if len(toRead) > leng:
req.sendall("Length does not match input data size\n")
req.close()
return None
return toRead
def handle(self):
req = self.request
req.sendall("""Welcome to the new and improved Music Box! Please provide your signed music file.""")
data = self.readStr()
if data is None or len(data) < 48:
req.sendall("Incomplete header\n")
return
elif len(data) > 12345678:
req.sendall("The data. It is too much!\n")
return
r = bytesToInt(data[:20])
s = bytesToInt(data[20:40])
h = bytesToInt(data[40:48])
sound = data[48:]
if not verifySig(r, s, h):
req.sendall("Invalid signature\n")
return
elif h != superHash(sound):
req.sendall("Message hash does not match\n")
return
else:
req.sendall("Success!\n")
if "Secret backdoor lol GIMME THE FLAG" in sound:
with open('flag.txt','r') as f:
req.sendall(f.read() + "\n")
else:
req.sendall("Unfortunately, the musicbox is not available at the moment.\n")
req.close()
# f = StringIO.StringIO(sound)
# pygame.mixer.music.load(f)
# pygame.mixer.music.play(loops=-1)
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
# pygame.mixer.init()
HOST, PORT = sys.argv[1], int(sys.argv[2])
print 'Running on port', PORT
server = ThreadedServer((HOST, PORT), HandleCheckin)
server.allow_reuse_address = True
server.serve_forever()
| nickbjohnson4224/greyhat-crypto-ctf-2014 | challenges/musicbox/musicbox.py | Python | mit | 4,204 |
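The readStr method above defines a small framing protocol: a two-byte magic prefix, a little-endian uint32 length, then the payload. A minimal client-side sketch of that framing (hypothetical payload, Python 3 syntax):

```python
# Illustrative sketch: frame a payload the way HandleCheckin.readStr expects.
import struct

def frame_message(payload):
    return b'\x12\xae' + struct.pack('<I', len(payload)) + payload

framed = frame_message(b'hello musicbox')
print(framed[:2], struct.unpack('<I', framed[2:6])[0])  # b'\x12\xae' 14
```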
#!/usr/bin/env python
"""
This module contains the :class:`.DataType` class and its subclasses. These
types define how data should be converted during the creation of a
:class:`.Table`.
A :class:`TypeTester` class is also included which can be used to infer data
types from column data.
"""
from copy import copy
from agate.data_types.base import DEFAULT_NULL_VALUES, DataType # noqa
from agate.data_types.boolean import Boolean
from agate.data_types.date import Date
from agate.data_types.date_time import DateTime
from agate.data_types.number import Number
from agate.data_types.text import Text
from agate.data_types.time_delta import TimeDelta
from agate.exceptions import CastError # noqa
class TypeTester(object):
"""
Infer data types for the columns in a given set of data.
:param force:
A dictionary where each key is a column name and each value is a
:class:`.DataType` instance that overrides inference.
:param limit:
An optional limit on how many rows to evaluate before selecting the
most likely type. Note that applying a limit may mean errors arise when
the data is cast--if the guess is proved incorrect in further rows of
data.
:param types:
A sequence of possible types to test against. This can be used to specify
what data formats you want to test against. For instance, you may want
to exclude :class:`TimeDelta` from testing. It can also be used to pass
options such as ``locale`` to :class:`.Number` or ``cast_nulls`` to
:class:`.Text`. Take care in specifying the order of the list. It is
the order they are tested in. :class:`.Text` should always be last.
"""
def __init__(self, force={}, limit=None, types=None):
self._force = force
self._limit = limit
if types:
self._possible_types = types
else:
# In order of preference
self._possible_types = [
Boolean(),
Number(),
TimeDelta(),
Date(),
DateTime(),
Text()
]
def run(self, rows, column_names):
"""
Apply type inference to the provided data and return an array of
column types.
:param rows:
The data as a sequence of any sequences: tuples, lists, etc.
"""
num_columns = len(column_names)
hypotheses = [set(self._possible_types) for i in range(num_columns)]
force_indices = [column_names.index(name) for name in self._force.keys()]
if self._limit:
sample_rows = rows[:self._limit]
elif self._limit == 0:
text = Text()
return tuple([text] * num_columns)
else:
sample_rows = rows
for row in sample_rows:
for i in range(num_columns):
if i in force_indices:
continue
h = hypotheses[i]
if len(h) == 1:
continue
for column_type in copy(h):
if len(row) > i and not column_type.test(row[i]):
h.remove(column_type)
column_types = []
for i in range(num_columns):
if i in force_indices:
column_types.append(self._force[column_names[i]])
continue
h = hypotheses[i]
# Select in preferred order
for t in self._possible_types:
if t in h:
column_types.append(t)
break
return tuple(column_types)
| JoeGermuska/agate | agate/data_types/__init__.py | Python | mit | 3,646 |
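A short usage sketch of the TypeTester described above (assumes the agate package is installed; the sample rows are made up):

```python
# Illustrative sketch: infer column types for a small in-memory table.
import agate

rows = [
    ('2015-01-01', '1', 'true'),
    ('2015-01-02', '2', 'false'),
]
column_names = ['day', 'count', 'flag']

column_types = agate.TypeTester().run(rows, column_names)
print([type(t).__name__ for t in column_types])  # expected: Date, Number, Boolean
```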
from django.test import TestCase
from seedsdb.models import (
Plant, Tag, Harvest, Activity
)
class TestPlant(TestCase):
def tearDown(self):
Plant.objects.all().delete()
def make_test_plant(self, aliases=None):
if aliases:
aliases = "|".join(aliases)
else:
aliases = ""
plant = Plant.objects.create(
name="Love in idleness",
description="Test description",
aliases=aliases
)
return plant
def test_unicode(self):
"The unicode method of a plant returns the expected value"
plant = self.make_test_plant()
self.assertEqual(u"Love in idleness", unicode(plant))
def test_slug_create(self):
"Creating a new plant sets the slug as expected "
plant = self.make_test_plant()
self.assertEqual("love-in-idleness", plant.slug)
def test_slug_update(self):
"Renaming an existing plant updates the slug as expected"
plant = self.make_test_plant()
plant.name = 'Love lies oozing'
plant.save()
self.assertEqual("love-lies-oozing", plant.slug)
def test_get_absolute_url(self):
plant = self.make_test_plant()
expected_url = "/plants/detail/love-in-idleness/"
self.assertEqual(expected_url, plant.get_absolute_url())
def test_aliases_string_none(self):
"Ensure the aliases_string property works when no alias is defined"
plant = self.make_test_plant()
self.assertEqual(u"", plant.aliases_string)
def test_aliases_string_one(self):
"Ensure the aliases_string property works when one alias is defined"
plant = self.make_test_plant(aliases=["Alternative"])
self.assertEqual(u"Alternative", plant.aliases_string)
def test_aliases_string_multiple(self):
"Ensure the aliases_string property works when more than one alias is defined"
plant = self.make_test_plant(aliases=["Alternative", "Beta"])
self.assertEqual(u"Alternative, Beta", plant.aliases_string)
def test_aliases_search_none(self):
"Ensure the aliases_search property works when no alias is defined"
plant = self.make_test_plant()
self.assertEqual(u"", plant.aliases_search)
def test_aliases_search_one(self):
"Ensure the aliases_search property works when one alias is defined"
plant = self.make_test_plant(aliases=["Alternative"])
self.assertEqual(u"Alternative", plant.aliases_search)
def test_aliases_search_multiple(self):
"Ensure the aliases_search property works when more than one alias is defined"
plant = self.make_test_plant(aliases=["Alternative", "Beta"])
self.assertEqual(u"Alternative Beta", plant.aliases_search)
class TestTag(TestCase):
def tearDown(self):
Tag.objects.all().delete()
def test_unicode(self):
"The unicode method of a tag returns the expected value"
tag = Tag.objects.create(caption="test tag")
self.assertEqual(u"test tag", unicode(tag))
def test_tag_normalisation(self):
"A tag is normalised on save as expected"
tag = Tag.objects.create(caption=" VALUE ")
self.assertEqual("value", tag.caption)
class TestHarvest(TestCase):
def setUp(self):
self.test_plant = Plant.objects.create(
name="Love in idleness",
description="Test description",
)
def tearDown(self):
Harvest.objects.all().delete()
def test_unicode(self):
"The unicode method of a harvest returns the expected value"
harvest = Harvest.objects.create(season=2014, plant=self.test_plant)
self.assertEqual(u"2014 harvest of Love in idleness", unicode(harvest))
class TestActivity(TestCase):
def setUp(self):
self.test_plant = Plant.objects.create(
name="Love in idleness",
description="Test description",
)
def tearDown(self):
Activity.objects.all().delete()
def test_unicode(self):
"The unicode method of an activity returns the expected value"
activities = ['Sow', 'Plant out', 'Flowering', 'Harvest']
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
for i, activity_name in enumerate(activities):
for j, month in enumerate(months):
activity = Activity.objects.create(plant=self.test_plant,
activity=i + 1,
month=j + 1)
expected = u"{0} Love in idleness in {1}".format(activity_name, month)
self.assertEqual(expected, unicode(activity))
| timjarman/seeds | seeds/seedsdb/tests/unit/test_models.py | Python | mit | 4,829 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Temperature conversion constants
KELVIN_OFFSET = 273.15
FAHRENHEIT_OFFSET = 32.0
FAHRENHEIT_DEGREE_SCALE = 1.8
# Wind speed conversion constants
MILES_PER_HOUR_FOR_ONE_METER_PER_SEC = 2.23694
KM_PER_HOUR_FOR_ONE_METER_PER_SEC = 3.6
KNOTS_FOR_ONE_METER_PER_SEC = 1.94384
# Barometric conversion constants
HPA_FOR_ONE_INHG = 33.8639
# Visibility distance conversion constants
MILE_FOR_ONE_METER = 0.000621371
KMS_FOR_ONE_METER = .001
# Decimal precision
ROUNDED_TO = 2
def kelvin_dict_to(d, target_temperature_unit):
"""
Converts all the values in a dict from Kelvin temperatures to the
specified temperature format.
:param d: the dictionary containing Kelvin temperature values
:type d: dict
:param target_temperature_unit: the target temperature unit, may be:
'celsius' or 'fahrenheit'
:type target_temperature_unit: str
:returns: a dict with the same keys as the input dict and converted
temperature values as values
:raises: *ValueError* when unknown target temperature units are provided
"""
if target_temperature_unit == 'kelvin':
return d
elif target_temperature_unit == 'celsius':
return {key: kelvin_to_celsius(d[key]) for key in d}
elif target_temperature_unit == 'fahrenheit':
return {key: kelvin_to_fahrenheit(d[key]) for key in d}
else:
raise ValueError("Invalid value for target temperature conversion \
unit")
def kelvin_to_celsius(kelvintemp):
"""
Converts a numeric temperature from Kelvin degrees to Celsius degrees
:param kelvintemp: the Kelvin temperature
:type kelvintemp: int/long/float
:returns: the float Celsius temperature
:raises: *TypeError* when bad argument types are provided
"""
if kelvintemp < 0:
raise ValueError(__name__ +
": negative temperature values not allowed")
celsiustemp = kelvintemp - KELVIN_OFFSET
return float("{0:.2f}".format(celsiustemp))
def kelvin_to_fahrenheit(kelvintemp):
"""
Converts a numeric temperature from Kelvin degrees to Fahrenheit degrees
:param kelvintemp: the Kelvin temperature
:type kelvintemp: int/long/float
:returns: the float Fahrenheit temperature
:raises: *TypeError* when bad argument types are provided
"""
if kelvintemp < 0:
raise ValueError(__name__ +
": negative temperature values not allowed")
fahrenheittemp = (kelvintemp - KELVIN_OFFSET) * \
FAHRENHEIT_DEGREE_SCALE + FAHRENHEIT_OFFSET
return float("{0:.2f}".format(fahrenheittemp))
def metric_wind_dict_to_imperial(d):
"""
Converts all the wind values in a dict from meters/sec (metric measurement
system) to miles/hour (imperial measurement system)
.
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to miles/hour
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
result[key] = value * MILES_PER_HOUR_FOR_ONE_METER_PER_SEC
else:
result[key] = value
return result
def metric_wind_dict_to_km_h(d):
"""
Converts all the wind values in a dict from meters/sec
to km/hour.
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to km/hour
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
result[key] = value * KM_PER_HOUR_FOR_ONE_METER_PER_SEC
else:
result[key] = value
return result
def metric_wind_dict_to_knots(d):
"""
Converts all the wind values in a dict from meters/sec
to knots
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to knots
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
result[key] = value * KNOTS_FOR_ONE_METER_PER_SEC
else:
result[key] = value
return result
def metric_wind_dict_to_beaufort(d):
"""
Converts all the wind values in a dict from meters/sec
to the corresponding Beaufort scale level (which is not an exact number but rather
represents a range of wind speeds - see: https://en.wikipedia.org/wiki/Beaufort_scale).
Conversion table: https://www.windfinder.com/wind/windspeed.htm
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to Beaufort level
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
if value <= 0.2:
bf = 0
elif 0.2 < value <= 1.5:
bf = 1
elif 1.5 < value <= 3.3:
bf = 2
elif 3.3 < value <= 5.4:
bf = 3
elif 5.4 < value <= 7.9:
bf = 4
elif 7.9 < value <= 10.7:
bf = 5
elif 10.7 < value <= 13.8:
bf = 6
elif 13.8 < value <= 17.1:
bf = 7
elif 17.1 < value <= 20.7:
bf = 8
elif 20.7 < value <= 24.4:
bf = 9
elif 24.4 < value <= 28.4:
bf = 10
elif 28.4 < value <= 32.6:
bf = 11
else:
bf = 12
result[key] = bf
else:
result[key] = value
return result
def metric_pressure_dict_to_inhg(d):
"""
Converts all barometric pressure values in a dict to "inches of mercury."
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to "Hg or inHg (inches of mercury)
Note what OWM says about pressure: "Atmospheric pressure [is given in hPa]
(on the sea level, if there is no sea_level or grnd_level data)"
"""
result = dict()
for key, value in d.items():
if value is None:
continue
result[key] = round((value / HPA_FOR_ONE_INHG), ROUNDED_TO)
return result
def visibility_distance_to(v, target_visibility_unit='kilometers'):
"""
Converts visibility distance (in meters) to kilometers or miles
Defaults to kilometer conversion
:param v: the visibility distance value in meters
:type v: int
:param target_visibility_unit: the unit of conversion
:type target_visibility_unit: str
:returns: a converted value for visibility_distance (float)
"""
if v is None:
return v
if target_visibility_unit == 'kilometers':
const = KMS_FOR_ONE_METER
elif target_visibility_unit == 'miles':
const = MILE_FOR_ONE_METER
else:
raise ValueError('Invalid value for target visibility distance unit')
return round(v * const, ROUNDED_TO)
| csparpa/pyowm | pyowm/utils/measurables.py | Python | mit | 7,259 |
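A quick usage sketch of the conversion helpers above, assuming it runs in the same module or namespace as those functions (the input numbers are arbitrary):

```python
# Illustrative sketch: exercise a few of the converters defined above.
print(kelvin_to_celsius(293.15))                                  # 20.0
print(kelvin_to_fahrenheit(293.15))                               # 68.0
print(kelvin_dict_to({'temp': 293.15, 'temp_min': 288.15}, 'celsius'))
print(metric_wind_dict_to_beaufort({'speed': 6.0, 'deg': 120}))   # {'speed': 4, 'deg': 120}
```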
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-18 10:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.contrib.routable_page.models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
import wagtailmd.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailimages', '0019_delete_filter'),
('taggit', '0002_auto_20150616_2121'),
('wagtailcore', '0040_page_draft_title'),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=80, unique=True)),
],
options={
'verbose_name_plural': 'Categories',
'verbose_name': 'Category',
},
),
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('description', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
bases=(wagtail.contrib.routable_page.models.RoutablePageMixin, 'wagtailcore.page'),
),
migrations.CreateModel(
name='BlogPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LandingPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image')), ('two_columns', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())), icon='arrow-right', label='Left column content')), ('right_column', wagtail.core.blocks.StreamBlock((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())), icon='arrow-right', label='Right column content'))))), ('embedded_video', wagtail.embeds.blocks.EmbedBlock(icon='media'))), blank=True, null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtailmd.utils.MarkdownField()),
('date', models.DateTimeField(default=datetime.datetime.today, verbose_name='Post date')),
('excerpt', wagtailmd.utils.MarkdownField(blank=True, verbose_name='excerpt')),
('categories', modelcluster.fields.ParentalManyToManyField(blank=True, to='blog.BlogCategory')),
('header_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Tag',
fields=[
],
options={
'indexes': [],
'proxy': True,
},
bases=('taggit.tag',),
),
migrations.AddField(
model_name='postpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='blog.BlogPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='blogpagetag',
name='content_object',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_tags', to='blog.PostPage'),
),
migrations.AddField(
model_name='blogpagetag',
name='tag',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_blogpagetag_items', to='taggit.Tag'),
),
]
| hellowebbooks/hellowebbooks-website | blog/migrations/0001_initial.py | Python | mit | 5,381 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Coding\Python\PythonPackageLinks\dataquick\plugins\visualizations\ui\psd.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PSD(object):
def setupUi(self, PSD):
PSD.setObjectName("PSD")
PSD.resize(1000, 561)
self.verticalLayout = QtWidgets.QVBoxLayout(PSD)
self.verticalLayout.setContentsMargins(3, 3, 3, 3)
self.verticalLayout.setObjectName("verticalLayout")
self.splitter = QtWidgets.QSplitter(PSD)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_left = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_left.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_left.setObjectName("verticalLayout_left")
self.listView_datasets = DQFListView(self.layoutWidget)
self.listView_datasets.setObjectName("listView_datasets")
self.verticalLayout_left.addWidget(self.listView_datasets)
self.plotWidget = PlotWidget(self.splitter)
self.plotWidget.setObjectName("plotWidget")
self.verticalLayout.addWidget(self.splitter)
self.retranslateUi(PSD)
QtCore.QMetaObject.connectSlotsByName(PSD)
def retranslateUi(self, PSD):
_translate = QtCore.QCoreApplication.translate
PSD.setWindowTitle(_translate("PSD", "Particle Size Distribution"))
from dataquick.qt.plotlist import DQFListView
from dataquick.qt.plotwidget import PlotWidget
| vincentchevrier/dataquick | dataquick/plugins/visualizations/ui/psd.py | Python | mit | 1,775 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# TODO: put package requirements here
]
setup_requirements = [
# TODO(nbargnesi): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='proxme',
version='0.1.0',
description="Serves your proxy auto-config (PAC) content.",
long_description=readme + '\n\n' + history,
author="Nick Bargnesi",
author_email='[email protected]',
url='https://github.com/nbargnesi/proxme',
packages=find_packages(include=['proxme']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='proxme',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
entry_points = {
'console_scripts': [
'proxme = proxme.__main__:main'
],
}
)
| nbargnesi/proxme | setup.py | Python | mit | 1,716 |
from dataworkflow.data import get_data
import pandas as pd
def test_data():
    data = get_data()
    assert all(data.columns == ['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare',
                                'Embarked', 'FamilyTot', 'FamStatus', 'age_group'])
    assert all([len(data[col]) == 891
                for col in ['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'FamilyTot', 'FamStatus']])
    assert all([str(data[col].dtype) == 'category'
                for col in ['Survived', 'Pclass', 'Sex', 'Embarked', 'FamStatus', 'age_group']])
| jco44/UdacityDataAnalysis | dataworkflow/tests/test_data.py | Python | mit | 551 |
# Fluent Python Book
# List comprehensions are faster than for-loops
import time
from random import choices
symbols = list('abcdefghijklmn')
print(symbols)
symbols_big = choices(symbols, k=2000000)
# print(symbols_big)
start = time.time()
ord_list1 = []
for sym in symbols_big:
    ord_list1.append(ord(sym))
# print('ord list1:', ord_list1)
end = time.time()
print('for loop ran in %f s' % (end - start))
start = time.time()
# list comprehension
ord_list2 = [ord(sym) for sym in symbols_big]
# print('ord list2:', ord_list2)
end = time.time()
print('list comprehension ran in %f s' % (end - start))
# let's do a performance benchmark of this list comprehension
l_nums = [i for i in range(1000000)]
start = time.time()
sq_nums = []
for i in l_nums:
    sq_nums.append(i ** 2)
end = time.time()
print('for loop ran in %f s' % (end - start))
start = time.time()
sq_nums = [i ** 2 for i in range(1000000)]
end = time.time()
print('list comp ran in %f s' % (end - start))
| suresh/notebooks | fluentpy/list_comprehension.py | Python | mit | 968 |
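For less noisy numbers than back-to-back time.time() calls, the same comparison can be made with the standard-library timeit module; a minimal sketch:

```python
# Illustrative sketch: repeat each statement several times with timeit.
import timeit

for_loop = timeit.timeit(
    'result = []\nfor i in range(1000000):\n    result.append(i ** 2)',
    number=5)
list_comp = timeit.timeit('[i ** 2 for i in range(1000000)]', number=5)
print('for loop:  %f s' % for_loop)
print('list comp: %f s' % list_comp)
```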
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core Developers
# Copyright (c) 2015 Solarminx
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework import SolariTestFramework
from solarirpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
def dblsha(b):
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:
n = []
if len(cur) & 1:
cur.append(cur[-1])
for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
def template_to_bytes(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return blk
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytes(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(SolariTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def run_test(self):
node = self.nodes[0]
node.setgenerate(True, 1) # Mine a block to leave initial block download
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = b'\xff'
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytes(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()
| CoinAge-DAO/solari | qa/rpc-tests/getblocktemplate_proposals.py | Python | mit | 6,404 |
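The varlenEncode helper above implements Bitcoin's compact-size ("varint") encoding; a small worked example of its output, assuming it runs in the same module (Python 3 syntax, values chosen arbitrarily):

```python
# Illustrative sketch: compact-size encodings produced by varlenEncode above.
from binascii import b2a_hex

for n in (10, 0xfd, 0x1234, 0x12345678):
    print(hex(n), b2a_hex(varlenEncode(n)))
# 0xa        -> 0a
# 0xfd       -> fdfd00
# 0x1234     -> fd3412
# 0x12345678 -> fe78563412
```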
# Created by PyCharm Pro Edition
# User: Kaushik Talukdar
# Date: 30-03-17
# Time: 11:35 PM
# tuple can't be modified but the variable holding a tuple can be assigned new values
# basically changing the tuple
cars = ["bmw", "rollsroyce", "audi", "ferrari"]
print(cars)
cars = ["bmw", "koenigsegg", "audi", "ferrari"]
print(cars)
| KT26/PythonCourse | 3. Working with lists/17.py | Python | mit | 397 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flexget.utils import json
class TestUserAPI(object):
config = 'tasks: {}'
def test_change_password(self, execute_task, api_client):
weak_password = {'password': 'weak'}
medium_password = {'password': 'a.better.password'}
strong_password = {'password': 'AVer123y$ron__g-=PaW[]rd'}
rsp = api_client.json_put('/user/', data=json.dumps(weak_password))
assert rsp.status_code == 500
rsp = api_client.json_put('/user/', data=json.dumps(medium_password))
assert rsp.status_code == 200
rsp = api_client.json_put('/user/', data=json.dumps(strong_password))
assert rsp.status_code == 200
def test_change_token(self, execute_task, api_client):
rsp = api_client.get('user/token/')
assert rsp.status_code == 200
| qvazzler/Flexget | tests/test_user.py | Python | mit | 955 |
#-*- coding: UTF-8 -*-
from django.db.models import F
from celery.task import *
from datetime import timedelta, datetime
from app.models import CustomUser
from django.utils.translation import activate,deactivate
"""
Note: the Twitter API delays the update of update_date by roughly one minute.
"""
@task
def forensic():
logger = forensic.get_logger(logfile='tasks.log')
users = CustomUser.objects.filter(next_check__lt=datetime.utcnow(),half_dead=False,dead=False,configured=True,posts__gt=0)
for user in users:
logger.info("User %s, act: %d, mail: %d, lu: %s - [%s]" % (user.username, user.activity_interval, user.mail_interval, user.last_update, datetime.now()))
#Get the last update date for the user
if user.update_date():
logger.info("User %s updated their last-update date (on Twitter) - [%s]" % (user.username, datetime.utcnow()))
#Which is bigger? login or update date?
date_substract = user.bigger_date()
nowdate = datetime.utcnow()
#time from last update or login on foowill
t = nowdate - date_substract
#Check if the user is half-dead
if t.seconds >= user.activity_interval:
user.half_dead = True
user.save()
False
logger.info("User %s, is HALF-DEAD (on twitter) - [%s]" % (user.username, datetime.utcnow()))
activate(user.language)
user.send_email_halfdead()
deactivate()
@task
def killer_saver():
logger = killer_saver.get_logger(logfile='tasks.log')
users = CustomUser.objects.filter(half_dead=True, dead=False, configured=True, posts__gt=0)
for user in users:
logger.info("User %s, act: %d, mail: %d, lu: %s - [%s]" % (user.username, user.activity_interval, user.mail_interval, user.last_update, datetime.now()))
#Get the last update date for the user
if user.update_date():
logger.info("User %s updated their last-update date (on Twitter) - [%s]" % (user.username, datetime.utcnow()))
#Which is bigger? login or update date?
date_substract = user.bigger_date()
nowdate = datetime.utcnow()
#time from last update or login on foowill
if nowdate > date_substract: #Correction for a date_substract in future (synchronization problems)
t = nowdate - date_substract
else:
t = timedelta(seconds=0)
#Check if the user status
if t.seconds < user.activity_interval:
#Is not still half_dead -> save it
user.half_dead = False
user.last_update = nowdate
user.next_check = nowdate + timedelta(seconds=user.activity_interval)
user.save()
logger.info("User %s, is SAVED (on twitter) - [%s]" % (user.username, datetime.utcnow()))
activate(user.language)
user.send_email_still_alive()
deactivate()
#user.update_twitter_status("Still alive, don't worry. http://foowill.com %s" % datetime.now() )
elif t.seconds >= user.activity_interval + user.mail_interval:
user.dead = True
user.save()
logger.info("User %s, is DEAD (on twitter) - [%s]" % (user.username, datetime.utcnow()))
activate(user.language)
user.send_email_hope_to_read()
if user.mail_interval == 0:
user.deliver_all_to_twitter()
else:
user.posts_sended = user.posts
user.deliver_one_to_twitter()
deactivate()
else:
logger.info("User %s, is STILL HALF-DEAD (on twitter) - [%s]" % (user.username, datetime.utcnow()))
#TODO: if email: Send email for another reminder.
@task
def tweet_sender():
logger = killer_saver.get_logger(logfile='tasks.log')
users = CustomUser.objects.filter(half_dead=True, dead=True, configured=True, posts_sended__gt=0, next_check_mail__lt=datetime.utcnow())
for user in users:
user.deliver_one_to_twitter()
| eduherraiz/foowill | tasks.py | Python | mit | 3869 |
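These tasks are presumably run on a periodic schedule; the sketch below shows how a celery beat schedule for them might look. The task names and intervals are assumptions, not values taken from this repository.

```python
# Illustrative sketch (hypothetical schedule): drive the tasks above with celery beat.
from datetime import timedelta

CELERYBEAT_SCHEDULE = {
    'forensic-every-minute': {'task': 'tasks.forensic', 'schedule': timedelta(minutes=1)},
    'killer-saver-every-minute': {'task': 'tasks.killer_saver', 'schedule': timedelta(minutes=1)},
    'tweet-sender-every-minute': {'task': 'tasks.tweet_sender', 'schedule': timedelta(minutes=1)},
}
```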
from flask_restful import Resource, Api
from flask_restful_swagger import swagger
from flauthority import app
from flauthority import api, app, celery, auth
from ModelClasses import AnsibleCommandModel, AnsiblePlaybookModel, AnsibleExtraArgsModel
import celery_runner
class TaskStatus(Resource):
@swagger.operation(
notes='Get the status of a certificate generation task/job',
nickname='taskstatus',
parameters=[
{
"name": "task_id",
"description": "The ID of the task/job to get status for",
"required": True,
"allowMultiple": False,
"dataType": 'string',
"paramType": "path"
}
])
@auth.login_required
def get(self, task_id):
task = celery_runner.generate_certificate.AsyncResult(task_id)
if task.state == 'PENDING':
result = "Task not found"
resp = app.make_response((result, 404))
return resp
elif task.state == 'PROGRESS':
result_obj = {'Status': "PROGRESS",
'description': "Task is currently running",
'returncode': None}
else:
try:
return_code = task.info['returncode']
description = task.info['description']
if return_code is 0:
result_obj = {'Status': "SUCCESS",
'description': description}
else:
result_obj = {'Status': "FLAUTHORITY_TASK_FAILURE",
'description': description,
'returncode': return_code}
except:
result_obj = {'Status': "CELERY_FAILURE"}
return result_obj
api.add_resource(TaskStatus, '/api/taskstatus/<string:task_id>')
| trondhindenes/Flauthority | flauthority/api_task_status.py | Python | mit | 1,886 |
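The route registered above can be exercised with any HTTP client; a sketch using requests, where the host, credentials, and task id are placeholders rather than values from this project:

```python
# Illustrative sketch: poll the task-status endpoint defined above.
import requests

resp = requests.get(
    'http://localhost:5000/api/taskstatus/some-task-id',
    auth=('username', 'password'))  # basic-auth credentials are hypothetical

if resp.status_code == 404:
    print('Task not found')
else:
    print(resp.json())
```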
"""
WSGI config for roastdog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.base")
application = Cling(get_wsgi_application())
| chrisvans/roastdoge | config/wsgi.py | Python | mit | 422 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------
from ._logs_query_client_async import LogsQueryClient
from ._metrics_query_client_async import MetricsQueryClient
__all__ = ["LogsQueryClient", "MetricsQueryClient"]
| Azure/azure-sdk-for-python | sdk/monitor/azure-monitor-query/azure/monitor/query/aio/__init__.py | Python | mit | 492 |
import sublime, sublime_plugin, tempfile, os, re;
global g_current_file;
global g_last_view;
g_current_file = None;
g_last_view = None;
# Two methods that could be used here:
# get language name
# check if .sublime-build exists for language name
# if it doesn't, somehow get the file extension
# check for .sublime-build using file extension
# wouldn't work if it's a scratch buffer
# create temp file
# change extension (just incase) of temp file to extension of running file
# quickly switch to that file and run_command("build")
# immediately after, run_command("close")
# path = sublime.packages_path().split("\\");
# path.pop();
# path.append(view.settings().get('syntax'));
# open("/".join(path).replace("tmLanguage", "sublime-build"));
# re.search("<string>(\w+)</string>", open(os.path.join("\\".join(sublime.packages_path().split("\\")[:-1]), view.settings().get('syntax'))).read()).group(1)
class ScratchpadFile: # class to delegate the data to
def __init__(self, file):
self.file = file;
self.file_name = file.name;
def set_file(self, file):
self.file = file;
def unlink(self):
try:
os.unlink(self.file_name);
except OSError, e:
print("Couldn't remove file %s, %i, %s" % (self.file_name, e.errorno, e.strerror));
class ScratchpadCommand(sublime_plugin.TextCommand):
def __get_filetype(self):
syntaxpath = os.path.join( os.path.split( os.path.normcase(sublime.packages_path()) )[0], os.path.normcase(self.view.settings().get('syntax')) ); # obtain the absolute path to the syntax file
# so now we have a path where the last 3 entries are: packages / syntax folder / syntax.tmlanguage
# splitpath = syntaxpath.split(os.sep);
text = None;
with open(syntaxpath, "rt") as f:
text = f.read(); # not sure how the fileTypes array can be implemented in the file, but we will just read the entire file for now
if text != None:
filetype = re.search("<key>.*(\n?).*<array>.*(\n?).*<string>(\w+)<\/string>", text).group(3); # hacky regex to find first filetype result
return filetype;
# name = re.search("", text); # another to get the name (py.sublime-build doesn't exist, but python.sublime-build does)
# if os.path.exists(path):
# elif os.path.exists():
# syntax.sublime-build
# name/syntax.sublime-build
# name/name.sublime-build
def __get_selection(self):
selection = self.view.sel()[0]; # only the first selection, for now...
selectedText = "";
if selection.empty():
selectedText = self.view.substr(sublime.Region(0, self.view.size())); # grab entire file
else:
selectedText = self.view.substr(selection); # grab just the selected text
return selectedText;
def run(self, edit):
if self.view.sel()[0].empty() and not(self.view.is_dirty() or self.view.is_scratch()) and self.view.file_name() != None:
self.view.window().run_command("build");
return;
global g_current_file;
settings = sublime.load_settings("ScratchPad.sublime-settings");
filetype = "." + self.__get_filetype();
selectedText = self.__get_selection();
new_view = None;
with tempfile.NamedTemporaryFile(mode='w+t', delete=False, prefix="scratchpad", suffix=filetype) as f:
f.write(selectedText);
g_current_file = ScratchpadFile(f);
new_view = self.view.window().open_file(f.name);
global g_last_view;
g_last_view = self.view;
class ScratchpadEvent(sublime_plugin.EventListener):
def on_load(self, view):
global g_current_file;
if g_current_file != None and os.path.normcase(g_current_file.file_name) == os.path.normcase(view.file_name()):
window = view.window();
window.run_command("build");
window.run_command("close");
# g_current_file.unlink(); # build is an asynchronous call
global g_last_view;
if g_last_view != None and window.active_view() != g_last_view:
window.focus_view(g_last_view);
g_last_view = None;
g_current_file = None;
| Pugsworth/ScratchPad | ScratchPad.py | Python | mit | 3,907 |
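The __get_filetype regex above scrapes the first fileTypes entry out of a .tmLanguage plist; a more robust sketch of the same lookup with plistlib (Python 3 syntax, the path is hypothetical):

```python
# Illustrative sketch: read the first fileTypes entry from a .tmLanguage plist.
import plistlib

def first_filetype(syntax_path):
    with open(syntax_path, 'rb') as f:
        plist = plistlib.load(f)
    filetypes = plist.get('fileTypes', [])
    return filetypes[0] if filetypes else None

# print(first_filetype('/path/to/Python.tmLanguage'))
```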
"""
Demonstrate how to use major and minor tickers.
The two relevant userland classes are Locators and Formatters.
Locators determine where the ticks are and formatters control the
formatting of ticks.
Minor ticks are off by default (NullLocator and NullFormatter). You
can turn minor ticks on w/o labels by setting the minor locator. You
can also turn labeling on for the minor ticker by setting the minor
formatter
Make a plot with major ticks that are multiples of 20 and minor ticks
that are multiples of 5. Label major ticks with %d formatting but
don't label minor ticks
The MultipleLocator ticker class is used to place ticks on multiples of
some base. The FormatStrFormatter uses a string format string (e.g.,
'%d' or '%1.2f' or '%1.1f cm' ) to format the tick
The pyplot interface grid command changes the grid settings of the
major ticks of the x and y axis together. If you want to control the
grid of the minor ticks for a given axis, use for example
ax.xaxis.grid(True, which='minor')
Note, you should not use the same locator between different Axis
because the locator stores references to the Axis data and view limits
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
t = np.arange(0.0, 100.0, 0.1)
s = np.sin(0.1*np.pi*t)*np.exp(-t*0.01)
fig, ax = plt.subplots()
plt.plot(t, s)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_formatter(majorFormatter)
# for the minor ticks, use no labels; default NullFormatter
ax.xaxis.set_minor_locator(minorLocator)
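# As the docstring above notes, the minor grid for a single axis can be
# enabled separately, e.g.: ax.xaxis.grid(True, which='minor')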
plt.show()
| bundgus/python-playground | matplotlib-playground/examples/pylab_examples/major_minor_demo1.py | Python | mit | 1,698 |
from unittest import TestCase
import trafaret as t
from trafaret_validator import TrafaretValidator
class ValidatorForTest(TrafaretValidator):
t_value = t.Int()
value = 5
class ValidatorForTest2(ValidatorForTest):
test = t.String()
class TestMetaclass(TestCase):
def test_metaclass(self):
self.assertIsInstance(ValidatorForTest._validators, dict,
'Value should be instance of dict')
self.assertIn('t_value', ValidatorForTest._validators,
'Value should be in _validators')
self.assertNotIn('value', ValidatorForTest._validators,
'Value should not be in _validators')
self.assertIsInstance(ValidatorForTest._trafaret, t.Trafaret,
'Value should be instance of Trafaret')
self.assertFalse(ValidatorForTest._data,
'_data should be empty')
self.assertFalse(ValidatorForTest._errors,
                         '_errors should be empty')
def test_inheritance(self):
self.assertIsInstance(ValidatorForTest2._validators, dict,
'Value should be instance of dict')
self.assertIn('t_value', ValidatorForTest2._validators,
'Value should be in _validators')
self.assertIn('test', ValidatorForTest2._validators,
'Value should be in _validators')
self.assertNotIn('value', ValidatorForTest2._validators,
'Value should not be in _validators')
self.assertIsInstance(ValidatorForTest2._trafaret, t.Trafaret,
'Value should be instance of Trafaret')
self.assertFalse(ValidatorForTest2._data,
'_data should be empty')
self.assertFalse(ValidatorForTest2._errors,
                         '_errors should be empty')
| Lex0ne/trafaret_validator | tests/test_metaclass.py | Python | mit | 1,906 |
# collections.abc new as of 3.3, and collections is deprecated. collections
# will be unavailable in 3.9
try:
import collections.abc as collections
except ImportError:
import collections
import datetime
import logging
try:
import json
except ImportError:
import simplejson as json
import re
def get_log():
return logging.getLogger(__name__.split('.')[0])
class MarathonJsonEncoder(json.JSONEncoder):
"""Custom JSON encoder for Marathon object serialization."""
def default(self, obj):
if hasattr(obj, 'json_repr'):
return self.default(obj.json_repr())
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
if isinstance(obj, collections.Iterable) and not isinstance(obj, str):
try:
return {k: self.default(v) for k, v in obj.items()}
except AttributeError:
return [self.default(e) for e in obj]
return obj
class MarathonMinimalJsonEncoder(json.JSONEncoder):
"""Custom JSON encoder for Marathon object serialization."""
def default(self, obj):
if hasattr(obj, 'json_repr'):
return self.default(obj.json_repr(minimal=True))
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
if isinstance(obj, collections.Iterable) and not isinstance(obj, str):
try:
return {k: self.default(v) for k, v in obj.items() if (v or v in (False, 0))}
except AttributeError:
return [self.default(e) for e in obj if (e or e in (False, 0))]
return obj
def to_camel_case(snake_str):
words = snake_str.split('_')
return words[0] + ''.join(w.capitalize() for w in words[1:])
def to_snake_case(camel_str):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_str)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
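# For illustration: to_camel_case('health_checks') -> 'healthChecks', and
# to_snake_case('healthChecks') -> 'health_checks'.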
DATETIME_FORMATS = [
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%SZ', # Marathon omits milliseconds when they would be .000
]
def to_datetime(timestamp):
if (timestamp is None or isinstance(timestamp, datetime.datetime)):
return timestamp
else:
for fmt in DATETIME_FORMATS:
try:
return datetime.datetime.strptime(timestamp, fmt).replace(tzinfo=datetime.timezone.utc)
except ValueError:
pass
raise ValueError(f'Unrecognized datetime format: {timestamp}')
| thefactory/marathon-python | marathon/util.py | Python | mit | 2,499 |
from decimal import Decimal
from django.core.management.base import BaseCommand
from openpyxl import load_workbook
from contracts.models import PriceName, PriceCoast
from directory.models import Researches
class Command(BaseCommand):
def add_arguments(self, parser):
"""
        :param path - file with patient charts + dispensary (D-) registration diagnosis
"""
parser.add_argument('path', type=str)
def handle(self, *args, **kwargs):
"""
        Import of service prices.
        If the service (id) already exists, update the existing record; otherwise create a new one.
:param args:
:param kwargs:
:return:
"""
fp = kwargs["path"]
self.stdout.write("Path: " + fp)
wb = load_workbook(filename=fp)
ws = wb[wb.sheetnames[0]]
starts = False
identify = 0
price_code = 0
coast = 0
for row in ws.rows:
cells = [str(x.value) for x in row]
if not starts:
if "id" in cells and "код_прайс" in cells and "цена" in cells:
starts = True
identify = cells.index("id")
price_code = cells.index("код_прайс")
coast = cells.index("цена")
else:
price_obj = PriceName.objects.filter(pk=int(cells[price_code])).first()
research_obj = Researches.objects.filter(pk=int(cells[identify])).first()
if cells[coast]:
coast_value = Decimal(cells[coast])
if price_obj and research_obj:
PriceCoast.objects.update_or_create(price_name=price_obj, research=research_obj, defaults={'coast': coast_value})
| moodpulse/l2 | users/management/commands/price_import.py | Python | mit | 1,880 |
class O(object): pass
class A(O): pass
class B(O): pass
class C(O): pass
class D(O): pass
class E(O): pass
class K1(A,B,C): pass
class K2(D,B,E): pass
class K3(D,A): pass
class Z(K1,K2,K3): pass
print K1.__mro__
print K2.__mro__
print K3.__mro__
print Z.__mro__
| ArcherSys/ArcherSys | skulpt/test/run/t242.py | Python | mit | 262 |
class FakeFetcher(object):
"""
    Used e.g. in the Harvest tracker when we need credentials but don't need a fetcher.
"""
def __init__(self, *args, **kwargs):
pass
def fetch_user_tickets(self, *args, **kwargs):
pass
def fetch_all_tickets(self, *args, **kwargs):
pass
def fetch_bugs_for_query(self, *args, **kwargs):
pass
def get_result(self):
return []
| stxnext/intranet-open | src/intranet3/intranet3/asyncfetchers/fake.py | Python | mit | 416 |
import os
import json
import pandas
import numpy
from IPython.display import HTML
from datetime import datetime
import pandas_highcharts.core
title_name = 'Tasks'
file_name = 'tasks.csv'
css_dt_name = '//cdn.datatables.net/1.10.12/css/jquery.dataTables.min.css'
js_dt_name = '//cdn.datatables.net/1.10.12/js/jquery.dataTables.min'
js_hc_name_1 = '//code.highcharts.com/highcharts'
js_hc_name_2 = '//code.highcharts.com/modules/exporting'
def read_task():
if os.path.exists(file_name):
return pandas.DataFrame.from_csv(file_name)
else:
return pandas.DataFrame()
def save_task(data):
pandas.DataFrame(data).to_csv(file_name)
def add_task(name, content):
data = read_task()
df = pandas.DataFrame([{
'name':name,
'content':content,
'status':'new',
'created_at':datetime.now().strftime("%Y/%m/%d %H:%M:%S")
}], columns = ['name', 'content', 'status', 'created_at', 'updated_at'])
data = data.append(df, ignore_index=True)
save_task(data)
def render_task(data):
js = '''
<link rel='stylesheet' type='text/css' href='%s'>
<script>
require.config({
paths: {
dataTables: '%s'
}
});
require(['dataTables'], function(){
$('.dataframe').DataTable();
});
</script>
'''%(css_dt_name, js_dt_name)
return HTML('<h2>%s</h2>'%(title_name) + data.to_html(classes="display") + js)
def show_done_task():
data = read_task()
data = data[data['status'] == 'done']
return render_task(data)
def show_task():
data = read_task()
data = data[data['status'] != 'done']
return render_task(data)
def update_task(id, **kwargs):
data = read_task()
if kwargs.get('name'):
data.loc.__setitem__((slice(id, id), 'name'), kwargs.get('name'))
if kwargs.get('content'):
data.loc.__setitem__((slice(id, id), 'content'), kwargs.get('content'))
if kwargs.get('status'):
data.loc.__setitem__((slice(id, id), 'status'), kwargs.get('status'))
data.loc.__setitem__((slice(id, id), 'updated_at'), datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
save_task(data)
def delete_task(id):
data = read_task()
data = data.drop(id)
save_task(data)
def backup_task():
os.system( "mkdir backup" )
os.system( "cp %s backup/%s_%s"%(file_name, datetime.now().strftime("%Y%m%d%H%M%S"), file_name) )
def render_graph(data):
chart = pandas_highcharts.core.serialize(data, title=title_name, zoom="xy", output_type='dict')
chart['subtitle'] = {"text": "created tasks", "x": -20}
html = HTML('''
<div id="chart1" style="min-width: 400px; height: 400px; margin: 0 auto"></div>
<script>
require.config({
paths: {
highcharts: '%s',
exporting: '%s'
}
});
require(['highcharts','exporting'], function(){
$('#chart1').highcharts(%s);
});
</script>
''' %(js_hc_name_1, js_hc_name_2, json.dumps(chart)))
return html
def graph_task():
data = read_task()
data['datetime'] = pandas.to_datetime(data['created_at'])
data['count'] = data['name'].count()
data = data.groupby([data['datetime'].dt.year, data['datetime'].dt.month, data['datetime'].dt.day])['count'].count()
data = pandas.DataFrame(data)
return render_graph(data)
| llby/tasks-for-notebook | tasks_for_notebook/tasks_for_notebook.py | Python | mit | 3,217 |
class UnknownAccess(Exception):
"""
Access doesn't exist for this user.
"""
pass
| novafloss/django-formidable | formidable/exceptions.py | Python | mit | 99 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# system
import os
import sys
dir = os.path.split(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0])[0]
sys.path.append(os.path.join(dir, 'scripts'))
# testing
import mock
import unittest
from mock import patch
# program
import setup.load as Config
import setup.database as DB
#
# Global variables.
#
TEST_DATA = 'test_flood_portal_output.json'
class CheckConfigurationStructure(unittest.TestCase):
'''Unit tests for the configuration files.'''
def test_that_load_config_fails_gracefully(self):
assert Config.LoadConfig('xxx.json') == False
## Object type tests.
def test_config_is_list(self):
d = Config.LoadConfig(os.path.join(dir, 'config', 'dev.json'))
assert type(d) is dict
def test_config_returns_a_table_list(self):
d = Config.LoadConfig(os.path.join(dir, 'config', 'dev.json'))
assert type(d['database']) is list
def test_config_checks_api_key(self):
Config.LoadConfig(os.path.join(dir, 'config', 'dev.json'))
assert Config.LoadConfig(os.path.join(dir, 'tests', 'data', 'test_config.json')) == False
class CheckDatabaseCreation(unittest.TestCase):
'''Unit tests for the setting up the database.'''
## Structural tests.
def test_wrapper_database_function_works(self):
assert DB.Main() != False
## Failed config file.
def test_database_fail(self):
assert DB.CreateTables(config_path=os.path.join(dir, 'tests', 'data', 'test_database_fail.json')) == False
def test_that_odd_table_names_fail(self):
assert DB.CreateTables(config_path=os.path.join(dir, 'tests', 'data', 'test_fail_column_names.json')) == False
| luiscape/hdxscraper-violation-documentation-center-syria | tests/unit/test_setup.py | Python | mit | 1,648 |
#!/usr/bin/env python
# Copyright (C) 2006-2010, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author: James Krycka
"""
This script uses py2exe to create inversion\dist\direfl.exe for Windows.
The resulting executable bundles the DiRefl application, the python runtime
environment, and other required python packages into a single file. Additional
resource files that are needed when DiRefl is run are placed in the dist
directory tree. On completion, the contents of the dist directory tree can be
used by the Inno Setup Compiler (via a separate script) to build a Windows
installer/uninstaller for deployment of the DiRefl application. For testing
purposes, direfl.exe can be run from the dist directory.
"""
import os
import sys
'''
print "*** Python path is:"
for i, p in enumerate(sys.path):
print "%5d %s" %(i, p)
'''
from distutils.core import setup
# Augment the setup interface with the py2exe command and make sure the py2exe
# option is passed to setup.
import py2exe
if len(sys.argv) == 1:
sys.argv.append('py2exe')
import matplotlib
# Retrieve the application version string.
from version import version
# A manifest is required to be included in a py2exe image (or accessible as a
# file in the image directory) when wxPython is included so that the Windows XP
# theme is used when rendering wx widgets. The manifest must be matched to the
# version of Python that is being used.
#
# Create a manifest for use with Python 2.5 on Windows XP or Vista. It is
# adapted from the Python manifest file (C:\Python25\pythonw.exe.manifest).
manifest_for_python25 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="1.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
# Create a manifest for use with Python 2.6 or 2.7 on Windows XP or Vista.
manifest_for_python26 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32">
</assemblyIdentity>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="1fc8b3b9a1e18e3b">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="6595b64144ccf1df"
language="*">
</assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
# Select the appropriate manifest to use.
if sys.version_info >= (3, 0) or sys.version_info < (2, 5):
print "*** This script only works with Python 2.5, 2.6, or 2.7."
sys.exit()
elif sys.version_info >= (2, 6):
manifest = manifest_for_python26
elif sys.version_info >= (2, 5):
manifest = manifest_for_python25
# Create a list of all files to include along side the executable being built
# in the dist directory tree. Each element of the data_files list is a tuple
# consisting of a path (relative to dist\) and a list of files in that path.
data_files = []
# Add data files from the matplotlib\mpl-data folder and its subfolders.
# For matploblib prior to version 0.99 see the examples at the end of the file.
data_files = matplotlib.get_py2exe_datafiles()
# Add resource files that need to reside in the same directory as the image.
data_files.append( ('.', [os.path.join('.', 'direfl.ico')]) )
data_files.append( ('.', [os.path.join('.', 'direfl_splash.png')]) )
data_files.append( ('.', [os.path.join('.', 'LICENSE.txt')]) )
data_files.append( ('.', [os.path.join('.', 'README.txt')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_1.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_2.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_3.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'qrd1.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'qrd2.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'surround_air_4.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'surround_d2o_4.refl')]) )
# Add the Microsoft Visual C++ 2008 redistributable kit if we are building with
# Python 2.6 or 2.7. This kit will be installed on the target system as part
# of the installation process for the frozen image. Note that the Python 2.5
# interpreter requires msvcr71.dll which is included in the Python25 package,
# however, Python 2.6 and 2.7 require the msvcr90.dll but they do not bundle it
# with the Python26 or Python27 package. Thus, for Python 2.6 and later, the
# appropriate dll must be present on the target system at runtime.
if sys.version_info >= (2, 6):
pypath = os.path.dirname(sys.executable)
data_files.append( ('.', [os.path.join(pypath, 'vcredist_x86.exe')]) )
# Specify required packages to bundle in the executable image.
packages = ['matplotlib', 'numpy', 'scipy', 'pytz']
# Specify files to include in the executable image.
includes = []
# Specify files to exclude from the executable image.
# - We can safely exclude Tk/Tcl and Qt modules because our app uses wxPython.
# - We do not use ssl services so they are omitted.
# - We can safely exclude the TkAgg matplotlib backend because our app uses
# "matplotlib.use('WXAgg')" to override the default matplotlib configuration.
# - On the web it is widely recommended to exclude certain lib*.dll modules
# but this does not seem necessary any more (but adding them does not hurt).
# - Python25 requires mscvr71.dll, however, Win XP includes this file.
# - Since we do not support Win 9x systems, w9xpopen.dll is not needed.
# - For some reason cygwin1.dll gets included by default, but it is not needed.
excludes = ['Tkinter', 'PyQt4', '_ssl', '_tkagg']
dll_excludes = ['libgdk_pixbuf-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgdk-win32-2.0-0.dll',
'tcl84.dll',
'tk84.dll',
'QtGui4.dll',
'QtCore4.dll',
'msvcr71.dll',
'msvcp90.dll',
'w9xpopen.exe',
'cygwin1.dll']
class Target():
"""This class stores metadata about the distribution in a dictionary."""
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = version
client = Target(
name = 'DiRefl',
description = 'Direct Inversion Reflectometry (DiRefl) application',
script = 'bin/direfl.py', # module to run on application start
dest_base = 'direfl', # file name part of the exe file to create
icon_resources = [(1, 'direfl.ico')], # also need to specify in data_files
bitmap_resources = [],
other_resources = [(24, 1, manifest % dict(prog='DiRefl'))] )
# Now we do the work to create a standalone distribution using py2exe.
#
# When the application is run in console mode, a console window will be created
# to receive any logging or error messages and the application will then create
# a separate GUI application window.
#
# When the application is run in windows mode, it will create a GUI application
# window and no console window will be provided. Output to stderr will be
# written to <app-image-name>.log.
setup(
#console=[client],
windows=[client],
options={'py2exe': {
'packages': packages,
'includes': includes,
'excludes': excludes,
'dll_excludes': dll_excludes,
'compressed': 1, # standard compression
'optimize': 0, # no byte-code optimization
'dist_dir': "dist",# where to put py2exe results
'xref': False, # display cross reference (as html doc)
'bundle_files': 1 # bundle python25.dll in library.zip
}
},
#zipfile=None, # None means bundle library.zip in exe
data_files=data_files # list of files to copy to dist directory
)
#==============================================================================
# This section is for reference only when using older versions of matplotlib.
# The location of mpl-data files has changed across releases of matplotlib.
# Furthermore, matplotlib.get_py2exe_datafiles() had problems prior to version
# 0.99 (see link below for details), so alternative ways had to be used.
# The various techniques shown below for obtaining matplotlib auxiliary files
# (and previously used by this project) was adapted from the examples and
# discussion on http://www.py2exe.org/index.cgi/MatPlotLib.
#
# The following technique worked for matplotlib 0.91.2.
# Note that glob '*.*' will not find files that have no file extension.
'''
import glob
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
mpl_lst = ('mpl-data', glob.glob(os.path.join(matplotlibdatadir, '*.*')))
data_files.append(mpl_lst)
mpl_lst = ('mpl-data', [os.path.join(matplotlibdatadir, 'matplotlibrc')])
data_files.append(mpl_lst) # pickup file missed by glob
mpl_lst = (r'mpl-data\fonts',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\images',
glob.glob(os.path.join(matplotlibdatadir, r'images\*.*')))
data_files.append(mpl_lst)
'''
# The following technique worked for matplotlib 0.98.5.
# Note that glob '*.*' will not find files that have no file extension.
'''
import glob
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
mpl_lst = ('mpl-data', glob.glob(os.path.join(matplotlibdatadir, '*.*')))
data_files.append(mpl_lst)
mpl_lst = ('mpl-data', [os.path.join(matplotlibdatadir, 'matplotlibrc')])
data_files.append(mpl_lst) # pickup file missed by glob
mpl_lst = (r'mpl-data\fonts\afm',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\afm\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\fonts\pdfcorefonts',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\pdfcorefonts\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\fonts\ttf',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\ttf\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\images',
glob.glob(os.path.join(matplotlibdatadir, r'images\*.*')))
data_files.append(mpl_lst)
'''
# The following technique worked for matplotlib 0.98 and 0.99.
'''
from distutils.filelist import findall
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
matplotlibdata = findall(matplotlibdatadir)
for f in matplotlibdata:
dirname = os.path.join('mpl-data', f[len(matplotlibdatadir)+1:])
data_files.append((os.path.split(dirname)[0], [f]))
'''
| reflectometry/direfl | setup_py2exe.py | Python | mit | 12,830 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
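# For illustration: satoshi_round('0.123456789') -> Decimal('0.12345678') (always rounds down).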
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
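    # Poll `predicate` every 0.5s until it returns True. Gives up (and raises)
    # after `attempts` tries or `timeout` seconds; if neither is given, a 60s
    # timeout is used. If `lock` is supplied, it is held while the predicate is
    # evaluated.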
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.5)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "ioncoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("litemode=1\n")
f.write("enablezeromint=0\n")
f.write("staking=0\n")
f.write("spendzeroconfchange=1\n")
return datadir
def rpc_auth_pair(n):
    return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "ioncoin.conf")):
with open(os.path.join(datadir, "ioncoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(addr)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
time.sleep(5)
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
#if flush_scheduler:
#for r in rpc_connections:
# r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = float(satoshi_round(send_value / 2))
outputs[addr2] = float(satoshi_round(send_value / 2))
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = float(satoshi_round(change))
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
| cevap/ion | test/functional/test_framework/util.py | Python | mit | 22,607 |
import sys
from aimes.emgr.utils import *
__author__ = "Matteo Turilli"
__copyright__ = "Copyright 2015, The AIMES Project"
__license__ = "MIT"
# -----------------------------------------------------------------------------
def write_skeleton_conf(cfg, scale, cores, uniformity, fout):
'''Write a skeleton configuration file with the set number/type/duration of
tasks and stages.
'''
substitutes = dict()
substitutes['SCALE'] = scale
substitutes['CORES'] = cores[-1]
if substitutes['CORES'] > 1:
substitutes['TASK_TYPE'] = 'parallel'
elif substitutes['CORES'] == 1:
substitutes['TASK_TYPE'] = 'serial'
else:
print "ERROR: invalid number of cores per task: '%s'." % cores
sys.exit(1)
if uniformity == 'uniform':
substitutes['UNIFORMITY_DURATION'] = "%s %s" % \
(uniformity, cfg['skeleton_task_duration']['max'])
# TODO: Calculate stdev and avg.
elif uniformity == 'gauss':
substitutes['UNIFORMITY_DURATION'] = "%s [%s, %s]" % \
(uniformity, cfg['skeleton_task_duration']['avg'],
cfg['skeleton_task_duration']['stdev'])
else:
print "ERROR: invalid task uniformity '%s' specified." % uniformity
sys.exit(1)
write_template(cfg['skeleton_template'], substitutes, fout)
| radical-cybertools/aimes.emgr | src/aimes/emgr/workloads/skeleton.py | Python | mit | 1,336 |
from yos.rt import BaseTasklet
from yos.ipc import Catalog
class CatalogExample(BaseTasklet):
def on_startup(self):
Catalog.store('test1', 'test2', catname='test3')
Catalog.get('test1', self.on_read, catname='test3')
def on_read(self, val):
if val == 'test2':
print("Test passed")
else:
print("Test failed") | piotrmaslanka/systemy | examples/catalogExample.py | Python | mit | 382 |
import numpy as np
class Stock:
"""
Class to represent the data and ratios of a stock.
"""
def __init__(self, eps, dps, roe=0):
'''
eps: earnings per share.
dps: dividends per share.
roe: fractional return on equity, default to 0.
'''
self.eps = np.array(eps).astype(float)
self.dps = np.array(dps).astype(float)
self.roe = np.array(roe).astype(float)
def retention_ratio(self):
'''
        Calculates the retention ratio (dividend cover) for a stock.
        Returns eps / dps as a numpy array.
'''
return self.eps / self.dps
def dividend_cover(self):
'''
Returns the dividend cover for the stock (alias
for retention_ratio).
'''
return self.retention_ratio()
def payout_ratio(self):
'''
        Calculates the stock payout ratio (the inverse of the retention ratio).
Returns fractional payout ratio numpy array.
'''
return 1 / self.retention_ratio()
def growth_rate(self):
'''
Calculates the dividend growth rate:
        (1 - payout ratio) * return on equity
Returns the fractional expected growth rate numpy array.
'''
ones = np.ones_like(self.roe)
return (ones - self.payout_ratio()) * self.roe
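# Illustrative usage with hypothetical figures:
#   s = Stock(eps=[2.0, 2.2], dps=[1.0, 1.1], roe=0.15)
#   s.dividend_cover()  # -> array([2., 2.])
#   s.payout_ratio()    # -> array([0.5, 0.5])
#   s.growth_rate()     # -> array([0.075, 0.075])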
| conceptric/pyinvest | pyinvest/stock.py | Python | mit | 1,365 |
#-*- coding: utf-8 -*-
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='Configuration file')
arg_lists = []
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
def str2bool(v):
return v.lower() in ('true', '1')
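# For illustration: str2bool('True') -> True, str2bool('0') -> False (handy as an argparse type).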
# Network
net_arg = add_argument_group('Network')
net_arg.add_argument('--hidden_dim', type=int, default=128, help='actor LSTM num_neurons')
net_arg.add_argument('--num_heads', type=int, default=16, help='actor input embedding') ###
net_arg.add_argument('--num_stacks', type=int, default=3, help='actor LSTM num_neurons')
# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--batch_size', type=int, default=128, help='batch size')
data_arg.add_argument('--input_dimension', type=int, default=2, help='city dimension')
data_arg.add_argument('--max_length', type=int, default=20, help='number of deliveries')
# Training / test parameters
train_arg = add_argument_group('Training')
train_arg.add_argument('--nb_epoch', type=int, default=100000, help='nb epoch')
train_arg.add_argument('--lr1_start', type=float, default=0.001, help='actor learning rate')
train_arg.add_argument('--lr1_decay_step', type=int, default=5000, help='lr1 decay step')
train_arg.add_argument('--lr1_decay_rate', type=float, default=0.96, help='lr1 decay rate')
train_arg.add_argument('--alpha', type=float, default=0.99, help='update factor moving average baseline')
train_arg.add_argument('--init_baseline', type=float, default=7.0, help='initial baseline - REINFORCE')
train_arg.add_argument('--temperature', type=float, default=3.0, help='pointer_net initial temperature')
train_arg.add_argument('--C', type=float, default=10.0, help='pointer_net tan clipping')
# Misc
misc_arg = add_argument_group('User options') #####################################################
misc_arg.add_argument('--inference_mode', type=str2bool, default=True, help='switch to inference mode when model is trained')
misc_arg.add_argument('--restore_model', type=str2bool, default=True, help='whether or not model is retrieved')
misc_arg.add_argument('--save_to', type=str, default='20/model', help='saver sub directory')
misc_arg.add_argument('--restore_from', type=str, default='20/model', help='loader sub directory') ###
misc_arg.add_argument('--log_dir', type=str, default='summary/20/repo', help='summary writer log directory')
def get_config():
config, unparsed = parser.parse_known_args()
return config, unparsed
def print_config():
config, _ = get_config()
print('\n')
print('Data Config:')
print('* Batch size:',config.batch_size)
print('* Sequence length:',config.max_length)
print('* City coordinates:',config.input_dimension)
print('\n')
print('Network Config:')
print('* Restored model:',config.restore_model)
print('* Actor hidden_dim (embed / num neurons):',config.hidden_dim)
print('* Actor tan clipping:',config.C)
print('\n')
if config.inference_mode==False:
print('Training Config:')
print('* Nb epoch:',config.nb_epoch)
print('* Temperature:',config.temperature)
print('* Actor learning rate (init,decay_step,decay_rate):',config.lr1_start,config.lr1_decay_step,config.lr1_decay_rate)
else:
print('Testing Config:')
print('* Summary writer log dir:',config.log_dir)
print('\n') | MichelDeudon/neural-combinatorial-optimization-rl-tensorflow | Self_Net_TSP/config.py | Python | mit | 3,341 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
with open('README.rst') as file:
long_description = file.read()
except IOError:
long_description = 'Python lib for sniffets.com'
setup(
name='sniffets',
packages=['sniffets'],
version='0.1.8',
long_description=long_description,
description='Python lib for sniffets.com',
author='Doniyor Jurabayev',
author_email='[email protected]',
url='https://github.com/behconsci/sniffets-python',
download_url='https://github.com/behconsci/sniffets-python/archive/0.1.8.tar.gz',
keywords=['track', 'monitor', 'bug'],
classifiers=[],
install_requires=[
'requests', 'grequests'
],
)
| behconsci/sniffets-python | setup.py | Python | mit | 749 |
from distutils.core import setup
setup (
    name = 'quaternion_class',
    author = 'Matthew Nichols',
    author_email = '[email protected]',
    packages = ['quaternion'],
    package_dir = {'quaternion': 'src'}  # assumes the package sources live in ./src
) | mlnichols/quaternion_class | setup.py | Python | mit | 214 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
from csvkit import sql
from csvkit import table
from csvkit import CSVKitWriter
from csvkit.cli import CSVKitUtility
class CSVSQL(CSVKitUtility):
description = 'Generate SQL statements for one or more CSV files, create execute those statements directly on a database, and execute one or more SQL queries.'
override_flags = ['l', 'f']
def add_arguments(self):
self.argparser.add_argument(metavar="FILE", nargs='*', dest='input_paths', default=['-'],
help='The CSV file(s) to operate on. If omitted, will accept input on STDIN.')
self.argparser.add_argument('-y', '--snifflimit', dest='snifflimit', type=int,
help='Limit CSV dialect sniffing to the specified number of bytes. Specify "0" to disable sniffing entirely.')
self.argparser.add_argument('-i', '--dialect', dest='dialect', choices=sql.DIALECTS,
help='Dialect of SQL to generate. Only valid when --db is not specified.')
self.argparser.add_argument('--db', dest='connection_string',
help='If present, a sqlalchemy connection string to use to directly execute generated SQL on a database.')
self.argparser.add_argument('--query', default=None,
help='Execute one or more SQL queries delimited by ";" and output the result of the last query as CSV.')
self.argparser.add_argument('--insert', dest='insert', action='store_true',
help='In addition to creating the table, also insert the data into the table. Only valid when --db is specified.')
self.argparser.add_argument('--tables', dest='table_names',
help='Specify one or more names for the tables to be created. If omitted, the filename (minus extension) or "stdin" will be used.')
self.argparser.add_argument('--no-constraints', dest='no_constraints', action='store_true',
help='Generate a schema without length limits or null checks. Useful when sampling big tables.')
self.argparser.add_argument('--no-create', dest='no_create', action='store_true',
help='Skip creating a table. Only valid when --insert is specified.')
self.argparser.add_argument('--blanks', dest='blanks', action='store_true',
help='Do not coerce empty strings to NULL values.')
self.argparser.add_argument('--no-inference', dest='no_inference', action='store_true',
help='Disable type inference when parsing the input.')
self.argparser.add_argument('--db-schema', dest='db_schema',
help='Optional name of database schema to create table(s) in.')
def main(self):
connection_string = self.args.connection_string
do_insert = self.args.insert
query = self.args.query
self.input_files = []
for path in self.args.input_paths:
self.input_files.append(self._open_input_file(path))
if self.args.table_names:
table_names = self.args.table_names.split(',')
else:
table_names = []
# If one or more filenames are specified, we need to add stdin ourselves (if available)
if sys.stdin not in self.input_files:
try:
if not sys.stdin.isatty():
                    self.input_files.insert(0, open("/dev/stdin", "r", encoding="utf-8"))
except:
pass
# Create an SQLite database in memory if no connection string is specified
if query and not connection_string:
connection_string = "sqlite:///:memory:"
do_insert = True
if self.args.dialect and connection_string:
self.argparser.error('The --dialect option is only valid when --db is not specified.')
if do_insert and not connection_string:
self.argparser.error('The --insert option is only valid when --db is also specified.')
if self.args.no_create and not do_insert:
self.argparser.error('The --no-create option is only valid --insert is also specified.')
# Establish database validity before reading CSV files
if connection_string:
try:
engine, metadata = sql.get_connection(connection_string)
except ImportError:
raise ImportError('You don\'t appear to have the necessary database backend installed for connection string you\'re trying to use. Available backends include:\n\nPostgresql:\tpip install psycopg2\nMySQL:\t\tpip install MySQL-python\n\nFor details on connection strings and other backends, please see the SQLAlchemy documentation on dialects at: \n\nhttp://www.sqlalchemy.org/docs/dialects/\n\n')
conn = engine.connect()
trans = conn.begin()
for f in self.input_files:
try:
# Try to use name specified via --table
table_name = table_names.pop(0)
except IndexError:
if f == sys.stdin:
table_name = "stdin"
else:
# Use filename as table name
table_name = os.path.splitext(os.path.split(f.name)[1])[0]
csv_table = table.Table.from_csv(
f,
name=table_name,
snifflimit=self.args.snifflimit,
blanks_as_nulls=(not self.args.blanks),
infer_types=(not self.args.no_inference),
no_header_row=self.args.no_header_row,
**self.reader_kwargs
)
f.close()
if connection_string:
sql_table = sql.make_table(
csv_table,
table_name,
self.args.no_constraints,
self.args.db_schema,
metadata
)
# Create table
if not self.args.no_create:
sql_table.create()
# Insert data
if do_insert and csv_table.count_rows() > 0:
insert = sql_table.insert()
headers = csv_table.headers()
conn.execute(insert, [dict(zip(headers, row)) for row in csv_table.to_rows()])
# Output SQL statements
else:
sql_table = sql.make_table(csv_table, table_name, self.args.no_constraints)
self.output_file.write('%s\n' % sql.make_create_table_statement(sql_table, dialect=self.args.dialect))
if connection_string:
if query:
# Execute specified SQL queries
queries = query.split(';')
rows = None
for q in queries:
if q:
rows = conn.execute(q)
# Output result of last query as CSV
try:
output = CSVKitWriter(self.output_file, **self.writer_kwargs)
if not self.args.no_header_row:
output.writerow(rows._metadata.keys)
for row in rows:
output.writerow(row)
except AttributeError:
pass
trans.commit()
conn.close()
def launch_new_instance():
utility = CSVSQL()
utility.main()
if __name__ == "__main__":
launch_new_instance()
| gepuro/csvkit | csvkit/utilities/csvsql.py | Python | mit | 7,388 |
from django.db import models
from django.core.exceptions import ValidationError
from django.db.models.fields.related import ForeignObject
try:
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor
except ImportError:
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor
from django.utils.encoding import python_2_unicode_compatible
import logging
logger = logging.getLogger(__name__)
# Python 3 fixes.
import sys
if sys.version > '3':
long = int
basestring = (str, bytes)
unicode = str
__all__ = ['Country', 'State', 'Locality', 'Address', 'AddressField']
class InconsistentDictError(Exception):
pass
def _to_python(value):
raw = value.get('raw', '')
country = value.get('country', '')
country_code = value.get('country_code', '')
state = value.get('state', '')
state_code = value.get('state_code', '')
locality = value.get('locality', '')
postal_code = value.get('postal_code', '')
street_number = value.get('street_number', '')
route = value.get('route', '')
formatted = value.get('formatted', '')
latitude = value.get('latitude', None)
longitude = value.get('longitude', None)
# If there is no value (empty raw) then return None.
if not raw:
return None
    # If we have an inconsistent set of values, bail out now.
if (country or state or locality) and not (country and state and locality):
raise InconsistentDictError
# Handle the country.
try:
country_obj = Country.objects.get(name=country)
except Country.DoesNotExist:
if country:
if len(country_code) > Country._meta.get_field('code').max_length:
if country_code != country:
raise ValueError('Invalid country code (too long): %s'%country_code)
country_code = ''
country_obj = Country.objects.create(name=country, code=country_code)
else:
country_obj = None
# Handle the state.
try:
state_obj = State.objects.get(name=state, country=country_obj)
except State.DoesNotExist:
if state:
if len(state_code) > State._meta.get_field('code').max_length:
if state_code != state:
raise ValueError('Invalid state code (too long): %s'%state_code)
state_code = ''
state_obj = State.objects.create(name=state, code=state_code, country=country_obj)
else:
state_obj = None
# Handle the locality.
try:
locality_obj = Locality.objects.get(name=locality, state=state_obj)
except Locality.DoesNotExist:
if locality:
locality_obj = Locality.objects.create(name=locality, postal_code=postal_code, state=state_obj)
else:
locality_obj = None
# Handle the address.
try:
if not (street_number or route or locality):
address_obj = Address.objects.get(raw=raw)
else:
address_obj = Address.objects.get(
street_number=street_number,
route=route,
locality=locality_obj
)
except Address.DoesNotExist:
address_obj = Address(
street_number=street_number,
route=route,
raw=raw,
locality=locality_obj,
formatted=formatted,
latitude=latitude,
longitude=longitude,
)
# If "formatted" is empty try to construct it from other values.
if not address_obj.formatted:
address_obj.formatted = unicode(address_obj)
# Need to save.
address_obj.save()
# Done.
return address_obj
##
## Convert a dictionary to an address.
##
def to_python(value):
# Keep `None`s.
if value is None:
return None
# Is it already an address object?
if isinstance(value, Address):
return value
    # If we have an integer, assume it is a model primary key. This is mostly to
    # keep Django's internals happy.
elif isinstance(value, (int, long)):
return value
# A string is considered a raw value.
elif isinstance(value, basestring):
obj = Address(raw=value)
obj.save()
return obj
# A dictionary of named address components.
elif isinstance(value, dict):
# Attempt a conversion.
try:
return _to_python(value)
except InconsistentDictError:
return Address.objects.create(raw=value['raw'])
# Not in any of the formats I recognise.
raise ValidationError('Invalid address value.')
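# Illustrative sketch (hypothetical values, not part of the original module):
# the dictionary form accepted by to_python() uses the keys read in _to_python()
# above. Note that `country`, `state` and `locality` must either all be given or
# all be omitted, otherwise InconsistentDictError causes a fall back to the raw
# string.
#
#   to_python({
#       'raw': '1 Somewhere Ave, Northcote VIC 3070, Australia',
#       'street_number': '1',
#       'route': 'Somewhere Ave',
#       'locality': 'Northcote',
#       'postal_code': '3070',
#       'state': 'Victoria',
#       'state_code': 'VIC',
#       'country': 'Australia',
#       'country_code': 'AU',
#   })
#
# A plain string is stored as an undecomposed Address(raw=...) instead.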
##
## A country.
##
@python_2_unicode_compatible
class Country(models.Model):
name = models.CharField(max_length=40, unique=True, blank=True)
code = models.CharField(max_length=2, blank=True) # not unique as there are duplicates (IT)
class Meta:
verbose_name_plural = 'Countries'
ordering = ('name',)
def __str__(self):
return '%s'%(self.name or self.code)
##
## A state. Google refers to this as `administrative_area_level_1`.
##
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=165, blank=True)
code = models.CharField(max_length=3, blank=True)
country = models.ForeignKey(Country, related_name='states')
class Meta:
unique_together = ('name', 'country')
ordering = ('country', 'name')
def __str__(self):
txt = self.to_str()
country = '%s'%self.country
if country and txt:
txt += ', '
txt += country
return txt
def to_str(self):
return '%s'%(self.name or self.code)
##
## A locality (suburb).
##
@python_2_unicode_compatible
class Locality(models.Model):
name = models.CharField(max_length=165, blank=True)
postal_code = models.CharField(max_length=10, blank=True)
state = models.ForeignKey(State, related_name='localities')
class Meta:
verbose_name_plural = 'Localities'
unique_together = ('name', 'state')
ordering = ('state', 'name')
def __str__(self):
txt = '%s'%self.name
state = self.state.to_str() if self.state else ''
if txt and state:
txt += ', '
txt += state
if self.postal_code:
txt += ' %s'%self.postal_code
cntry = '%s'%(self.state.country if self.state and self.state.country else '')
if cntry:
txt += ', %s'%cntry
return txt
##
## An address. If for any reason we are unable to find a matching
## decomposed address we will store the raw address string in `raw`.
##
@python_2_unicode_compatible
class Address(models.Model):
street_number = models.CharField(max_length=20, blank=True)
route = models.CharField(max_length=100, blank=True)
locality = models.ForeignKey(Locality, related_name='addresses', blank=True, null=True)
raw = models.CharField(max_length=200)
formatted = models.CharField(max_length=200, blank=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
verbose_name_plural = 'Addresses'
ordering = ('locality', 'route', 'street_number')
# unique_together = ('locality', 'route', 'street_number')
def __str__(self):
if self.formatted != '':
txt = '%s'%self.formatted
elif self.locality:
txt = ''
if self.street_number:
txt = '%s'%self.street_number
if self.route:
if txt:
txt += ' %s'%self.route
locality = '%s'%self.locality
if txt and locality:
txt += ', '
txt += locality
else:
txt = '%s'%self.raw
return txt
def clean(self):
if not self.raw:
raise ValidationError('Addresses may not have a blank `raw` field.')
def as_dict(self):
ad = dict(
street_number=self.street_number,
route=self.route,
raw=self.raw,
formatted=self.formatted,
latitude=self.latitude if self.latitude else '',
longitude=self.longitude if self.longitude else '',
)
if self.locality:
ad['locality'] = self.locality.name
ad['postal_code'] = self.locality.postal_code
if self.locality.state:
ad['state'] = self.locality.state.name
ad['state_code'] = self.locality.state.code
if self.locality.state.country:
ad['country'] = self.locality.state.country.name
ad['country_code'] = self.locality.state.country.code
return ad
class AddressDescriptor(ForwardManyToOneDescriptor):
def __set__(self, inst, value):
super(AddressDescriptor, self).__set__(inst, to_python(value))
##
## A field for addresses in other models.
##
class AddressField(models.ForeignKey):
description = 'An address'
def __init__(self, **kwargs):
kwargs['to'] = 'address.Address'
super(AddressField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, AddressDescriptor(self))
# def deconstruct(self):
# name, path, args, kwargs = super(AddressField, self).deconstruct()
# del kwargs['to']
# return name, path, args, kwargs
def formfield(self, **kwargs):
from .forms import AddressField as AddressFormField
defaults = dict(form_class=AddressFormField)
defaults.update(kwargs)
return super(AddressField, self).formfield(**defaults)
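# Illustrative sketch (assumed user model, not part of the original module):
# declare an AddressField on a model and assign any of the value types handled
# by to_python() above (an Address instance, a primary key, a raw string or a
# component dictionary).
#
#   class Person(models.Model):
#       address = AddressField(blank=True, null=True)
#
#   person.address = '1 Somewhere Ave, Northcote VIC 3070, Australia'
#   person.address = {'raw': '...', 'country': 'Australia', 'state': 'Victoria',
#                     'locality': 'Northcote'}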
| jamesaud/se1-group4 | address/models.py | Python | mit | 9,864 |
# -*- coding: utf-8 -*-
import re
# from flask_restful import inputs
# objectid = inputs.regex('^[0-9a-z]{24}$')
def objectid(value):
    message = 'Value is not a valid ObjectId.'
if not value:
return None
pattern = re.compile('^[0-9a-z]{24}$')
if not pattern.match(value):
raise ValueError(message)
return value
| iceihehe/easy-note | app/validators.py | Python | mit | 329 |
# SharePlum
# This library simplifies the code necessary
# to automate interactions with a SharePoint
# server using python
from .office365 import Office365 # noqa: F401
from .site import Site # noqa: F401
from .version import __version__ # noqa: F401
__all__ = ["site", "office365"]
__title__ = "SharePlum SharePoint Library"
__author__ = "Jason Rollins"
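# Illustrative sketch (hypothetical server URLs and credentials, not part of the
# original module): connect to an on-premises site with NTLM, or to SharePoint
# Online via the Office365 helper, then read items from a list.
#
#   from shareplum import Site, Office365
#   from requests_ntlm import HttpNtlmAuth
#
#   # On-premises SharePoint
#   auth = HttpNtlmAuth('DOMAIN\\username', 'password')
#   site = Site('https://sharepoint.example.com/sites/MySite', auth=auth)
#
#   # SharePoint Online
#   authcookie = Office365('https://example.sharepoint.com',
#                          username='user@example.com',
#                          password='password').GetCookies()
#   site = Site('https://example.sharepoint.com/sites/MySite', authcookie=authcookie)
#
#   sp_list = site.List('My List')
#   data = sp_list.GetListItems('All Items')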
| jasonrollins/shareplum | shareplum/__init__.py | Python | mit | 360 |
import copy
import os
import re
import subprocess
from conans.client import tools
from conans.client.build.visual_environment import (VisualStudioBuildEnvironment,
vs_build_type_flags, vs_std_cpp)
from conans.client.tools.oss import cpu_count
from conans.client.tools.win import vcvars_command
from conans.errors import ConanException
from conans.model.conan_file import ConanFile
from conans.model.version import Version
from conans.tools import vcvars_command as tools_vcvars_command
from conans.util.env_reader import get_env
from conans.util.files import decode_text, save
class MSBuild(object):
def __init__(self, conanfile):
if isinstance(conanfile, ConanFile):
self._conanfile = conanfile
self._settings = self._conanfile.settings
self._output = self._conanfile.output
self.build_env = VisualStudioBuildEnvironment(self._conanfile,
with_build_type_flags=False)
else: # backwards compatible with build_sln_command
self._settings = conanfile
self.build_env = None
def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,
parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True,
vcvars_ver=None, winsdk_version=None, properties=None, output_binary_log=None,
property_file_name=None, verbosity=None, definitions=None):
"""
:param project_file: Path to the .sln file.
:param targets: List of targets to build.
:param upgrade_project: Will call devenv to upgrade the solution to your
current Visual Studio.
:param build_type: Use a custom build type instead of the default settings.build_type one.
:param arch: Use a custom architecture name instead of the settings.arch one.
                     It will be used to build the /p:Platform= parameter of MSBuild.
It can be used as the key of the platforms parameter.
E.g. arch="x86", platforms={"x86": "i386"}
:param parallel: Will use the configured number of cores in the conan.conf file or
tools.cpu_count():
In the solution: Building the solution with the projects in parallel. (/m: parameter).
CL compiler: Building the sources in parallel. (/MP: compiler flag)
:param force_vcvars: Will ignore if the environment is already set for a different
Visual Studio version.
:param toolset: Specify a toolset. Will append a /p:PlatformToolset option.
:param platforms: Dictionary with the mapping of archs/platforms from Conan naming to another
one. It is useful for Visual Studio solutions that have a different naming in architectures.
Example: platforms={"x86":"Win32"} (Visual solution uses "Win32" instead of "x86").
This dictionary will update the default one:
msvc_arch = {'x86': 'x86', 'x86_64': 'x64', 'armv7': 'ARM', 'armv8': 'ARM64'}
:param use_env: Applies the argument /p:UseEnv=true to the MSBuild call.
:param vcvars_ver: Specifies the Visual Studio compiler toolset to use.
:param winsdk_version: Specifies the version of the Windows SDK to use.
:param properties: Dictionary with new properties, for each element in the dictionary
{name: value} it will append a /p:name="value" option.
:param output_binary_log: If set to True then MSBuild will output a binary log file
called msbuild.binlog in the working directory. It can also be used to set the name of
log file like this output_binary_log="my_log.binlog".
This parameter is only supported starting from MSBuild version 15.3 and onwards.
:param property_file_name: When None it will generate a file named conan_build.props.
You can specify a different name for the generated properties file.
:param verbosity: Specifies verbosity level (/verbosity: parameter)
:param definitions: Dictionary with additional compiler definitions to be applied during
the build. Use value of None to set compiler definition with no value.
:return: status code of the MSBuild command invocation
"""
property_file_name = property_file_name or "conan_build.props"
self.build_env.parallel = parallel
with tools.environment_append(self.build_env.vars):
# Path for custom properties file
props_file_contents = self._get_props_file_contents(definitions)
property_file_name = os.path.abspath(property_file_name)
save(property_file_name, props_file_contents)
vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars,
vcvars_ver=vcvars_ver, winsdk_version=winsdk_version,
output=self._output)
command = self.get_command(project_file, property_file_name,
targets=targets, upgrade_project=upgrade_project,
build_type=build_type, arch=arch, parallel=parallel,
toolset=toolset, platforms=platforms,
use_env=use_env, properties=properties,
output_binary_log=output_binary_log,
verbosity=verbosity)
command = "%s && %s" % (vcvars, command)
return self._conanfile.run(command)
def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,
build_type=None, arch=None, parallel=True, toolset=None, platforms=None,
use_env=False, properties=None, output_binary_log=None, verbosity=None):
targets = targets or []
properties = properties or {}
command = []
if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False):
command.append('devenv "%s" /upgrade &&' % project_file)
else:
self._output.info("Skipped sln project upgrade")
build_type = build_type or self._settings.get_safe("build_type")
arch = arch or self._settings.get_safe("arch")
if toolset is None: # False value to skip adjusting
toolset = tools.msvs_toolset(self._settings)
verbosity = os.getenv("CONAN_MSBUILD_VERBOSITY") or verbosity or "minimal"
if not build_type:
raise ConanException("Cannot build_sln_command, build_type not defined")
if not arch:
raise ConanException("Cannot build_sln_command, arch not defined")
command.append('msbuild "%s" /p:Configuration="%s"' % (project_file, build_type))
msvc_arch = {'x86': 'x86',
'x86_64': 'x64',
'armv7': 'ARM',
'armv8': 'ARM64'}
if platforms:
msvc_arch.update(platforms)
msvc_arch = msvc_arch.get(str(arch))
if self._settings.get_safe("os") == "WindowsCE":
msvc_arch = self._settings.get_safe("os.platform")
try:
sln = tools.load(project_file)
pattern = re.compile(r"GlobalSection\(SolutionConfigurationPlatforms\)"
r"(.*?)EndGlobalSection", re.DOTALL)
solution_global = pattern.search(sln).group(1)
lines = solution_global.splitlines()
lines = [s.split("=")[0].strip() for s in lines]
except Exception:
pass # TODO: !!! what are we catching here? tools.load? .group(1)? .splitlines?
else:
config = "%s|%s" % (build_type, msvc_arch)
if config not in "".join(lines):
self._output.warn("***** The configuration %s does not exist in this solution *****"
% config)
self._output.warn("Use 'platforms' argument to define your architectures")
if output_binary_log:
msbuild_version = MSBuild.get_version(self._settings)
if msbuild_version >= "15.3": # http://msbuildlog.com/
command.append('/bl' if isinstance(output_binary_log, bool)
else '/bl:"%s"' % output_binary_log)
else:
raise ConanException("MSBuild version detected (%s) does not support "
"'output_binary_log' ('/bl')" % msbuild_version)
if use_env:
command.append('/p:UseEnv=true')
if msvc_arch:
command.append('/p:Platform="%s"' % msvc_arch)
if parallel:
command.append('/m:%s' % cpu_count(output=self._output))
if targets:
command.append("/target:%s" % ";".join(targets))
if toolset:
command.append('/p:PlatformToolset="%s"' % toolset)
if verbosity:
command.append('/verbosity:%s' % verbosity)
if props_file_path:
command.append('/p:ForceImportBeforeCppTargets="%s"'
% os.path.abspath(props_file_path))
for name, value in properties.items():
command.append('/p:%s="%s"' % (name, value))
return " ".join(command)
def _get_props_file_contents(self, definitions=None):
def format_macro(name, value):
return "%s=%s" % (name, value) if value else name
# how to specify runtime in command line:
# https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project
runtime_library = {"MT": "MultiThreaded",
"MTd": "MultiThreadedDebug",
"MD": "MultiThreadedDLL",
"MDd": "MultiThreadedDebugDLL"}.get(
self._settings.get_safe("compiler.runtime"), "")
if self.build_env:
# Take the flags from the build env, the user was able to alter them if needed
flags = copy.copy(self.build_env.flags)
flags.append(self.build_env.std)
else: # To be removed when build_sln_command is deprecated
flags = vs_build_type_flags(self._settings, with_flags=False)
flags.append(vs_std_cpp(self._settings))
if definitions:
definitions = ";".join([format_macro(name, definitions[name]) for name in definitions])
flags_str = " ".join(list(filter(None, flags))) # Removes empty and None elements
additional_node = "<AdditionalOptions>" \
"{} %(AdditionalOptions)" \
"</AdditionalOptions>".format(flags_str) if flags_str else ""
runtime_node = "<RuntimeLibrary>" \
"{}" \
"</RuntimeLibrary>".format(runtime_library) if runtime_library else ""
definitions_node = "<PreprocessorDefinitions>" \
"{};%(PreprocessorDefinitions)" \
"</PreprocessorDefinitions>".format(definitions) if definitions else ""
template = """<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemDefinitionGroup>
<ClCompile>
{runtime_node}
{additional_node}
{definitions_node}
</ClCompile>
</ItemDefinitionGroup>
</Project>""".format(**{"runtime_node": runtime_node,
"additional_node": additional_node,
"definitions_node": definitions_node})
return template
@staticmethod
def get_version(settings):
msbuild_cmd = "msbuild -version"
vcvars = tools_vcvars_command(settings)
command = "%s && %s" % (vcvars, msbuild_cmd)
try:
out, _ = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()
version_line = decode_text(out).split("\n")[-1]
            prog = re.compile(r"(\d+\.){2,3}\d+")
result = prog.match(version_line).group()
return Version(result)
except Exception as e:
raise ConanException("Error retrieving MSBuild version: '{}'".format(e))
| memsharded/conan | conans/client/build/msbuild.py | Python | mit | 12,275 |
"""core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from test_app import views as test_app_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^test_app/companies/', test_app_views.companies_list_view),
]
| vrcmarcos/django-http-model | core/urls.py | Python | mit | 876 |
import logging
import numpy as np
import os
import subprocess
import tempfile
from . import scene
from . import rman
ZERO = scene.npvector((0, 0, 0))
_film = rman.Identifier(
'Film', positional=['string'],
named={
'xresolution': 'integer',
'yresolution': 'integer',
'cropwindow': 'float[4]',
'filename': 'string'
})
_look_at = rman.Identifier('LookAt', positional=['point', 'point', 'vector'])
_camera = rman.Identifier(
'Camera', positional=['string'],
named={
'shutteropen': 'float',
'shutterclose': 'float',
'lensradius': 'float',
'focaldistance': 'float',
'fov': 'float',
'autofocus': 'bool'
})
_sampler = rman.Identifier(
'Sampler', positional=['string'],
named={
'pixelsamples': 'integer',
})
_area_light_source = rman.Identifier(
'AreaLightSource', positional=['string'],
named={'L': 'rgb'})
_translate = rman.Identifier('Translate', positional=['vector'])
_rotate = rman.Identifier('Rotate', positional=['float', 'vector'])
_shape = rman.Identifier(
'Shape', positional=['string'],
named={
'radius': 'float',
'indices': 'integer[]',
'P': 'point[]'
})
class PbrtRenderer(object):
def __init__(self, executable=None, output_file=None, scene_file=None,
width=384, height=256, samples_per_pixel=None, slaves=None,
exrpptm=None, exrnormalize=None, exrtopng=None):
self.executable = executable
self.output_file = output_file
self.scene_file = scene_file
self.width = width
self.height = height
self.samples_per_pixel = samples_per_pixel
self.scene_file_ext = 'pbrt'
self.exrpptm = exrpptm
self.exrnormalize = exrnormalize
self.exrtopng = exrtopng
@property
def output_file(self):
return self._output_file
@output_file.setter
def output_file(self, value):
logging.info('output_file = %s', value)
if value is None:
self._output_file = None
self._exr_file = None
return
self._output_file = value
base, ext = os.path.splitext(value)
logging.info('base = %s, ext = %s', base, ext)
assert ext == '.png'
self._exr_file = base + '.exr'
def render(self, scene, generate_only=False):
scene_file = self.scene_file or tempfile.mkstemp()[1]
logging.info('Created scene file %s', scene_file)
self._write_scene_file(scene, scene_file)
if not generate_only:
self._run_renderer(scene_file)
if not self.scene_file:
logging.info('Deleting %s', scene_file)
os.remove(scene_file)
def _write_object(self, writer, obj):
writer.begin_block('Attribute')
if obj.light is not None:
            color = obj.light.color * obj.light.power
            writer.write(_area_light_source('diffuse', L=color))
if isinstance(obj, scene.Sphere):
if not np.array_equal(obj.center, ZERO):
writer.write(_translate(obj.center))
writer.write(_shape("sphere", radius=obj.radius))
else:
assert False, "Unsupported object type"
writer.end_block('Attribute')
def _write_scene_file(self, scene, scene_file):
with rman.FileWriter(scene_file) as writer:
writer.write(_look_at(scene.camera.loc, scene.camera.to, scene.camera.up))
writer.write(_film(
'image',
xresolution=self.width, yresolution=self.height,
filename=self._exr_file))
writer.write(_camera('perspective', fov=scene.camera.fov))
if self.samples_per_pixel:
writer.write(_sampler('lowdiscrepancy', pixelsamples=self.samples_per_pixel))
writer.begin_block('World')
for obj in scene.objects:
self._write_object(writer, obj)
writer.end_block('World')
def _run_renderer(self, scene_file):
if self.executable is None:
logging.error(
'Trying to call pbrt, but path to the executable is not specified.')
assert self.executable is not None
args = [self.executable, scene_file]
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
args = [self.exrpptm, '-c', '1.0', self._exr_file, self._exr_file + '.pp']
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
args = [self.exrnormalize, self._exr_file + '.pp', self._exr_file + '.n']
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
args = [self.exrtopng, self._exr_file + '.n', self.output_file]
logging.info('Running %s', ' '.join(args))
subprocess.call(args)
def batch_render(self, scene_files):
logging.info('Rendering %d files', len(scene_files))
for f in scene_files:
self._run_renderer(f)
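# Illustrative sketch (assumed tool paths and scene object, not part of the
# original module): a scene built with the accompanying `scene` module can be
# rendered to a PNG via pbrt plus the exrtools post-processing chain.
#
#   renderer = PbrtRenderer(
#       executable='/usr/local/bin/pbrt',        # assumed install location
#       output_file='render.png',
#       width=640, height=480,
#       samples_per_pixel=64,
#       exrpptm='exrpptm', exrnormalize='exrnormalize', exrtopng='exrtopng')
#   renderer.render(my_scene)                      # my_scene built with the scene module
#   renderer.render(my_scene, generate_only=True)  # only write the .pbrt file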
| eterevsky/animations | pyrene/pbrt.py | Python | mit | 4,687 |
#!/usr/bin/env python
"""
Generates an AXI Stream demux wrapper with the specified number of ports
"""
import argparse
from jinja2 import Template
def main():
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('-p', '--ports', type=int, default=4, help="number of ports")
parser.add_argument('-n', '--name', type=str, help="module name")
parser.add_argument('-o', '--output', type=str, help="output file name")
args = parser.parse_args()
try:
generate(**args.__dict__)
except IOError as ex:
print(ex)
exit(1)
def generate(ports=4, name=None, output=None):
n = ports
if name is None:
name = "axis_demux_wrap_{0}".format(n)
if output is None:
output = name + ".v"
print("Generating {0} port AXI stream demux wrapper {1}...".format(n, name))
cn = (n-1).bit_length()
t = Template(u"""/*
Copyright (c) 2018-2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* AXI4-Stream {{n}} port demux (wrapper)
*/
module {{name}} #
(
// Width of AXI stream interfaces in bits
parameter DATA_WIDTH = 8,
// Propagate tkeep signal
parameter KEEP_ENABLE = (DATA_WIDTH>8),
// tkeep signal width (words per cycle)
parameter KEEP_WIDTH = (DATA_WIDTH/8),
// Propagate tid signal
parameter ID_ENABLE = 0,
// tid signal width
parameter ID_WIDTH = 8,
// Propagate tdest signal
parameter DEST_ENABLE = 0,
// tdest signal width
parameter DEST_WIDTH = 8,
// Propagate tuser signal
parameter USER_ENABLE = 1,
// tuser signal width
parameter USER_WIDTH = 1
)
(
input wire clk,
input wire rst,
/*
* AXI Stream input
*/
input wire [DATA_WIDTH-1:0] s_axis_tdata,
input wire [KEEP_WIDTH-1:0] s_axis_tkeep,
input wire s_axis_tvalid,
output wire s_axis_tready,
input wire s_axis_tlast,
input wire [ID_WIDTH-1:0] s_axis_tid,
input wire [DEST_WIDTH-1:0] s_axis_tdest,
input wire [USER_WIDTH-1:0] s_axis_tuser,
/*
* AXI Stream outputs
*/
{%- for p in range(n) %}
output wire [DATA_WIDTH-1:0] m{{'%02d'%p}}_axis_tdata,
output wire [KEEP_WIDTH-1:0] m{{'%02d'%p}}_axis_tkeep,
output wire m{{'%02d'%p}}_axis_tvalid,
input wire m{{'%02d'%p}}_axis_tready,
output wire m{{'%02d'%p}}_axis_tlast,
output wire [ID_WIDTH-1:0] m{{'%02d'%p}}_axis_tid,
output wire [DEST_WIDTH-1:0] m{{'%02d'%p}}_axis_tdest,
output wire [USER_WIDTH-1:0] m{{'%02d'%p}}_axis_tuser,
{% endfor -%}
/*
* Control
*/
input wire enable,
input wire drop,
input wire [{{cn-1}}:0] select
);
axis_demux #(
.M_COUNT({{n}}),
.DATA_WIDTH(DATA_WIDTH),
.KEEP_ENABLE(KEEP_ENABLE),
.KEEP_WIDTH(KEEP_WIDTH),
.ID_ENABLE(ID_ENABLE),
.ID_WIDTH(ID_WIDTH),
.DEST_ENABLE(DEST_ENABLE),
.DEST_WIDTH(DEST_WIDTH),
.USER_ENABLE(USER_ENABLE),
.USER_WIDTH(USER_WIDTH)
)
axis_demux_inst (
.clk(clk),
.rst(rst),
// AXI inputs
.s_axis_tdata(s_axis_tdata),
.s_axis_tkeep(s_axis_tkeep),
.s_axis_tvalid(s_axis_tvalid),
.s_axis_tready(s_axis_tready),
.s_axis_tlast(s_axis_tlast),
.s_axis_tid(s_axis_tid),
.s_axis_tdest(s_axis_tdest),
.s_axis_tuser(s_axis_tuser),
// AXI output
.m_axis_tdata({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tkeep({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tvalid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tready({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tready{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tlast({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tdest({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tuser({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
// Control
.enable(enable),
.drop(drop),
.select(select)
);
endmodule
""")
print(f"Writing file '{output}'...")
with open(output, 'w') as f:
f.write(t.render(
n=n,
cn=cn,
name=name
))
f.flush()
print("Done")
if __name__ == "__main__":
main()
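# Illustrative invocations (output names follow the defaults in generate()):
#
#   python axis_demux_wrap.py -p 8                            # writes axis_demux_wrap_8.v
#   python axis_demux_wrap.py -p 4 -n my_demux -o my_demux.v  # custom module name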
| alexforencich/xfcp | lib/eth/lib/axis/rtl/axis_demux_wrap.py | Python | mit | 5,899 |
import os
import runpy
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyHMSA'
copyright = u'2014, Philippe Pinard'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
filepath = os.path.join(os.path.dirname(__file__),
'..', '..', 'pyhmsa', '__init__.py')
_vars = runpy.run_path(filepath)
version = _vars['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'solar'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'pyHMSA documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'pyHMSA'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyHMSAdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyHMSA.tex', u'pyHMSA Documentation',
u'Philippe Pinard', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyhmsa', u'pyHMSA Documentation',
[u'Philippe Pinard'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyHMSA', u'pyHMSA Documentation',
u'Philippe Pinard', 'pyHMSA', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyHMSA'
epub_author = u'Philippe Pinard'
epub_publisher = u'Philippe Pinard'
epub_copyright = u'2014, Philippe Pinard'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'pyHMSA'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
autoclass_content = "both"
#autodoc_member_order = "bysource"
autodoc_default_flags = ['undoc-members']
| pyhmsa/pyhmsa | doc/source/conf.py | Python | mit | 10,278 |
#!/usr/bin/env python
'''
///////////////////////////////////////////////////////////
// Permission is hereby granted, free of charge,
// to any person obtaining a copy of
// this software and associated documentation files
// (the "Software"), to deal in the Software without
// restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and
// to permit persons to whom the Software is furnished
// to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
'''
__author__ = 'RobertIan'
__version__ = '0.2.5'
import argparse
import pygame
import picamera
import time
import datetime
import netifaces
import RPi.GPIO as GPIO
import os.path
import sys
import select
import os
class Trial:
def __init__(self, stim, starttime, feedornot):
## initialize display
pygame.display.init()
pygame.mouse.set_visible(False)
self.screen = pygame.display.set_mode((0,0),pygame.FULLSCREEN)
## assign stimulus
self.stimulus = stim
## timing
self.start = float(starttime)
self.tLength = 4*60 #four minute trial
self.feedDelay = 30 #thirty second delay
## GPIO setup
GPIO.setmode(GPIO.BCM)
self.feeder = 17 ##????
self.notfeeder = 5 ##????
self.feederin = 26 ##????
self.notfeederin = 25 ##????
if feedornot == 'feed':
self.feederin = self.feederin
self.feederout = self.feeder
elif feedornot == 'notfeed':
self.feederin = self.notfeederin
self.feederout = self.notfeeder
else:
## currently a print, should be changed to send a message to
#the client
print 'feeder not assigned'
self.safeQuit()
GPIO.setup(self.feederin, GPIO.IN)
GPIO.add_event_detect(self.feederin, GPIO.RISING)
GPIO.setup(self.feederout, GPIO.OUT)
GPIO.output(self.feederout, True)
def checkPiIP(self):
## query IP address from system
self.ip = netifaces.ifaddresses('eth0')[2][0]['addr']
def whatStimulus(self):
## locate stimulus in src folder
self.stim, extension = os.path.splitext(self.stimulus)
if extension == '.png' or extension == '.PNG' or extension == '.jpg' \
or extension == '.JPG':
## still image
try:
self.image = pygame.image.load('/home/pi/ethoStim/individualtesting/src/10.png')
except IOError:
## currently a print, should be changed to send a message to
#the client
print 'are you sure this file exists? check the src folder \
                only jpg/JPG, png/PNG formats'
self.safeQuit()
def cameraInit(self):
## adjust camera settings here
self.camera = picamera.PiCamera()
self.camera.resolution = (1920, 1080)
self.camera.framerate = 30
self.camera.autofocus = False
self.camera.awb_mode = 'fluorescent'
def videoFileName(self, species, tround, sl, sex, fishid, day, session,
conditionside):
## adjust video naming convention here
self.vidout = ('data/'+str(self.ip)+'/'+(str(species)+'_'+str(tround)
+'_'+str(sl)+'_'+str(sex) +'_'+str(fishid)+'_'+str(day)+'_'+
str(session)+'_' +str(self.stim)+'_'+str(conditionside)))
def startRecording(self):
self.camera.start_recording(self.vidout+ '.h264') #output video
def stopRecording(self):
self.camera.stop_recording()
def cameraQuit(self):
self.camera.close()
def safeQuit(self):
GPIO.output(self.feeder, True) #reset feeder ????
GPIO.output(self.notfeeder, True) #reset notfeeder ????
GPIO.cleanup() #reset all GPIOs
pygame.quit()
exit()
def mainLoop(self, camera):
## hang until assigned start time
while time.time()<self.start:
print time.time()-self.start
pass
## start timer
self.startT = time.time()
fed = False # feed delay control variable
## start recording
if camera == 'record':
            self.startRecording()
elif camera == 'notrecord':
pass
## display stimulus/start main loop
while ((time.time() - self.startT) < self.tLength):
pygame.display.flip()
self.screen.blit(self.image, (250,100)) # location of stimulus
## control feeder delay
try:
if (time.time() - self.startT) > self.feedDelay:
if fed:
pass
elif GPIO.event_detected(self.feederin):
time.sleep(1.0)
GPIO.output(self.feederout,True)
fed = True
else:
GPIO.output(self.feederout, False)
except KeyboardInterrupt:
self.safeQuit()
if __name__ == '__main__':
    ## load in command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f","--fish", help="ID of fish in tank")
ap.add_argument("-ts", "--trainedStim",help="numerosity stimulus the individual is being trained to, e.g. 12")
ap.add_argument("-ps", "--presentedStim", help="stimulus being presented with this raspberry pi")
ap.add_argument("-d","--day", help="experiment day, e.g. 1-7")
ap.add_argument("-s","--session", help="trial session, e.g. 1-4")
ap.add_argument("-fs","--fedSide", help="side(self.ip feed on/conditioned side")
ap.add_argument("-x","--sex", help="fish sex")
ap.add_argument("-p","--proportion", help="training ratio")
ap.add_argument("-sp", "--species", help="species name")
ap.add_argument("-sl","--fishstandardlength", help="standard length of the")
ap.add_argument("-r","--round", help="training round")
ap.add_argument("-fd", "--feed", help="feed with this stimulus",action="store_true")
ap.add_argument("-c", "--camera",help="do you want to record using this pi?",action="store_true")
ap.add_argument("-m:", "--startTime", help="time since epoch that you want to start your trial")
args = vars(ap.parse_args())
## parse trial details and pass it to the Trial class
    if args["feed"]:
T = Trial(args["presentedStim"], args["startTime"], 'feed')
else:
        T = Trial(args["presentedStim"], args["startTime"], 'notfeed')
T.checkPiIP()
T.whatStimulus()
T.videoFileName(args["species"], args["round"], args["fishstandardlength"],
args["sex"], args["fish"], args["day"], args["session"], args["fedSide"])
## initialize camera IF attached to Pi
if args["camera"]:
T.cameraInit()
else:
pass
## start camera recording IF attached to Pi and begin mainloop of Trial
if args["camera"]:
T.mainLoop('record')
else:
T.mainLoop('notrecord')
## stop camera recording IF attached to Pi
if args["camera"]:
T.stopRecording()
else:
pass
## cleanup camera IF attached to Pi
if args["camera"]:
T.cameraQuit()
## cleanup remaining processes and exit
T.safeQuit()
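## Illustrative invocation (hypothetical values for every option; see the
## argparse help strings above for what each flag means). -fd selects the
## feeding stimulus and -c enables recording on this Pi:
##
##   python trial.py -f fish01 -ts 12 -ps 10.png -d 1 -s 2 -fs left -x m \
##       -p 2to1 -sp "P. reticulata" -sl 25 -r 1 -fd -c -m 1500000000.0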
| RobertIan/ethoStim | individualtesting/trial.py | Python | mit | 7,935 |
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CategoriesRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'CategoriesRequestBodyData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""CategoriesRequestBody - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this CategoriesRequestBody. # noqa: E501
:return: The data of this CategoriesRequestBody. # noqa: E501
:rtype: CategoriesRequestBodyData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this CategoriesRequestBody.
:param data: The data of this CategoriesRequestBody. # noqa: E501
:type: CategoriesRequestBodyData
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CategoriesRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CategoriesRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| ltowarek/budget-supervisor | third_party/saltedge/swagger_client/models/categories_request_body.py | Python | mit | 3,130 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-13 10:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0003_auto_20170913_1007'),
]
operations = [
migrations.AlterField(
model_name='company',
name='basic_material',
field=models.IntegerField(choices=[(0, 'Plastic / Resin'), (1, 'Metal'), (3, 'Other'), (4, '---')], default=0),
),
]
| hqpr/findyour3d | findyour3d/company/migrations/0004_auto_20170913_1043.py | Python | mit | 533 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
if __name__ == "__main__":
settings_name = "settings.local" if os.name == 'nt' else "settings.remote"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_name)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| alexeiramone/django-default-template | manage.py | Python | mit | 348 |
# -*- coding: utf-8 -*-
__version__ = "1.2.3"
| ivanyu/idx2numpy | idx2numpy/version.py | Python | mit | 47 |
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import re
import unittest2
import warnings
import httpretty as hp
from coinbase.client import Client
from coinbase.client import OAuthClient
from coinbase.error import APIError
from coinbase.error import TwoFactorTokenRequired
from coinbase.error import UnexpectedDataFormatError
from coinbase.model import APIObject
from coinbase.model import Account
from coinbase.model import Address
from coinbase.model import Button
from coinbase.model import Money
from coinbase.model import Order
from coinbase.model import Transaction
from coinbase.model import Transfer
# Hide all warning output.
warnings.showwarning = lambda *a, **k: None
# Dummy API key values for use in tests
api_key = 'fakeapikey'
api_secret = 'fakeapisecret'
client_id = 'fakeid'
client_secret = 'fakesecret'
access_token = 'fakeaccesstoken'
refresh_token = 'fakerefreshtoken'
class TestAccount(unittest2.TestCase):
@hp.activate
def test_delete(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
self.assertTrue(uri.endswith(account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.DELETE, re.compile('.*'), body=server_response)
data = {'success': False}
with self.assertRaises(APIError):
account.delete()
data = {'success': True}
self.assertIsNone(account.delete())
@hp.activate
def test_set_primary(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
account.primary = None
def server_response(request, uri, headers):
self.assertTrue(uri.endswith('%s/primary' % account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
data = {'success': False}
with self.assertRaises(APIError):
account.set_primary()
self.assertIsNone(account.primary) # Primary status should not have changed.
data = {'success': True}
account.set_primary()
self.assertTrue(account.primary) # Primary status should have changed.
@hp.activate
def test_modify(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
account.name = initial_name = 'Wallet'
def server_response(request, uri, headers):
self.assertTrue(uri.endswith(account.id))
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
name = request_data.get('account', {}).get('name')
assert name == new_name
return (200, headers, json.dumps(data))
new_name = 'Vault'
data = {'success': False, 'account': {'name': new_name}}
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
account.modify(new_name)
self.assertEqual(account.name, initial_name)
data = {'success': True, 'account': {'name': new_name}}
account.modify(new_name)
self.assertEqual(account.name, new_name)
data = {'success': True, 'account': 'nottherighttype'}
with self.assertRaises(UnexpectedDataFormatError):
account.modify(new_name)
@hp.activate
def test_get_balance(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
account.balance = initial_balance = lambda: None # Initial value
def server_response(request, uri, headers):
self.assertTrue(uri.endswith('%s/balance' % account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
data = {'currency': 'USD', 'amount': '10.00'}
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
balance = account.get_balance()
self.assertIsInstance(balance, Money)
# Fetching the current balance should not modify the balance attribute on
# the Account object.
self.assertEqual(account.balance, initial_balance)
@hp.activate
def test_get_address(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
self.assertTrue(uri.endswith('%s/address' % account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'address': 'a',
'callback_url': None,
'label': None,
'success': False}
with self.assertRaises(APIError):
account.get_address()
data = {'badkey': 'bar',
'success': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_address()
data = {'address': 'a',
'callback_url': None,
'label': None,
'success': True}
address = account.get_address()
self.assertIsInstance(address, Address)
@hp.activate
def test_get_addresses(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'addresses': [
{'address': {
'label': '',
'address': 'foo',
'callback_url': '',
'id': '1'
}},
{'address': {
'label': '',
'address': 'foo',
'callback_url': '',
'id': '2'
}},
{'address': {
'label': '',
'address': 'foo',
'callback_url': '',
'id': '3'
}},
],
}
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
response = account.get_addresses()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.addresses), 3)
for address in response.addresses:
self.assertIsInstance(address, Address)
@hp.activate
def test_create_address(self):
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
address = request_data.get('address')
assert isinstance(address, dict)
if label is not None:
assert address.get('label') == label
if callback_url is not None:
assert address.get('callback_url') == callback_url
return (200, headers, json.dumps(data))
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
label, callback_url = ('label', 'http://example.com/')
data = {'success': False,
'address': 'foo',
'label': label,
'callback_url': callback_url}
with self.assertRaises(APIError):
account.create_address(label, callback_url)
label, callback_url = ('label', 'http://example.com/')
data = {'success': True, 'arbkey': 'bar'}
with self.assertRaises(UnexpectedDataFormatError):
account.create_address(label, callback_url)
label, callback_url = ('label', 'http://example.com/')
data = {'success': True,
'address': 'foo',
'label': label,
'callback_url': callback_url}
address = account.create_address(label, callback_url)
self.assertIsInstance(address, Address)
label, callback_url = (None, None)
data = {'success': True,
'address': 'foo',
'label': label,
'callback_url': callback_url}
address = account.create_address()
self.assertIsInstance(address, Address)
@hp.activate
def test_get_transactions(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'transactions': [
{'transaction': {'id': '1'}},
{'transaction': {'id': '2'}},
{'transaction': {'id': '3'}},
],
}
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
response = account.get_transactions()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.transactions), 3)
for transaction in response.transactions:
self.assertIsInstance(transaction, Transaction)
@hp.activate
def test_get_transaction(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
transaction_id = 'faketransactionid'
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'missing_transaction_key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transaction(transaction_id)
data = {'transaction': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transaction(transaction_id)
data = {'transaction': {'id': '1'}}
transaction = account.get_transaction(transaction_id)
self.assertIsInstance(transaction, Transaction)
@hp.activate
def test_transfer_money(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
base_kwargs = {
'to_account_id': 'fake-account-id',
'amount': '12.0 BTC',
}
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso=None)
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string=None, amount_currency_iso='USD')
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso=None)
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso='USD')
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso='USD')
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string='12.0', amount_currency_iso=None)
account.transfer_money(**kwargs)
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
self.assertEqual(len(tx_data), len(kwargs))
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
account.transfer_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
kwargs = base_kwargs.copy()
account.transfer_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
kwargs = base_kwargs.copy()
account.transfer_money(**kwargs)
data = {'success': True, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
tx = account.transfer_money(**kwargs)
self.assertIsInstance(tx, Transaction)
@hp.activate
def test_send_money(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
base_kwargs = {
'to_btc_address': 'some-btc-address',
'amount': '12.0 BTC',
}
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso=None)
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string=None, amount_currency_iso='USD')
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso=None)
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso='USD')
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso='USD')
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string='12.0', amount_currency_iso=None)
account.send_money(**kwargs)
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
self.assertEqual(len(tx_data), len(kwargs))
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
account.send_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
kwargs = base_kwargs.copy()
account.send_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
kwargs = base_kwargs.copy()
account.send_money(**kwargs)
data = {'success': True, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
tx = account.send_money(**kwargs)
self.assertIsInstance(tx, Transaction)
oauth_account = Account(
OAuthClient(client_id, client_secret, access_token, refresh_token))
oauth_account.id = 'fakeaccountid'
hp.reset()
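    # The OAuth flow below simulates a two-factor requirement: the fake server returns HTTP 402 unless the
    # CB-2FA-Token header is present, and the test expects the client to surface that as TwoFactorTokenRequired.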
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
if two_factor_token:
self.assertEqual(request.headers.get('CB-2FA-Token'), two_factor_token)
self.assertIsNone(tx_data.get('CB-2FA-Token'))
return (200, headers, json.dumps(data))
return (402, headers, '')
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
kwargs = base_kwargs.copy()
kwargs['two_factor_token'] = two_factor_token = None
with self.assertRaises(TwoFactorTokenRequired):
oauth_account.send_money(**kwargs)
kwargs['two_factor_token'] = two_factor_token = 'sometoken'
tx = oauth_account.send_money(**kwargs)
self.assertIsInstance(tx, Transaction)
@hp.activate
def test_request_money(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
base_kwargs = {
'from_email_address': 'some-btc-address',
'amount': '12.0 BTC',
}
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso=None)
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string=None, amount_currency_iso='USD')
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso=None)
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso='USD')
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso='USD')
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string='12.0', amount_currency_iso=None)
account.request_money(**kwargs)
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
self.assertEqual(len(tx_data), len(kwargs))
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
account.request_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
kwargs = base_kwargs.copy()
account.request_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
kwargs = base_kwargs.copy()
account.request_money(**kwargs)
data = {'success': True, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
tx = account.request_money(**kwargs)
self.assertIsInstance(tx, Transaction)
@hp.activate
def test_get_transfers(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'transfers': [
{'transfer': {'id': '1'}},
{'transfer': {'id': '2'}},
{'transfer': {'id': '3'}},
],
}
response = account.get_transfers()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.transfers), 3)
for transfer in response.transfers:
self.assertIsInstance(transfer, Transfer)
@hp.activate
def test_get_transfer(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
transfer_id = 'faketransferid'
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'missing_transfer_key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transfer(transfer_id)
data = {'transfer': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transfer(transfer_id)
data = {'transfer': {'id': '1'}}
transfer = account.get_transfer(transfer_id)
self.assertIsInstance(transfer, Transfer)
@hp.activate
def test_get_button(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
button_code = 'fakebuttoncode'
def server_response(request, uri, headers):
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'button': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_button(button_code)
data = {'missing-button-key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_button(button_code)
data = {'button': {'code': button_code}}
button = account.get_button(button_code)
self.assertIsInstance(button, Button)
data = {'badkey': 'bar',
'success': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_address()
data = {'address': 'a',
'callback_url': None,
'label': None,
'success': True}
address = account.get_address()
self.assertIsInstance(address, Address)
@hp.activate
def test_create_button(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
button_data = request_data.get('button')
self.assertIsInstance(button_data, dict)
for key in ['name', 'price_string', 'price_currency_iso']:
self.assertTrue(key in button_data)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
name = 'b-name'
price_string = 'b-price'
price_currency_iso = 'BTC'
with self.assertRaises(APIError):
data = {
'success': False,
'button': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
account.create_button(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'button': 'wrong-type'}
account.create_button(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-button-key': True}
account.create_button(name, price_string, price_currency_iso)
data = {
'success': True,
'button': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
button = account.create_button(name, price_string, price_currency_iso)
self.assertIsInstance(button, Button)
@hp.activate
def test_get_orders(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'orders': [
{'order': {'id': '1'}},
{'order': {'id': '2'}},
{'order': {'id': '3'}},
],
}
response = account.get_orders()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.orders), 3)
for order in response.orders:
self.assertIsInstance(order, Order)
@hp.activate
def test_get_order(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
order_id = 'fakeorderid'
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'missing_order_key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_order(order_id)
data = {'order': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_order(order_id)
data = {'order': {'id': '1'}}
order = account.get_order(order_id)
self.assertIsInstance(order, Order)
@hp.activate
def test_create_order(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
button_data = request_data.get('button')
self.assertIsInstance(button_data, dict)
for key in ['name', 'price_string', 'price_currency_iso']:
self.assertTrue(key in button_data)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
name = 'b-name'
price_string = 'b-price'
price_currency_iso = 'BTC'
with self.assertRaises(APIError):
data = {
'success': False,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
account.create_order(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'order': 'wrong-type'}
account.create_order(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-order-key': True}
account.create_order(name, price_string, price_currency_iso)
data = {
'success': True,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
order = account.create_order(name, price_string, price_currency_iso)
self.assertIsInstance(order, Order)
@hp.activate
def test_buy(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
self.assertEqual(request_data.get('account_id'), account.id)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transfer': {'id': 'transferid'}}
account.buy('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transfer': 'wrong-type'}
account.buy('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transfer-key': True}
account.buy('1.0')
data = {'success': True, 'transfer': {'id': 'transferid'}}
transfer = account.buy('1.0')
self.assertIsInstance(transfer, Transfer)
@hp.activate
def test_sell(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
self.assertEqual(request_data.get('account_id'), account.id)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transfer': {'id': 'transferid'}}
account.sell('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transfer': 'wrong-type'}
account.sell('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transfer-key': True}
account.sell('1.0')
data = {'success': True, 'transfer': {'id': 'transferid'}}
transfer = account.sell('1.0')
self.assertIsInstance(transfer, Transfer)
class TestButton(unittest2.TestCase):
@hp.activate
def test_get_orders(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
initial_name = 'name'
initial_price_string = '12.0'
initial_price_currency_iso = 'USD'
button = account.load({
'button': {
'id': '1',
'name': initial_name,
'price_string': initial_price_string,
'price_currency_iso': initial_price_currency_iso,
'code': 'buttoncode',
},
}).button
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'orders': [
{'order': {'id': '1'}},
{'order': {'id': '2'}},
{'order': {'id': '3'}},
],
}
response = button.get_orders()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.orders), 3)
for order in response.orders:
self.assertIsInstance(order, Order)
@hp.activate
def test_create_order(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
initial_name = 'name'
initial_price_string = '12.0'
initial_price_currency_iso = 'USD'
button = account.load({
'button': {
'id': '1',
'name': initial_name,
'price_string': initial_price_string,
'price_currency_iso': initial_price_currency_iso,
'code': 'buttoncode',
},
}).button
def server_response(request, uri, headers):
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
name = 'b-name'
price_string = 'b-price'
price_currency_iso = 'BTC'
with self.assertRaises(APIError):
data = {
'success': False,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
button.create_order()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'order': 'wrong-type'}
button.create_order()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-order-key': True}
button.create_order()
data = {
'success': True,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
order = button.create_order()
self.assertIsInstance(order, Order)
class TestMoney(unittest2.TestCase):
def test_str_representation(self):
money = APIObject(None).load({
'amount': '12.0',
'currency': 'BTC',
})
self.assertIsInstance(money, Money)
self.assertTrue(str(money).endswith('BTC 12.0'))
money2 = APIObject(None).load({
'amount': '12.0',
'currency': 'BTC',
'foo': 'Bar',
})
self.assertIsInstance(money2, Money)
self.assertTrue(str(money2).endswith('}'))
class TestOrder(unittest2.TestCase):
@hp.activate
def test_refund(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
order = account.load({
'order': {
'id': '1',
'custom': 'custom',
'button': {
'id': 'fakeid',
'code': 'acode'
},
},
}).order
def server_response(request, uri, headers):
try: req_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
order_data = req_data.get('order')
self.assertIsInstance(order_data, dict)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(UnexpectedDataFormatError):
data = {'order': 'wrong-type'}
order.refund('USD')
with self.assertRaises(UnexpectedDataFormatError):
data = {'missing-order-key': True}
order.refund('USD')
data = {'order': {'id': '1'}}
refunded = order.refund('USD')
self.assertEqual(refunded, data['order'])
self.assertIsInstance(refunded, Order)
class TestTransaction(unittest2.TestCase):
@hp.activate
def test_resend(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transaction = account.load({'transaction': {'id': '1' }}).transaction
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False}
transaction.resend()
data = {'success': True}
self.assertTrue(transaction.resend())
@hp.activate
def test_complete(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transaction = account.load({'transaction': {'id': '1' }}).transaction
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
transaction.complete()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
transaction.complete()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
transaction.complete()
data = {'success': True, 'transaction': {'id': '1'}}
tx = transaction.complete()
self.assertIsInstance(tx, Transaction)
@hp.activate
def test_cancel(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transaction = account.load({'transaction': {'id': '1' }}).transaction
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.DELETE, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False}
transaction.cancel()
data = {'success': True}
self.assertTrue(transaction.cancel())
class TestTransfer(unittest2.TestCase):
@hp.activate
def test_commit(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transfer = account.load({'transfer': {'id': '1' }}).transfer
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transfer': {'id': '1'}}
transfer.commit()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transfer': 'wrong-type'}
transfer.commit()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transfer-key': True}
transfer.commit()
data = {'success': True, 'transfer': {'id': '1'}}
tx = transfer.commit()
self.assertIsInstance(tx, Transfer)
class TestUser(unittest2.TestCase):
@hp.activate
def test_modify(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
    initial_native_currency = 'USD'
initial_time_zone = 'Pacific Time (US & Canada)'
initial_name = 'Test User'
user = account.load({
'user': {
'id': '1',
'name': initial_name,
'native_currency': initial_native_currency,
'time_zone': initial_time_zone,
},
}).user
with self.assertRaises(ValueError):
user.modify()
def server_response(request, uri, headers):
self.assertTrue(uri.endswith(user.id))
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
user_data = request_data.get('user')
self.assertIsInstance(user_data, dict)
return (200, headers, json.dumps(data))
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
new_name = 'Fake Name'
data = {
'success': False,
'user': {
'id': user.id,
'name': new_name,
'native_currency': initial_native_currency,
'time_zone': initial_time_zone,
},
}
user.modify(name=new_name)
self.assertEqual(user.name, initial_name)
self.assertEqual(user.native_currency, initial_native_currency)
self.assertEqual(user.time_zone, initial_time_zone)
with self.assertRaises(UnexpectedDataFormatError):
new_name = 'Fake Name'
data = {'success': True, 'user': 'wrong-type'}
user.modify(name=new_name)
self.assertEqual(user.name, initial_name)
self.assertEqual(user.native_currency, initial_native_currency)
self.assertEqual(user.time_zone, initial_time_zone)
with self.assertRaises(UnexpectedDataFormatError):
new_name = 'Fake Name'
data = {'success': True, 'missing-user-key': True}
user.modify(name=new_name)
self.assertEqual(user.name, initial_name)
self.assertEqual(user.native_currency, initial_native_currency)
self.assertEqual(user.time_zone, initial_time_zone)
new_name = 'Fake Name'
new_native_currency = 'CAD'
new_time_zone = 'Eastern'
data = {
'success': True,
'user': {
'id': user.id,
'name': new_name,
'native_currency': new_native_currency,
'time_zone': new_time_zone,
},
}
user.modify(name=new_name,
time_zone=new_time_zone,
native_currency=new_native_currency)
self.assertEqual(user.name, new_name)
self.assertEqual(user.native_currency, new_native_currency)
self.assertEqual(user.time_zone, new_time_zone)
| jorilallo/coinbase-python | tests/test_model.py | Python | mit | 39,379 |
from engine.constants import BOARD_INDEX, C_PERM_INDEX, WK_SQ_INDEX, BK_SQ_INDEX, EN_PAS_INDEX, NORTH, SOUTH, \
RANK2, RANK7, WKC_INDEX, WQC_INDEX, BKC_INDEX, BQC_INDEX, CASTLE_VOIDED, CASTLED, A1, A8, E1, E8, C1, C8, G1, \
G8, H1, H8, WHITE, BLACK, HALF_MOVE_INDEX, FULL_MOVE_INDEX, TURN_INDEX, B8, B1, D1, D8, F1, F8
from engine.utils import update
from engine import board_hash
import logging
def move_at_state(state, move, live_move=False):
board = state[BOARD_INDEX]
castle_perm = state[C_PERM_INDEX]
white_king_sq = state[WK_SQ_INDEX]
black_king_sq = state[BK_SQ_INDEX]
from_tile_n = move[0]
to_tile_n = move[1]
if state[EN_PAS_INDEX] == to_tile_n and board[from_tile_n] == 'P':
if abs(from_tile_n - to_tile_n) == 11 or abs(from_tile_n - to_tile_n) == 9:
board = update(board, to_tile_n + SOUTH, 'o')
elif state[EN_PAS_INDEX] == to_tile_n and board[from_tile_n] == 'p':
if abs(from_tile_n - to_tile_n) == 11 or abs(from_tile_n - to_tile_n) == 9:
board = update(board, to_tile_n + NORTH, 'o')
en_pass_sq = -1
if board[from_tile_n] == 'P':
if from_tile_n >= RANK2:
if abs(to_tile_n - from_tile_n) == 20:
en_pass_sq = from_tile_n + NORTH
if board[to_tile_n + NORTH] == 'x':
board = update(board, from_tile_n, 'Q')
elif board[from_tile_n] == 'p':
if from_tile_n <= RANK7:
if abs(to_tile_n - from_tile_n) == 20:
en_pass_sq = from_tile_n + SOUTH
if board[to_tile_n + SOUTH] == 'x':
board = update(board, from_tile_n, 'q')
# King move case
elif board[from_tile_n] == 'K':
white_king_sq = to_tile_n
castle_perm = update(castle_perm, WKC_INDEX, CASTLE_VOIDED)
castle_perm = update(castle_perm, WQC_INDEX, CASTLE_VOIDED)
elif board[from_tile_n] == 'k':
black_king_sq = to_tile_n
        castle_perm = update(castle_perm, BKC_INDEX, CASTLE_VOIDED)
        castle_perm = update(castle_perm, BQC_INDEX, CASTLE_VOIDED)
elif board[from_tile_n] == 'R':
if from_tile_n == H1: # king side
castle_perm = update(castle_perm, WKC_INDEX, CASTLE_VOIDED)
elif from_tile_n == A1:
castle_perm = update(castle_perm, WQC_INDEX, CASTLE_VOIDED)
elif board[from_tile_n] == 'r':
if from_tile_n == H8: # king side
castle_perm = update(castle_perm, BKC_INDEX, CASTLE_VOIDED)
elif from_tile_n == A8:
castle_perm = update(castle_perm, BQC_INDEX, CASTLE_VOIDED)
    # Void castling rights when a move lands on (captures) any rook's starting square
if to_tile_n == A1:
castle_perm = update(castle_perm, WQC_INDEX, CASTLE_VOIDED)
elif to_tile_n == H1:
castle_perm = update(castle_perm, WKC_INDEX, CASTLE_VOIDED)
elif to_tile_n == A8:
castle_perm = update(castle_perm, BQC_INDEX, CASTLE_VOIDED)
elif to_tile_n == H8:
castle_perm = update(castle_perm, BKC_INDEX, CASTLE_VOIDED)
if from_tile_n == E1 and to_tile_n == G1 and board[from_tile_n] == 'K': # and castle_perm[0] == 1:
board = update(board, E1, 'o')
board = update(board, F1, 'R')
board = update(board, G1, 'K')
board = update(board, H1, 'o')
white_king_sq = G1
castle_perm = update(castle_perm, WKC_INDEX, CASTLED)
castle_perm = update(castle_perm, WQC_INDEX, CASTLED)
elif from_tile_n == E1 and to_tile_n == C1 and board[from_tile_n] == 'K': # queen side castle
board = update(board, A1, 'o')
board = update(board, B1, 'o')
board = update(board, C1, 'K')
board = update(board, D1, 'R')
board = update(board, E1, 'o')
white_king_sq = C1
castle_perm = update(castle_perm, WKC_INDEX, CASTLED)
castle_perm = update(castle_perm, WQC_INDEX, CASTLED)
elif from_tile_n == E8 and to_tile_n == G8 and board[from_tile_n] == 'k': # king side castle
board = update(board, E8, 'o')
board = update(board, F8, 'r')
board = update(board, G8, 'k')
board = update(board, H8, 'o')
black_king_sq = G8
castle_perm = update(castle_perm, BKC_INDEX, CASTLED)
castle_perm = update(castle_perm, BQC_INDEX, CASTLED)
elif from_tile_n == E8 and to_tile_n == C8 and board[from_tile_n] == 'k': # queen side castle
board = update(board, A8, 'o')
board = update(board, B8, 'o')
        board = update(board, C8, 'k')
        board = update(board, D8, 'r')
board = update(board, E8, 'o')
black_king_sq = C8
castle_perm = update(castle_perm, BKC_INDEX, CASTLED)
castle_perm = update(castle_perm, BQC_INDEX, CASTLED)
else:
if live_move:
if board[to_tile_n] != 'o':
logging.debug('cleared board hash!!!')
print("cleared board hash", board[to_tile_n])
            board_hash.clear()  # clear the shared module-level cache in place (rebinding with `board_hash = {}` would only create a local name)
board = update(board, to_tile_n, board[from_tile_n])
board = update(board, from_tile_n, 'o')
# Change Turns
turn = BLACK if state[TURN_INDEX] == WHITE else WHITE
return [board, turn, en_pass_sq, state[HALF_MOVE_INDEX], state[FULL_MOVE_INDEX], castle_perm, white_king_sq, black_king_sq]
| ElliotVilhelm/IZII | engine/move.py | Python | mit | 5,236 |
""" 1. Parse log file of a webserver
2. Print the filename and number of bytes delivered for 200 responses
"""
import re
import sys
from os import path
import operator
import itertools
log_file_path = "server.log"
log_data = []
pattern = re.compile(r'\[(?P<time>.+)\](\s+\")(?P<requestType>\w+)(\s+)(?P<fileName>.*?)(\sHTTP)\/(?P<httpVersion>.*?)\"\s+(?P<httpResponse>\d+)\s(?P<bytes>\d+)')
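# Hypothetical log line this pattern is written to match (illustrative only):
#   [12/Dec/2015:18:25:11 +0100] "GET /images/logo.png HTTP/1.1" 200 4096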
fileDict = dict()
with open(log_file_path, "r") as file:
for line in file:
pattern_match = pattern.match(line)
log_data.append(pattern_match.groupdict())
dedup_log_data = []
for i in log_data:
if i not in dedup_log_data:
dedup_log_data.append(i)
for item in dedup_log_data:
key = item['fileName']
value = int(item['bytes'])
respCode = item['httpResponse']
    if respCode == '200':
        if key not in fileDict:
            fileDict[key] = value
        else:
            fileDict[key] += value
print(fileDict)
print(dict(sorted(fileDict.items(), key=operator.itemgetter(1))))
sorted_fileDict = dict(sorted(fileDict.items(), key=operator.itemgetter(1)))
out_Dict = dict(itertools.islice(sorted_fileDict.items(), 10))
for k, v in out_Dict.items():
print (str(k) + " " + str(v))
| mudragada/util-scripts | PyProblems/LogFileProcessing/logFileParser.py | Python | mit | 1,318 |
import datetime
from flask_bcrypt import generate_password_hash
from flask_login import UserMixin
from peewee import *
DATABASE = SqliteDatabase(':memory:')
class User(Model):
email = CharField(unique=True)
password = CharField(max_length=100)
join_date = DateTimeField(default=datetime.datetime.now)
bio = CharField(default='')
class Meta:
database = DATABASE
@classmethod
def new(cls, email, password):
cls.create(
email=email,
password=generate_password_hash(password)
)
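# Usage sketch (hypothetical values; assumes initialize() below has been run to create the table):
#   initialize()
#   User.new(email='[email protected]', password='not-a-real-password')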
def initialize():
DATABASE.connect()
DATABASE.create_tables([User], safe=True)
DATABASE.close() | CaseyNord/Treehouse | Build a Social Network with Flask/form_view/models.py | Python | mit | 710 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
from setuptools import setup, find_packages
if setuptools.__version__ < '0.7':
raise RuntimeError("setuptools must be newer than 0.7")
version = "0.1.3"
setup(
name="pinger",
version=version,
author="Pedro Palhares (pedrospdc)",
author_email="[email protected]",
description="Website monitoring tool",
url="https://github.com/pedrospdc/pinger",
download_url="https://github.com/pedrospdc/pinger/tarball/{}".format(version),
packages=find_packages(),
zip_safe=False,
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
install_requires=[
"requests>=2.4.3",
"peewee>=2.4.0"
],
scripts=["bin/pinger"],
)
| pedrospdc/pinger | setup.py | Python | mit | 939 |
#Copyright Mir Immad - RealTimeCam
import cv2
import numpy as np
class VideoCamera(object):
def __init__(self):
self.video = cv2.VideoCapture(0)
def __del__(self):
self.video.release()
def get_frame(self):
success, image = self.video.read()
ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()  # tobytes(); ndarray.tostring() is deprecated in newer NumPy releases
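# Usage sketch (assumes a webcam is available at device index 0):
#   cam = VideoCamera()
#   frame_bytes = cam.get_frame()  # one JPEG-encoded frame as bytes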
| mirimmad/LiveCam | _camera.py | Python | mit | 325 |
"""
Implements counterwallet asset-related support as a counterblock plugin
DEPENDENCIES: This module requires the assets module to be loaded before it.
Python 2.x, as counterblock is still python 2.x
"""
import os
import sys
import time
import datetime
import logging
import decimal
import urllib.request
import urllib.parse
import urllib.error
import json
import operator
import base64
import configparser
import calendar
import pymongo
from bson.son import SON
import dateutil.parser
from counterblock.lib import config, util, blockfeed, blockchain
from counterblock.lib.modules import DEX_PRIORITY_PARSE_TRADEBOOK
from counterblock.lib.processor import MessageProcessor, MempoolMessageProcessor, BlockProcessor, StartUpProcessor, CaughtUpProcessor, RollbackProcessor, API, start_task
from . import assets_trading, dex
D = decimal.Decimal
EIGHT_PLACES = decimal.Decimal(10) ** -8
COMPILE_MARKET_PAIR_INFO_PERIOD = 10 * 60 # in seconds (this is every 10 minutes currently)
COMPILE_ASSET_MARKET_INFO_PERIOD = 30 * 60 # in seconds (this is every 30 minutes currently)
logger = logging.getLogger(__name__)
@API.add_method
def get_market_price_summary(asset1, asset2, with_last_trades=0):
# DEPRECATED 1.5
result = assets_trading.get_market_price_summary(asset1, asset2, with_last_trades)
return result if result is not None else False
#^ due to current bug in our jsonrpc stack, just return False if None is returned
@API.add_method
def get_market_cap_history(start_ts=None, end_ts=None):
now_ts = calendar.timegm(time.gmtime())
if not end_ts: # default to current datetime
end_ts = now_ts
if not start_ts: # default to 30 days before the end date
start_ts = end_ts - (30 * 24 * 60 * 60)
data = {}
results = {}
    #^ data[market_cap_as][asset] = [[interval_time_ms, market_cap], ...] is built first, then flattened into
    #  results[market_cap_as] = [{'name': asset, 'data': [[interval_time_ms, market_cap], ...]}, ...]
for market_cap_as in (config.XCP, config.BTC):
caps = config.mongo_db.asset_marketcap_history.aggregate([
{"$match": {
"market_cap_as": market_cap_as,
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"asset": 1,
"market_cap": 1,
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"asset": "$asset", "year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"market_cap": {"$avg": "$market_cap"}, # use the average marketcap during the interval
}},
])
data[market_cap_as] = {}
for e in caps:
interval_time = int(calendar.timegm(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day'], e['_id']['hour']).timetuple()) * 1000)
data[market_cap_as].setdefault(e['_id']['asset'], [])
data[market_cap_as][e['_id']['asset']].append([interval_time, e['market_cap']])
results[market_cap_as] = []
for asset in data[market_cap_as]:
#for z in data[market_cap_as][asset]: assert z[0] and z[0] > 0 and z[1] and z[1] >= 0
results[market_cap_as].append(
{'name': asset, 'data': sorted(data[market_cap_as][asset], key=operator.itemgetter(0))})
return results
@API.add_method
def get_market_info(assets):
assets_market_info = list(config.mongo_db.asset_market_info.find({'asset': {'$in': assets}}, {'_id': 0}))
extended_asset_info = config.mongo_db.asset_extended_info.find({'asset': {'$in': assets}})
extended_asset_info_dict = {}
for e in extended_asset_info:
if not e.get('disabled', False): # skip assets marked disabled
extended_asset_info_dict[e['asset']] = e
for a in assets_market_info:
if a['asset'] in extended_asset_info_dict and extended_asset_info_dict[a['asset']].get('processed', False):
extended_info = extended_asset_info_dict[a['asset']]
a['extended_image'] = bool(extended_info.get('image', ''))
a['extended_description'] = extended_info.get('description', '')
a['extended_website'] = extended_info.get('website', '')
a['extended_pgpsig'] = extended_info.get('pgpsig', '')
else:
a['extended_image'] = a['extended_description'] = a['extended_website'] = a['extended_pgpsig'] = ''
return assets_market_info
@API.add_method
def get_market_info_leaderboard(limit=100):
"""returns market leaderboard data for both the XCP and BTC markets"""
# do two queries because we limit by our sorted results, and we might miss an asset with a high BTC trading value
# but with little or no XCP trading activity, for instance if we just did one query
assets_market_info_xcp = list(config.mongo_db.asset_market_info.find({}, {'_id': 0}).sort('market_cap_in_{}'.format(config.XCP.lower()), pymongo.DESCENDING).limit(limit))
assets_market_info_btc = list(config.mongo_db.asset_market_info.find({}, {'_id': 0}).sort('market_cap_in_{}'.format(config.BTC.lower()), pymongo.DESCENDING).limit(limit))
assets_market_info = {
config.XCP.lower(): [a for a in assets_market_info_xcp if a['price_in_{}'.format(config.XCP.lower())]],
config.BTC.lower(): [a for a in assets_market_info_btc if a['price_in_{}'.format(config.BTC.lower())]]
}
# throw on extended info, if it exists for a given asset
assets = list(set([a['asset'] for a in assets_market_info[config.XCP.lower()]] + [a['asset'] for a in assets_market_info[config.BTC.lower()]]))
extended_asset_info = config.mongo_db.asset_extended_info.find({'asset': {'$in': assets}})
extended_asset_info_dict = {}
for e in extended_asset_info:
if not e.get('disabled', False): # skip assets marked disabled
extended_asset_info_dict[e['asset']] = e
for r in (assets_market_info[config.XCP.lower()], assets_market_info[config.BTC.lower()]):
for a in r:
if a['asset'] in extended_asset_info_dict:
extended_info = extended_asset_info_dict[a['asset']]
if 'extended_image' not in a or 'extended_description' not in a or 'extended_website' not in a:
continue # asset has been recognized as having a JSON file description, but has not been successfully processed yet
a['extended_image'] = bool(extended_info.get('image', ''))
a['extended_description'] = extended_info.get('description', '')
a['extended_website'] = extended_info.get('website', '')
else:
a['extended_image'] = a['extended_description'] = a['extended_website'] = ''
return assets_market_info
@API.add_method
def get_market_price_history(asset1, asset2, start_ts=None, end_ts=None, as_dict=False):
"""Return block-by-block aggregated market history data for the specified asset pair, within the specified date range.
@returns List of lists (or list of dicts, if as_dict is specified).
* If as_dict is False, each embedded list has 8 elements [block time (epoch in MS), open, high, low, close, volume, # trades in block, block index]
* If as_dict is True, each dict in the list has the keys: block_time (epoch in MS), block_index, open, high, low, close, vol, count
Aggregate on an an hourly basis
"""
now_ts = calendar.timegm(time.gmtime())
if not end_ts: # default to current datetime
end_ts = now_ts
if not start_ts: # default to 180 days before the end date
start_ts = end_ts - (180 * 24 * 60 * 60)
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
# get ticks -- open, high, low, close, volume
result = config.mongo_db.trades.aggregate([
{"$match": {
"base_asset": base_asset,
"quote_asset": quote_asset,
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"block_index": 1,
"unit_price": 1,
"base_quantity_normalized": 1 # to derive volume
}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}},
{"$sort": SON([("_id.year", pymongo.ASCENDING), ("_id.month", pymongo.ASCENDING), ("_id.day", pymongo.ASCENDING), ("_id.hour", pymongo.ASCENDING)])},
])
result = list(result)
if not len(result):
return False
midline = [((r['high'] + r['low']) / 2.0) for r in result]
if as_dict:
for i in range(len(result)):
result[i]['interval_time'] = int(calendar.timegm(datetime.datetime(
result[i]['_id']['year'], result[i]['_id']['month'], result[i]['_id']['day'], result[i]['_id']['hour']).timetuple()) * 1000)
result[i]['midline'] = midline[i]
del result[i]['_id']
return result
else:
list_result = []
for i in range(len(result)):
list_result.append([
int(calendar.timegm(datetime.datetime(
result[i]['_id']['year'], result[i]['_id']['month'], result[i]['_id']['day'], result[i]['_id']['hour']).timetuple()) * 1000),
result[i]['open'], result[i]['high'], result[i]['low'], result[i]['close'], result[i]['vol'],
result[i]['count'], midline[i]
])
return list_result
@API.add_method
def get_trade_history(asset1=None, asset2=None, start_ts=None, end_ts=None, limit=50):
"""
    Gets the last N trades within a specific date range (normally for a specified asset pair, but this can
be left blank to get any/all trades).
"""
assert (asset1 and asset2) or (not asset1 and not asset2) # cannot have one asset, but not the other
if limit > 500:
raise Exception("Requesting history of too many trades")
now_ts = calendar.timegm(time.gmtime())
if not end_ts: # default to current datetime
end_ts = now_ts
if not start_ts: # default to 30 days before the end date
start_ts = end_ts - (30 * 24 * 60 * 60)
filters = {
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}
if asset1 and asset2:
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
filters["base_asset"] = base_asset
filters["quote_asset"] = quote_asset
last_trades = config.mongo_db.trades.find(filters, {'_id': 0}).sort("block_time", pymongo.DESCENDING).limit(limit)
if not last_trades.count():
return False # no suitable trade data to form a market price
last_trades = list(last_trades)
return last_trades
def _get_order_book(base_asset, quote_asset,
bid_book_min_pct_fee_provided=None, bid_book_min_pct_fee_required=None, bid_book_max_pct_fee_required=None,
ask_book_min_pct_fee_provided=None, ask_book_min_pct_fee_required=None, ask_book_max_pct_fee_required=None):
"""Gets the current order book for a specified asset pair
    @param bid_book_*/ask_book_* min/max pct fee kwargs: Only relevant when one side of the pair is BTC. If specified,
    the corresponding book is pruned down to only show orders whose BTC fee_provided / fee_required (expressed as a
    fraction of the order's give/get quantity) satisfies the given bounds
"""
base_asset_info = config.mongo_db.tracked_assets.find_one({'asset': base_asset})
quote_asset_info = config.mongo_db.tracked_assets.find_one({'asset': quote_asset})
if not base_asset_info or not quote_asset_info:
raise Exception("Invalid asset(s)")
# TODO: limit # results to 8 or so for each book (we have to sort as well to limit)
base_bid_filters = [
{"field": "get_asset", "op": "==", "value": base_asset},
{"field": "give_asset", "op": "==", "value": quote_asset},
]
base_ask_filters = [
{"field": "get_asset", "op": "==", "value": quote_asset},
{"field": "give_asset", "op": "==", "value": base_asset},
]
if base_asset == config.BTC or quote_asset == config.BTC:
extra_filters = [
{'field': 'give_remaining', 'op': '>', 'value': 0}, # don't show empty BTC orders
{'field': 'get_remaining', 'op': '>', 'value': 0}, # don't show empty BTC orders
{'field': 'fee_required_remaining', 'op': '>=', 'value': 0},
{'field': 'fee_provided_remaining', 'op': '>=', 'value': 0},
]
base_bid_filters += extra_filters
base_ask_filters += extra_filters
base_bid_orders = util.call_jsonrpc_api(
"get_orders", {
'filters': base_bid_filters,
'show_expired': False,
'status': 'open',
'order_by': 'block_index',
'order_dir': 'asc',
}, abort_on_error=True)['result']
base_ask_orders = util.call_jsonrpc_api(
"get_orders", {
'filters': base_ask_filters,
'show_expired': False,
'status': 'open',
'order_by': 'block_index',
'order_dir': 'asc',
}, abort_on_error=True)['result']
def get_o_pct(o):
if o['give_asset'] == config.BTC: # NB: fee_provided could be zero here
pct_fee_provided = float((D(o['fee_provided_remaining']) / D(o['give_quantity'])))
else:
pct_fee_provided = None
if o['get_asset'] == config.BTC: # NB: fee_required could be zero here
pct_fee_required = float((D(o['fee_required_remaining']) / D(o['get_quantity'])))
else:
pct_fee_required = None
return pct_fee_provided, pct_fee_required
# filter results by pct_fee_provided and pct_fee_required for BTC pairs as appropriate
filtered_base_bid_orders = []
filtered_base_ask_orders = []
if base_asset == config.BTC or quote_asset == config.BTC:
for o in base_bid_orders:
pct_fee_provided, pct_fee_required = get_o_pct(o)
addToBook = True
if bid_book_min_pct_fee_provided is not None and pct_fee_provided is not None and pct_fee_provided < bid_book_min_pct_fee_provided:
addToBook = False
if bid_book_min_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required < bid_book_min_pct_fee_required:
addToBook = False
if bid_book_max_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required > bid_book_max_pct_fee_required:
addToBook = False
if addToBook:
filtered_base_bid_orders.append(o)
for o in base_ask_orders:
pct_fee_provided, pct_fee_required = get_o_pct(o)
addToBook = True
if ask_book_min_pct_fee_provided is not None and pct_fee_provided is not None and pct_fee_provided < ask_book_min_pct_fee_provided:
addToBook = False
if ask_book_min_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required < ask_book_min_pct_fee_required:
addToBook = False
if ask_book_max_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required > ask_book_max_pct_fee_required:
addToBook = False
if addToBook:
filtered_base_ask_orders.append(o)
else:
filtered_base_bid_orders += base_bid_orders
filtered_base_ask_orders += base_ask_orders
def make_book(orders, isBidBook):
book = {}
for o in orders:
if o['give_asset'] == base_asset:
if base_asset == config.BTC and o['give_quantity'] <= config.ORDER_BTC_DUST_LIMIT_CUTOFF:
continue # filter dust orders, if necessary
give_quantity = blockchain.normalize_quantity(o['give_quantity'], base_asset_info['divisible'])
get_quantity = blockchain.normalize_quantity(o['get_quantity'], quote_asset_info['divisible'])
unit_price = float((D(get_quantity) / D(give_quantity)))
remaining = blockchain.normalize_quantity(o['give_remaining'], base_asset_info['divisible'])
else:
if quote_asset == config.BTC and o['give_quantity'] <= config.ORDER_BTC_DUST_LIMIT_CUTOFF:
continue # filter dust orders, if necessary
give_quantity = blockchain.normalize_quantity(o['give_quantity'], quote_asset_info['divisible'])
get_quantity = blockchain.normalize_quantity(o['get_quantity'], base_asset_info['divisible'])
unit_price = float((D(give_quantity) / D(get_quantity)))
remaining = blockchain.normalize_quantity(o['get_remaining'], base_asset_info['divisible'])
id = "%s_%s_%s" % (base_asset, quote_asset, unit_price)
            #^ key = {base}_{quote}_{unit_price}, values ref entries in book
book.setdefault(id, {'unit_price': unit_price, 'quantity': 0, 'count': 0})
book[id]['quantity'] += remaining # base quantity outstanding
book[id]['count'] += 1 # num orders at this price level
book = sorted(iter(book.values()), key=operator.itemgetter('unit_price'), reverse=isBidBook)
#^ convert to list and sort -- bid book = descending, ask book = ascending
return book
# compile into a single book, at volume tiers
base_bid_book = make_book(filtered_base_bid_orders, True)
base_ask_book = make_book(filtered_base_ask_orders, False)
# get stats like the spread and median
if base_bid_book and base_ask_book:
# don't do abs(), as this is "the amount by which the ask price exceeds the bid", so I guess it could be negative
# if there is overlap in the book (right?)
bid_ask_spread = float((D(base_ask_book[0]['unit_price']) - D(base_bid_book[0]['unit_price'])))
bid_ask_median = float((D(max(base_ask_book[0]['unit_price'], base_bid_book[0]['unit_price'])) - (D(abs(bid_ask_spread)) / 2)))
else:
bid_ask_spread = 0
bid_ask_median = 0
# compose depth and round out quantities
bid_depth = D(0)
for o in base_bid_book:
o['quantity'] = float(D(o['quantity']))
bid_depth += D(o['quantity'])
o['depth'] = float(D(bid_depth))
bid_depth = float(D(bid_depth))
ask_depth = D(0)
for o in base_ask_book:
o['quantity'] = float(D(o['quantity']))
ask_depth += D(o['quantity'])
o['depth'] = float(D(ask_depth))
ask_depth = float(D(ask_depth))
# compose raw orders
orders = filtered_base_bid_orders + filtered_base_ask_orders
for o in orders:
# add in the blocktime to help makes interfaces more user-friendly (i.e. avoid displaying block
# indexes and display datetimes instead)
o['block_time'] = calendar.timegm(util.get_block_time(o['block_index']).timetuple()) * 1000
result = {
'base_bid_book': base_bid_book,
'base_ask_book': base_ask_book,
'bid_depth': bid_depth,
'ask_depth': ask_depth,
'bid_ask_spread': bid_ask_spread,
'bid_ask_median': bid_ask_median,
'raw_orders': orders,
'base_asset': base_asset,
'quote_asset': quote_asset
}
return result
@API.add_method
def get_order_book_simple(asset1, asset2, min_pct_fee_provided=None, max_pct_fee_required=None):
# DEPRECATED 1.5
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
result = _get_order_book(
base_asset, quote_asset,
bid_book_min_pct_fee_provided=min_pct_fee_provided,
bid_book_max_pct_fee_required=max_pct_fee_required,
ask_book_min_pct_fee_provided=min_pct_fee_provided,
ask_book_max_pct_fee_required=max_pct_fee_required)
return result
@API.add_method
def get_order_book_buysell(buy_asset, sell_asset, pct_fee_provided=None, pct_fee_required=None):
# DEPRECATED 1.5
base_asset, quote_asset = util.assets_to_asset_pair(buy_asset, sell_asset)
bid_book_min_pct_fee_provided = None
bid_book_min_pct_fee_required = None
bid_book_max_pct_fee_required = None
ask_book_min_pct_fee_provided = None
ask_book_min_pct_fee_required = None
ask_book_max_pct_fee_required = None
if base_asset == config.BTC:
if buy_asset == config.BTC:
# if BTC is base asset and we're buying it, we're buying the BASE. we require a BTC fee (we're on the bid (bottom) book and we want a lower price)
# - show BASE buyers (bid book) that require a BTC fee >= what we require (our side of the book)
# - show BASE sellers (ask book) that provide a BTC fee >= what we require
bid_book_min_pct_fee_required = pct_fee_required # my competition at the given fee required
ask_book_min_pct_fee_provided = pct_fee_required
elif sell_asset == config.BTC:
# if BTC is base asset and we're selling it, we're selling the BASE. we provide a BTC fee (we're on the ask (top) book and we want a higher price)
# - show BASE buyers (bid book) that provide a BTC fee >= what we provide
# - show BASE sellers (ask book) that require a BTC fee <= what we provide (our side of the book)
bid_book_max_pct_fee_required = pct_fee_provided
ask_book_min_pct_fee_provided = pct_fee_provided # my competition at the given fee provided
elif quote_asset == config.BTC:
assert base_asset == config.XCP # only time when this is the case
if buy_asset == config.BTC:
# if BTC is quote asset and we're buying it, we're selling the BASE. we require a BTC fee (we're on the ask (top) book and we want a higher price)
# - show BASE buyers (bid book) that provide a BTC fee >= what we require
# - show BASE sellers (ask book) that require a BTC fee >= what we require (our side of the book)
bid_book_min_pct_fee_provided = pct_fee_required
ask_book_min_pct_fee_required = pct_fee_required # my competition at the given fee required
elif sell_asset == config.BTC:
# if BTC is quote asset and we're selling it, we're buying the BASE. we provide a BTC fee (we're on the bid (bottom) book and we want a lower price)
# - show BASE buyers (bid book) that provide a BTC fee >= what we provide (our side of the book)
# - show BASE sellers (ask book) that require a BTC fee <= what we provide
            bid_book_min_pct_fee_provided = pct_fee_provided  # my competition at the given fee provided
ask_book_max_pct_fee_required = pct_fee_provided
result = _get_order_book(
base_asset, quote_asset,
bid_book_min_pct_fee_provided=bid_book_min_pct_fee_provided,
bid_book_min_pct_fee_required=bid_book_min_pct_fee_required,
bid_book_max_pct_fee_required=bid_book_max_pct_fee_required,
ask_book_min_pct_fee_provided=ask_book_min_pct_fee_provided,
ask_book_min_pct_fee_required=ask_book_min_pct_fee_required,
ask_book_max_pct_fee_required=ask_book_max_pct_fee_required)
# filter down raw_orders to be only open sell orders for what the caller is buying
open_sell_orders = []
for o in result['raw_orders']:
if o['give_asset'] == buy_asset:
open_sell_orders.append(o)
result['raw_orders'] = open_sell_orders
return result
@API.add_method
def get_users_pairs(addresses=[], max_pairs=12):
return dex.get_users_pairs(addresses, max_pairs, quote_assets=[config.XCP, config.XBTC])
@API.add_method
def get_market_orders(asset1, asset2, addresses=[], min_fee_provided=0.95, max_fee_required=0.95):
return dex.get_market_orders(asset1, asset2, addresses, None, min_fee_provided, max_fee_required)
@API.add_method
def get_market_trades(asset1, asset2, addresses=[], limit=50):
return dex.get_market_trades(asset1, asset2, addresses, limit)
@API.add_method
def get_markets_list(quote_asset=None, order_by=None):
return dex.get_markets_list(quote_asset=quote_asset, order_by=order_by)
@API.add_method
def get_market_details(asset1, asset2, min_fee_provided=0.95, max_fee_required=0.95):
return dex.get_market_details(asset1, asset2, min_fee_provided, max_fee_required)
def task_compile_asset_pair_market_info():
assets_trading.compile_asset_pair_market_info()
# all done for this run...call again in a bit
start_task(task_compile_asset_pair_market_info, delay=COMPILE_MARKET_PAIR_INFO_PERIOD)
def task_compile_asset_market_info():
assets_trading.compile_asset_market_info()
# all done for this run...call again in a bit
start_task(task_compile_asset_market_info, delay=COMPILE_ASSET_MARKET_INFO_PERIOD)
@MessageProcessor.subscribe(priority=DEX_PRIORITY_PARSE_TRADEBOOK)
def parse_trade_book(msg, msg_data):
# book trades
if(msg['category'] == 'order_matches' and
((msg['command'] == 'update' and msg_data['status'] == 'completed') or # for a trade with BTC involved, but that is settled (completed)
('forward_asset' in msg_data and msg_data['forward_asset'] != config.BTC and msg_data['backward_asset'] != config.BTC)
)
): # or for a trade without BTC on either end
if msg['command'] == 'update' and msg_data['status'] == 'completed':
# an order is being updated to a completed status (i.e. a BTCpay has completed)
tx0_hash, tx1_hash = msg_data['order_match_id'][:64], msg_data['order_match_id'][65:]
# get the order_match this btcpay settles
order_match = util.jsonrpc_api(
"get_order_matches",
{'filters': [
{'field': 'tx0_hash', 'op': '==', 'value': tx0_hash},
{'field': 'tx1_hash', 'op': '==', 'value': tx1_hash}]
}, abort_on_error=False)['result'][0]
else:
assert msg_data['status'] == 'completed' # should not enter a pending state for non BTC matches
order_match = msg_data
forward_asset_info = config.mongo_db.tracked_assets.find_one({'asset': order_match['forward_asset']})
backward_asset_info = config.mongo_db.tracked_assets.find_one({'asset': order_match['backward_asset']})
assert forward_asset_info and backward_asset_info
base_asset, quote_asset = util.assets_to_asset_pair(order_match['forward_asset'], order_match['backward_asset'])
# don't create trade records from order matches with BTC that are under the dust limit
if((order_match['forward_asset'] == config.BTC and
order_match['forward_quantity'] <= config.ORDER_BTC_DUST_LIMIT_CUTOFF)
or (order_match['backward_asset'] == config.BTC and
order_match['backward_quantity'] <= config.ORDER_BTC_DUST_LIMIT_CUTOFF)):
logger.debug("Order match %s ignored due to %s under dust limit." % (order_match['tx0_hash'] + order_match['tx1_hash'], config.BTC))
return 'ABORT_THIS_MESSAGE_PROCESSING'
# take divisible trade quantities to floating point
forward_quantity = blockchain.normalize_quantity(order_match['forward_quantity'], forward_asset_info['divisible'])
backward_quantity = blockchain.normalize_quantity(order_match['backward_quantity'], backward_asset_info['divisible'])
# compose trade
trade = {
'block_index': config.state['cur_block']['block_index'],
'block_time': config.state['cur_block']['block_time_obj'],
                'message_index': msg['message_index'], # secondary temporal ordering (based on when the message was parsed)
'order_match_id': order_match['tx0_hash'] + '_' + order_match['tx1_hash'],
'order_match_tx0_index': order_match['tx0_index'],
'order_match_tx1_index': order_match['tx1_index'],
'order_match_tx0_address': order_match['tx0_address'],
'order_match_tx1_address': order_match['tx1_address'],
'base_asset': base_asset,
'quote_asset': quote_asset,
'base_quantity': order_match['forward_quantity'] if order_match['forward_asset'] == base_asset else order_match['backward_quantity'],
'quote_quantity': order_match['backward_quantity'] if order_match['forward_asset'] == base_asset else order_match['forward_quantity'],
'base_quantity_normalized': forward_quantity if order_match['forward_asset'] == base_asset else backward_quantity,
'quote_quantity_normalized': backward_quantity if order_match['forward_asset'] == base_asset else forward_quantity,
}
d = D(trade['quote_quantity_normalized']) / D(trade['base_quantity_normalized'])
d = d.quantize(EIGHT_PLACES, rounding=decimal.ROUND_HALF_EVEN, context=decimal.Context(prec=30))
trade['unit_price'] = float(d)
d = D(trade['base_quantity_normalized']) / D(trade['quote_quantity_normalized'])
d = d.quantize(EIGHT_PLACES, rounding=decimal.ROUND_HALF_EVEN, context=decimal.Context(prec=30))
trade['unit_price_inverse'] = float(d)
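            # Rounding sketch (hypothetical quantities): with base_quantity_normalized = 3 and
            # quote_quantity_normalized = 1, unit_price quantizes to 0.33333333 and unit_price_inverse
            # to 3.0, since both ratios are rounded to EIGHT_PLACES with ROUND_HALF_EVEN.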
config.mongo_db.trades.insert(trade)
logger.info("Procesed Trade from tx %s :: %s" % (msg['message_index'], trade))
@StartUpProcessor.subscribe()
def init():
# init db and indexes
# trades
config.mongo_db.trades.ensure_index(
[("base_asset", pymongo.ASCENDING),
("quote_asset", pymongo.ASCENDING),
("block_time", pymongo.DESCENDING)
])
    config.mongo_db.trades.ensure_index( # tasks.py and elsewhere (for singular block_index index access)
[("block_index", pymongo.ASCENDING),
("base_asset", pymongo.ASCENDING),
("quote_asset", pymongo.ASCENDING)
])
# asset_market_info
config.mongo_db.asset_market_info.ensure_index('asset', unique=True)
# asset_marketcap_history
config.mongo_db.asset_marketcap_history.ensure_index('block_index')
config.mongo_db.asset_marketcap_history.ensure_index( # tasks.py
[
("market_cap_as", pymongo.ASCENDING),
("asset", pymongo.ASCENDING),
("block_index", pymongo.DESCENDING)
])
config.mongo_db.asset_marketcap_history.ensure_index( # api.py
[
("market_cap_as", pymongo.ASCENDING),
("block_time", pymongo.DESCENDING)
])
# asset_pair_market_info
config.mongo_db.asset_pair_market_info.ensure_index( # event.py, api.py
[("base_asset", pymongo.ASCENDING),
("quote_asset", pymongo.ASCENDING)
], unique=True)
config.mongo_db.asset_pair_market_info.ensure_index('last_updated')
@CaughtUpProcessor.subscribe()
def start_tasks():
start_task(task_compile_asset_pair_market_info)
start_task(task_compile_asset_market_info)
@RollbackProcessor.subscribe()
def process_rollback(max_block_index):
if not max_block_index: # full reparse
config.mongo_db.trades.drop()
config.mongo_db.asset_market_info.drop()
config.mongo_db.asset_marketcap_history.drop()
config.mongo_db.pair_market_info.drop()
else: # rollback
config.mongo_db.trades.remove({"block_index": {"$gt": max_block_index}})
config.mongo_db.asset_marketcap_history.remove({"block_index": {"$gt": max_block_index}})
| CounterpartyXCP/counterblock | counterblock/lib/modules/dex/__init__.py | Python | mit | 32,660 |
def Woody():
# complete
print "Reach for the sky but don't burn your wings!"# this will make it much easier in future problems to see that something is actually happening | forgeousgeorge/new_dir | code.py | Python | mit | 179 |
import subprocess
import re
import os
from app import util
BLAME_NAME_REX = re.compile(r'\(([\w\s]+)\d{4}')
def git_path(path):
"""Returns the top-level git path."""
dir_ = path
if os.path.isfile(path):
dir_ = os.path.split(path)[0]
proc = subprocess.Popen(
['git', 'rev-parse', '--show-toplevel'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=dir_
)
out = proc.communicate()[0]
if out:
return out.strip()
return None
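# Usage sketch (hypothetical path, assuming the file lives inside a git checkout):
#   git_path('/home/user/project/app/util.py')
# shells out to `git rev-parse --show-toplevel` and would return '/home/user/project';
# it returns None when the path is not inside a working tree.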
def git_name():
return subprocess.check_output(["git", "config", "user.name"]).strip()
def git_branch(path):
working_dir = path
if not os.path.isdir(path):
working_dir = os.path.split(path)[0]
proc = subprocess.Popen(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir
)
out, err = proc.communicate()
if err:
return None
return out.strip()
def git_branch_files(path):
path = util.path_dir(path)
if not path:
raise Exception("Bad path: {}".format(path))
top_dir = git_path(path)
proc = subprocess.Popen(
["git", "diff", "--name-only"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=top_dir
)
out = proc.communicate()[0]
all_files = set(out.splitlines())
branch = git_branch(path)
if branch != 'master':
proc = subprocess.Popen(
["git", "diff", "--name-only", "master..HEAD"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=path
)
out = proc.communicate()[0]
all_files.update(out.splitlines())
return [os.path.join(top_dir, i) for i in all_files if i]
def blame(path):
working_dir = os.path.split(path)[0]
proc = subprocess.Popen(
['git', 'blame', path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir
)
out, err = proc.communicate()
blame_lines = (out + err).splitlines()
result = {}
for i, line in enumerate(blame_lines):
match = BLAME_NAME_REX.search(line)
if match:
result[i] = match.group(1).strip()
else:
result[i] = None
return result
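# Example of the line format BLAME_NAME_REX targets (hypothetical `git blame` output):
#   "a1b2c3d4 (Jane Doe 2014-03-01 12:00:00 -0500  7) import os"
# The group captures everything between '(' and the first four-digit year, so the author
# recorded for that line is "Jane Doe" after strip().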
| harveyr/thunderbox | app/lintblame/git.py | Python | mit | 2,301 |
# -*- coding: utf-8 -*-
from django.urls import reverse_lazy
from django.shortcuts import redirect
from itertools import chain
from datetime import datetime
from decimal import Decimal
from djangosige.apps.base.custom_views import CustomDetailView, CustomCreateView, CustomListView
from djangosige.apps.estoque.forms import EntradaEstoqueForm, SaidaEstoqueForm, TransferenciaEstoqueForm, ItensMovimentoFormSet
from djangosige.apps.estoque.models import MovimentoEstoque, EntradaEstoque, SaidaEstoque, TransferenciaEstoque, ProdutoEstocado
class MovimentoEstoqueMixin(object):
def adicionar_novo_movimento_estoque(self, itens_mvmt_obj, pform, lista_produtos, lista_produtos_estocados):
prod = itens_mvmt_obj.produto
lista_produtos.append(prod)
        # Update the products' current stock value
if prod.estoque_atual is not None and isinstance(self.object, EntradaEstoque):
prod_estocado = ProdutoEstocado.objects.get_or_create(
local=self.object.local_dest, produto=itens_mvmt_obj.produto)[0]
prod_estocado.quantidade = prod_estocado.quantidade + itens_mvmt_obj.quantidade
lista_produtos_estocados.append(prod_estocado)
prod.estoque_atual = prod.estoque_atual + itens_mvmt_obj.quantidade
elif prod.estoque_atual is not None and isinstance(self.object, SaidaEstoque):
prod_estocado = ProdutoEstocado.objects.get_or_create(
local=self.object.local_orig, produto=itens_mvmt_obj.produto)[0]
if itens_mvmt_obj.quantidade > prod_estocado.quantidade:
itens_mvmt_obj.quantidade = prod_estocado.quantidade
prod_estocado.quantidade = Decimal('0.00')
else:
prod_estocado.quantidade = prod_estocado.quantidade - itens_mvmt_obj.quantidade
lista_produtos_estocados.append(prod_estocado)
if prod.estoque_atual < itens_mvmt_obj.quantidade:
pform.add_error('quantidade', 'Quantidade retirada do estoque maior que o estoque atual (' +
str(prod.estoque_atual).replace('.', ',') + ') do produto.')
else:
prod.estoque_atual = prod.estoque_atual - itens_mvmt_obj.quantidade
elif isinstance(self.object, TransferenciaEstoque):
prod_estocado_orig = ProdutoEstocado.objects.get_or_create(
local=self.object.local_estoque_orig, produto=itens_mvmt_obj.produto)[0]
prod_estocado_dest = ProdutoEstocado.objects.get_or_create(
local=self.object.local_estoque_dest, produto=itens_mvmt_obj.produto)[0]
if itens_mvmt_obj.quantidade > prod_estocado_orig.quantidade:
itens_mvmt_obj.quantidade = prod_estocado_orig.quantidade
prod_estocado_orig.quantidade = Decimal('0.00')
else:
prod_estocado_orig.quantidade = prod_estocado_orig.quantidade - \
itens_mvmt_obj.quantidade
prod_estocado_dest.quantidade = prod_estocado_dest.quantidade + \
itens_mvmt_obj.quantidade
lista_produtos_estocados.append(prod_estocado_orig)
lista_produtos_estocados.append(prod_estocado_dest)
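# Illustrative walk-through (hypothetical quantities): for a SaidaEstoque item requesting 5 units while
# the matching ProdutoEstocado only holds 3, the movement quantity is clamped to 3 and the local stock
# drops to 0; prod.estoque_atual is only decremented when it covers the clamped quantity, otherwise a
# form error is attached to the 'quantidade' field.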
class AdicionarMovimentoEstoqueBaseView(CustomCreateView, MovimentoEstoqueMixin):
permission_codename = 'add_movimentoestoque'
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data, pk=self.object.pk)
def get_context_data(self, **kwargs):
context = super(AdicionarMovimentoEstoqueBaseView,
self).get_context_data(**kwargs)
return self.view_context(context)
def get(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = form_class()
form.initial['data_movimento'] = datetime.today().strftime('%d/%m/%Y')
itens_form = ItensMovimentoFormSet(prefix='itens_form')
return self.render_to_response(self.get_context_data(form=form, itens_form=itens_form,))
def post(self, request, *args, **kwargs):
self.object = None
        # Strip '.' from the decimal fields
req_post = request.POST.copy()
for key in req_post:
if ('quantidade' in key or
'valor' in key or
'total' in key):
req_post[key] = req_post[key].replace('.', '')
request.POST = req_post
form_class = self.get_form_class()
form = self.get_form(form_class)
itens_form = ItensMovimentoFormSet(request.POST, prefix='itens_form')
if (form.is_valid() and itens_form.is_valid()):
self.object = form.save(commit=False)
lista_produtos = []
lista_produtos_estocados = []
itens_form.instance = self.object
for pform in itens_form:
if pform.cleaned_data != {}:
itens_mvmt_obj = pform.save(commit=False)
itens_mvmt_obj.movimento_id = self.object
self.adicionar_novo_movimento_estoque(
itens_mvmt_obj, pform, lista_produtos, lista_produtos_estocados)
                    # Check whether any invalid stock movements exist
if len(pform.errors):
return self.form_invalid(form=form, itens_form=itens_form)
else:
self.object.save()
itens_form.save()
for prod in lista_produtos:
prod.save()
for prod_estocado in lista_produtos_estocados:
prod_estocado.save()
return self.form_valid(form)
return self.form_invalid(form=form, itens_form=itens_form)
class AdicionarEntradaEstoqueView(AdicionarMovimentoEstoqueBaseView):
form_class = EntradaEstoqueForm
template_name = "estoque/movimento/movimento_estoque_add.html"
success_url = reverse_lazy('estoque:listaentradasestoqueview')
success_message = "<b>Movimento de estoque de entrada nº%(pk)s</b> adicionado com sucesso."
def view_context(self, context):
context['title_complete'] = 'ADICIONAR ENTRADA EM ESTOQUE'
context['return_url'] = reverse_lazy(
'estoque:listaentradasestoqueview')
return context
class AdicionarSaidaEstoqueView(AdicionarMovimentoEstoqueBaseView):
form_class = SaidaEstoqueForm
template_name = "estoque/movimento/movimento_estoque_add.html"
success_url = reverse_lazy('estoque:listasaidasestoqueview')
success_message = "<b>Movimento de estoque de saída nº%(pk)s</b> adicionado com sucesso."
def view_context(self, context):
context['title_complete'] = 'ADICIONAR SAÍDA EM ESTOQUE'
context['return_url'] = reverse_lazy('estoque:listasaidasestoqueview')
return context
class AdicionarTransferenciaEstoqueView(AdicionarMovimentoEstoqueBaseView):
form_class = TransferenciaEstoqueForm
template_name = "estoque/movimento/movimento_estoque_add.html"
success_url = reverse_lazy('estoque:listatransferenciasestoqueview')
success_message = "<b>Movimento de estoque de transferência nº%(pk)s</b> adicionado com sucesso."
def view_context(self, context):
context['title_complete'] = 'ADICIONAR TRANSFERÊNCIA EM ESTOQUE'
context['return_url'] = reverse_lazy(
'estoque:listatransferenciasestoqueview')
return context
class MovimentoEstoqueBaseListView(CustomListView):
permission_codename = 'view_movimentoestoque'
def get_context_data(self, **kwargs):
context = super(MovimentoEstoqueBaseListView,
self).get_context_data(**kwargs)
return self.view_context(context)
class MovimentoEstoqueListView(MovimentoEstoqueBaseListView):
template_name = 'estoque/movimento/movimento_estoque_list.html'
context_object_name = 'all_movimentos'
success_url = reverse_lazy('estoque:listamovimentoestoqueview')
def view_context(self, context):
context['title_complete'] = 'TODAS AS MOVIMENTAÇÕES DE ESTOQUE'
return context
def get_queryset(self):
all_entradas = EntradaEstoque.objects.all()
all_saidas = SaidaEstoque.objects.all()
all_transferencias = TransferenciaEstoque.objects.all()
all_movimentos = list(
chain(all_saidas, all_entradas, all_transferencias))
return all_movimentos
def post(self, request, *args, **kwargs):
if self.check_user_delete_permission(request, MovimentoEstoque):
for key, value in request.POST.items():
if value == "on":
if EntradaEstoque.objects.filter(id=key).exists():
instance = EntradaEstoque.objects.get(id=key)
elif SaidaEstoque.objects.filter(id=key).exists():
instance = SaidaEstoque.objects.get(id=key)
elif TransferenciaEstoque.objects.filter(id=key).exists():
instance = TransferenciaEstoque.objects.get(id=key)
instance.delete()
return redirect(self.success_url)
class EntradaEstoqueListView(MovimentoEstoqueBaseListView):
template_name = 'estoque/movimento/movimento_estoque_list.html'
model = EntradaEstoque
context_object_name = 'all_entradas'
success_url = reverse_lazy('estoque:listaentradasestoqueview')
def view_context(self, context):
context['title_complete'] = 'ENTRADAS EM ESTOQUE'
context['add_url'] = reverse_lazy('estoque:addentradaestoqueview')
return context
class SaidaEstoqueListView(MovimentoEstoqueBaseListView):
template_name = 'estoque/movimento/movimento_estoque_list.html'
model = SaidaEstoque
context_object_name = 'all_saidas'
success_url = reverse_lazy('estoque:listasaidasestoqueview')
def view_context(self, context):
context['title_complete'] = 'SAÍDAS EM ESTOQUE'
context['add_url'] = reverse_lazy('estoque:addsaidaestoqueview')
return context
class TransferenciaEstoqueListView(MovimentoEstoqueBaseListView):
template_name = 'estoque/movimento/movimento_estoque_list.html'
model = TransferenciaEstoque
context_object_name = 'all_transferencias'
success_url = reverse_lazy('estoque:listatransferenciasestoqueview')
def view_context(self, context):
context['title_complete'] = 'TRANSFERÊNCIAS EM ESTOQUE'
context['add_url'] = reverse_lazy(
'estoque:addtransferenciaestoqueview')
return context
class DetalharMovimentoEstoqueBaseView(CustomDetailView):
template_name = "estoque/movimento/movimento_estoque_detail.html"
permission_codename = 'view_movimentoestoque'
def get_context_data(self, **kwargs):
context = super(DetalharMovimentoEstoqueBaseView,
self).get_context_data(**kwargs)
return self.view_context(context)
class DetalharEntradaEstoqueView(DetalharMovimentoEstoqueBaseView):
model = EntradaEstoque
def view_context(self, context):
context['title_complete'] = 'MOVIMENTO DE ENTRADA EM ESTOQUE N°' + \
str(self.object.id)
context['return_url'] = reverse_lazy(
'estoque:listaentradasestoqueview')
return context
class DetalharSaidaEstoqueView(DetalharMovimentoEstoqueBaseView):
model = SaidaEstoque
def view_context(self, context):
context['title_complete'] = 'MOVIMENTO DE SAÍDA EM ESTOQUE N°' + \
str(self.object.id)
context['return_url'] = reverse_lazy('estoque:listasaidasestoqueview')
return context
class DetalharTransferenciaEstoqueView(DetalharMovimentoEstoqueBaseView):
model = TransferenciaEstoque
def view_context(self, context):
context['title_complete'] = 'MOVIMENTO DE TRANSFERÊNCIA EM ESTOQUE N°' + \
str(self.object.id)
context['return_url'] = reverse_lazy(
'estoque:listatransferenciasestoqueview')
return context
| thiagopena/djangoSIGE | djangosige/apps/estoque/views/movimento.py | Python | mit | 12,100 |
import serial
port = "COM5"
baud = 19200
try:
ser = serial.Serial(port, baud, timeout=1)
    ser.isOpen() # Serial() already opened the port on construction; confirm it is open and report it
print ("port is opened!")
except IOError: # if port is already opened, close it and open it again and print message
ser.close()
ser.open()
print ("port was already open, was closed and opened again!")
def main():
while True:
cmd = raw_input("Enter command or 'exit':")
# for Python 2
# cmd = input("Enter command or 'exit':")
# for Python 3
if cmd == 'exit':
ser.close()
exit()
else:
ser.write(cmd.encode('ascii'))
# out = ser.read()
# print('Receiving...'+out)
if __name__ == "__main__":
        main()
| YaguangZhang/EarsMeasurementCampaignCode | Trials/lib/Trial6_pySerial_Mod.py | Python | mit | 857 |
import spacy
nlp = spacy.load('en')
text = open('customer_feedback_627.txt').read()
doc = nlp(text)
for entity in doc.ents:
print(entity.text, entity.label_)
# Determine semantic similarities
doc1 = nlp(u'the fries were gross')
doc2 = nlp(u'worst fries ever')
doc1.similarity(doc2)
# Hook in your own deep learning models
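# load_my_model is not defined in this snippet -- treat it as a stand-in for any callable that
# returns a pipeline component to be inserted before the parser.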
nlp.add_pipe(load_my_model(), before='parser')
| iwxfer/wikitten | library/nlp/semantic_simi.py | Python | mit | 376 |
import csv
import json
import random
import os
import re
import itertools
import shutil
currentDatabase = ''
def showDatabases():
return (next(os.walk('./db'))[1])
def createDatabase(databaseName):
newDatabaseDirectory = (r'./db/') + databaseName
if not os.path.exists(newDatabaseDirectory):
#Create directory
os.makedirs(newDatabaseDirectory)
#Create metadata file
metadataFile = {}
metadataFile['tables'] = {}
with open('./db/'+databaseName+'/'+databaseName+'Metadata.json', 'w') as output:
json.dump(metadataFile, output)
return ("Database '"+databaseName+"' created succesfully.")
else:
return ('Database with name: "'+databaseName+'" already exists.')
def dropDatabase(databaseName):
databaseDirectory = (r'./db/') + databaseName
if not os.path.exists(databaseDirectory):
return ("Database with name: "+databaseName+" doesnt exists.")
else:
shutil.rmtree(databaseDirectory)
return ("Database "+databaseName+" succesfully deleted.")
def useDatabase(databaseName):
databaseDirectory = (r'./db/') + databaseName
if os.path.exists(databaseDirectory):
global currentDatabase
currentDatabase = databaseName
return ("Changed to database: ")
else:
        return ('Database with name: "'+databaseName+'" does not exist.')
def showTables(currentDatabase):
    #Read table info from metadata file
input = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(input)
return metadata['tables'].keys()
def changeDatabaseName(oldName, newName):
if newName in showDatabases():
return ("Error, a database with name "+newName+" already exists.")
else:
os.rename(r'./db/'+oldName, r'./db/'+newName)
os.rename(r'./db/'+newName+'/'+oldName+'Metadata.json', r'./db/'+newName+'/'+newName+'Metadata.json')
return ("Database: "+oldName+" changed name to: "+newName)
NO_KEY = 0
FOREIGN_KEY = 1
PRIMARY_KEY = 2
# tableSchemaExample = {'tableName':'table1', 'columns':[{'columnName':'column1', 'key':1, 'constraintTable':'table2', 'constraintColumn':'column1', 'type':'int'},{'columnName':'column2', 'key':1, 'type':'date'}]}
def createTable(tableSchema, currentDatabase):
if not os.path.isfile('./db/'+currentDatabase+'/'+tableSchema['tableName']+'.json'):
#Check if table contains at least one type of key
pkSum = 0
fkSum = 0
for column in tableSchema['columns']:
if column['key'] == PRIMARY_KEY:
#Sum to PK counter
pkSum += 1
elif column['key'] == FOREIGN_KEY:
#Sum to FK counter
fkSum += 1
#Check if the constraint target table exists
if not os.path.isfile(r'./db/'+currentDatabase+'/'+column['constraintTable']+'.json'):
return ("Error, constraint target table: "+column['constraintTable']+" doesnt exists in database: "+currentDatabase)
return False
#Table cannot have more than one primary key
if(pkSum)>1:
return ("Error, table cannot have more than one primary key.")
return False
#Table has to have at least one type of key
if((pkSum+fkSum) < 1):
return ("Error, table needs at least one type of key.")
return False
#Create file
file = open('./db/'+currentDatabase+'/'+tableSchema['tableName']+'.json', 'w')
file.write('{}')
#Create hash file
hashFile = open('./db/'+currentDatabase+'/'+tableSchema['tableName']+'.hash', 'w')
initialHash = {}
for column in tableSchema['columns']:
initialHash[column['columnName']] = {}
json.dump(initialHash, hashFile)
#Insert info in metadata file
input = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(input)
tempTables = metadata['tables']
tempTables[tableSchema['tableName']] = {}
tempTables[tableSchema['tableName']]['columns'] = tableSchema['columns']
tempTables[tableSchema['tableName']]['lastIndex'] = -1
tempTables[tableSchema['tableName']]['deletedRows'] = 0
metadata['tables'] = tempTables
#Write info in metadata file
with open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'w') as output:
json.dump(metadata, output)
        return ('Table successfully created')
else:
return ('Table with name: '+tableSchema['tableName']+' already exists.')
def getType(columnName, tableName, metadata):
columnsInTable = metadata['tables'][tableName]['columns']
for column in columnsInTable:
if (column['columnName'] == columnName):
# print("Returning type: "+column['type']+" for column: "+columnName)
return column['type']
return False
def checkTypes(insertInfo, metadata):
columnsInTable = metadata['tables'][insertInfo['tableName']]['columns']
for i in range(len(insertInfo['columns'])):
if(getType(insertInfo['columns'][i], insertInfo['tableName'], metadata) == 'int'):
if(type(insertInfo['values'][i]) != type(1)):
print("TYPE ERROR")
return False
if(getType(insertInfo['columns'][i], insertInfo['tableName'], metadata) == 'float'):
if(type(insertInfo['values'][i]) != type(1.0)):
print("TYPE ERROR")
return False
if(getType(insertInfo['columns'][i], insertInfo['tableName'], metadata) == 'date'):
dateExpresion = re.compile('^\d\d-\d\d-\d\d\d\d$')
if not dateExpresion.match(insertInfo['values'][i]):
print("TYPE ERROR")
return False
if(getType(insertInfo['columns'][i], insertInfo['tableName'], metadata) == 'string'):
if(type(insertInfo['values'][i]) != type("a")):
print("TYPE ERROR")
return False
return True
def checkConstraints(insertInfo, metadata, tableHash, currentDatabase):
#Traverse every column in the table
for column in metadata['tables'][insertInfo['tableName']]['columns']:
value = insertInfo['values'][insertInfo['columns'].index(column['columnName'])]
#If column is foreign key then check if it already exists in the respective table
if column['key'] == FOREIGN_KEY:
try:
if value == "NULL":
#It cannot be NULL
return ("Error, column: "+column['columnName']+" cannot be NULL, as it is a foreign key.")
else:
#Check if it exists in the respective table
#Open table file
constraintTableFile = open(r'./db/'+currentDatabase+'/'+column['constraintTable']+'.hash', 'r')
constraintTable = json.load(constraintTableFile)
#If it isnt
if not (value in constraintTable[column['constraintColumn']]):
return ("Error, "+str(value)+" in column "+column['columnName']+" doesnt exist in constraint table "+column['constraintTable']+" yet.")
return False
except:
#It has to come in the insertion statement
return ("Error, column: "+column['columnName']+" is required, as it is a foreign key.")
return False
#If column is primary key then check if its unique in the respective table
elif column['key'] == PRIMARY_KEY:
# print("Value: "+str(value)+" column "+column['columnName'])
if str(value) in tableHash[column['columnName']]:
return ("Error, primary key "+str(value)+" already exists in column: "+column['columnName'])
return False
#If all the columns are good then return True
return True
# insertInfoExample = {'tableName': 'table1', 'columns':['id','nombre','edad'], 'values':[1, 'Perry', 20]}
def insertRecord(insertInfo, currentDatabase):
#Perform parity check
if(len(insertInfo['columns']) != len(insertInfo['values'])):
        return ('Error, values quantity does not match columns quantity')
#Open metadata file
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
#Check if table exists
if insertInfo['tableName'] not in metadata['tables']:
return ("Error, table: "+insertInfo['tableName']+" doesnt exists in database: "+currentDatabase)
return False
#Perform type checks
if(checkTypes(insertInfo, metadata) != True):
        return ('Error, types do not match the table types.')
#Open hash file
tableHashFile = open('./db/'+currentDatabase+'/'+insertInfo['tableName']+'.hash', 'r')
tableHash = json.load(tableHashFile)
#Perform constraint check
constraintCheck = checkConstraints(insertInfo, metadata, tableHash, currentDatabase)
if(constraintCheck != True):
return constraintCheck
#Construct key-value pair to insert to table json file and store index in hash
resultingCSV = ""
for column in metadata['tables'][insertInfo['tableName']]['columns']:
try:
#Construct CSV
tempIndex = insertInfo['columns'].index(column['columnName'])
resultingCSV += str(insertInfo['values'][tempIndex]) + ","
#Insert to Hash
if str(insertInfo['values'][tempIndex]) in tableHash[column['columnName']]:
tempArray = tableHash[column['columnName']][str(insertInfo['values'][tempIndex])]
tempArray.append(metadata['tables'][insertInfo['tableName']]['lastIndex'] + 1)
tableHash[column['columnName']][str(insertInfo['values'][tempIndex])] = tempArray
else:
tableHash[column['columnName']][str(insertInfo['values'][tempIndex])] = [metadata['tables'][insertInfo['tableName']]['lastIndex'] + 1]
except:
# print('Except triggered')
resultingCSV += "NULL" + ","
#Write hash back to file
tableHashFile = open('./db/'+currentDatabase+'/'+insertInfo['tableName']+'.hash', 'w')
json.dump(tableHash, tableHashFile)
resultingCSV = resultingCSV[:-1]
#Open table file
tableFile = open('./db/'+currentDatabase+'/'+insertInfo['tableName']+'.json', 'r')
## VERSION LOADING JSON TO MEMORY ####################################################################################################
# #Load JSON
# tableJSON = json.load(tableFile)
# #Add row
# tableJSON[metadata['tables'][insertInfo['tableName']]['lastIndex'] + 1] = resultingCSV
# #Write table file
# json.dump(tableJSON, open('./db/'+currentDatabase+'/'+insertInfo['tableName']+'.json', 'w'))
######################################################################################################################################
## VERSION ALTERING FILE DIRECTLY ####################################################################################################
currentJSON = tableFile.readlines()[0]
if(currentJSON == "{}"):
newJSON = currentJSON[:-1] + '"' + str(metadata['tables'][insertInfo['tableName']]['lastIndex'] + 1) + '":"' + resultingCSV + '"}'
else:
newJSON = currentJSON[:-1] + ',"' + str(metadata['tables'][insertInfo['tableName']]['lastIndex'] + 1) + '":"' + resultingCSV + '"}'
tableFile = open('./db/'+currentDatabase+'/'+insertInfo['tableName']+'.json', 'w')
tableFile.write(newJSON)
######################################################################################################################################
#Update index in metadata
# print(metadata['tables'][insertInfo['tableName']]['lastIndex'] + 1)
metadata['tables'][insertInfo['tableName']]['lastIndex'] = metadata['tables'][insertInfo['tableName']]['lastIndex'] + 1
#Write metadata
json.dump(metadata, open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'w'))
return ('Insert successful.')
return True
def deleteIndexes(indexesToDelete, inputJson):
for indexToDelete in indexesToDelete:
# print("\nDeleting index: "+str(indexToDelete))
firstRegex = r'{"' + str(indexToDelete) + r'":"[^"]*",'
inputJson = re.sub(firstRegex, '{', inputJson)
middleRegex = r',"' + str(indexToDelete) + r'":"[^"]*",'
inputJson = re.sub(middleRegex, ',', inputJson)
lastRegex = r',"' + str(indexToDelete) + r'":"[^"]*"}'
inputJson = re.sub(lastRegex, '}', inputJson)
onlyRegex = r'{"' + str(indexToDelete) + r'":"[^"]*"}'
inputJson = re.sub(onlyRegex, '{}', inputJson)
return inputJson
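# Worked example (hypothetical rows): deleteIndexes([1], '{"0":"a,b","1":"c,d","2":"e,f"}')
# matches middleRegex on ',"1":"c,d",' and collapses it to ',', returning '{"0":"a,b","2":"e,f"}'.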
# deleteInfoExample = {'tableName':'table1', 'indexes':[1,3]}
def deleteRows(deleteInfo, currentDatabase):
tableFile = open('./db/'+currentDatabase+'/'+deleteInfo['tableName']+'.json', 'r')
jsonFile = tableFile.readlines()[0]
newJson = deleteIndexes(deleteInfo['indexes'], jsonFile)
tableFileOutput = open('./db/'+currentDatabase+'/'+deleteInfo['tableName']+'.json', 'w')
tableFileOutput.write(newJson)
return True
# Example call: cartesianProduct(['table1', 'table2', 'table3'])
def cartesianProduct(tables, currentDatabase):
#Open metadata file
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
#Check existence of all tables
for table in tables:
if not (table in metadata['tables']):
print ("Table: "+table+" doesnt exist in database: "+currentDatabase)
return False
#Load all tables involved
data = []
for table in tables:
#Open table file
tableFile = open('./db/'+currentDatabase+'/'+table+'.json', 'r')
jsonFile = json.load(tableFile)
tempData = []
for key, value in jsonFile.items():
tempData.append(value)
data.append(tempData)
#Generate combinations
cartesianProductResult = itertools.product(*data)
#Clean
cartesianProductResult = map((lambda x : (','.join(map(str,x)))), cartesianProductResult)
cartesianProductResult = map((lambda x : x.split(",")), cartesianProductResult)
# print(cartesianProductResult)
# for i in cartesianProductResult:
# print(i)
#Generate metadata from cartesianProductResult
columns = []
for table in tables:
tempColumns = metadata['tables'][table]['columns']
for column in tempColumns:
column['originTable'] = table
columns = columns + tempColumns
metadataResult = {}
metadataResult['columns'] = columns
return metadataResult, cartesianProductResult
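# Usage sketch (hypothetical tables): with table1 holding rows '1,a' and '2,b' and table2 holding 'x',
# cartesianProduct(['table1', 'table2'], currentDatabase) yields the combinations ['1', 'a', 'x'] and
# ['2', 'b', 'x'] plus a merged column schema where each column is tagged with its 'originTable'.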
def filterOverCartesianProduct(tableSchema, tableData, operation, firstWhere, secondWhere):
# print("Called with operation: "+str(operation))
if(operation == "NULL"):
resultData = []
#Check ambiguity
indexesFound = 0
for i in range(len(tableSchema['columns'])):
# print("tableSchema:\n")
# print(tableSchema['columns'][i]['columnName'])
# print("firstWhere:\n")
# print(firstWhere['constraintColumn'])
if tableSchema['columns'][i]['columnName'] == firstWhere['constraintColumn']:
indexesFound = indexesFound + 1
indexToCompare = i
# print("CHECKPOINT")
if(indexesFound == 0):
print("Error, column: "+firstWhere['constraintColumn']+" doesnt exists in the resulting cartesian product table.")
return False
elif(indexesFound > 1):
print("Error, reference to column: "+firstWhere['constraintColumn']+" is ambiguous in the resulting cartesian product table.")
return False
#Check type compatibility
desiredType = tableSchema['columns'][indexToCompare]['type']
compareTo = firstWhere['compareTo']
try:
if((desiredType == 'string') or (desiredType == 'date')):
compareTo = str(compareTo)
elif(desiredType == 'int'):
compareTo = int(compareTo)
elif(desiredType == 'float'):
compareTo = float(compareTo)
except:
print("Error, "+str(compareTo)+" couldnt be casted to the type of: "+firstWhere['constraintColumn']+" ("+desiredType+")")
return False
# compareTo = str(compareTo)
#Perform filter
if(firstWhere['operation'] == '='):
for row in tableData:
# print("Checking if "+str(row[indexToCompare])+"("+str(type(row[indexToCompare]))+")="+str(compareTo)+"("+str(type(compareTo))+")")
compareRow = row[indexToCompare]
if(desiredType == 'int'):
compareRow = int(compareRow)
elif(desiredType == 'float'):
compareRow = float(compareRow)
if compareRow == compareTo:
resultData.append(row)
elif(firstWhere['operation'] == '<'):
for row in tableData:
compareRow = row[indexToCompare]
if(desiredType == 'int'):
compareRow = int(compareRow)
elif(desiredType == 'float'):
compareRow = float(compareRow)
if compareRow < compareTo:
resultData.append(row)
elif(firstWhere['operation'] == '<='):
for row in tableData:
compareRow = row[indexToCompare]
if(desiredType == 'int'):
compareRow = int(compareRow)
elif(desiredType == 'float'):
compareRow = float(compareRow)
if compareRow <= compareTo:
resultData.append(row)
elif(firstWhere['operation'] == '>'):
for row in tableData:
compareRow = row[indexToCompare]
if(desiredType == 'int'):
compareRow = int(compareRow)
elif(desiredType == 'float'):
compareRow = float(compareRow)
if compareRow > compareTo:
resultData.append(row)
elif(firstWhere['operation'] == '>='):
for row in tableData:
compareRow = row[indexToCompare]
if(desiredType == 'int'):
compareRow = int(compareRow)
elif(desiredType == 'float'):
compareRow = float(compareRow)
if compareRow >= compareTo:
resultData.append(row)
elif(firstWhere['operation'] == 'not'):
for row in tableData:
compareRow = row[indexToCompare]
if(desiredType == 'int'):
compareRow = int(compareRow)
elif(desiredType == 'float'):
compareRow = float(compareRow)
if compareRow != compareTo:
resultData.append(row)
return resultData
elif(operation == "AND"):
#Filter childs
firstWhereResults = filterOverCartesianProduct(tableSchema, tableData, firstWhere['operation'], firstWhere['firstWhere'], firstWhere['secondWhere'])
secondWhereResults = filterOverCartesianProduct(tableSchema, tableData, secondWhere['operation'], secondWhere['firstWhere'], secondWhere['secondWhere'])
#AND results of childs
resultData = []
for result in firstWhereResults:
if result in secondWhereResults:
resultData.append(result)
return resultData
elif(operation == "OR"):
#Filter childs
firstWhereResults = filterOverCartesianProduct(tableSchema, tableData, firstWhere['operation'], firstWhere['firstWhere'], firstWhere['secondWhere'])
secondWhereResults = filterOverCartesianProduct(tableSchema, tableData, secondWhere['operation'], secondWhere['firstWhere'], secondWhere['secondWhere'])
#OR results of childs
for result in secondWhereResults:
if result not in firstWhereResults:
firstWhereResults.append(result)
return firstWhereResults
def filterOverSingleTable(tableName, operation, firstWhere, secondWhere, currentDatabase):
if(operation == "NULL"):
#Check type compatibility
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
for column in metadata['tables'][tableName]['columns']:
print("Comparing: "+column['columnName']+" with "+firstWhere['constraintColumn'])
if column['columnName'] == firstWhere['constraintColumn']:
desiredType = column['type']
compareTo = firstWhere['compareTo']
try:
if((desiredType == 'string') or (desiredType == 'date')):
compareTo = str(compareTo)
elif(desiredType == 'int'):
compareTo = int(compareTo)
elif(desiredType == 'float'):
compareTo = float(compareTo)
# print("Cast complete")
except:
print("Error, "+str(compareTo)+" couldnt be casted to the type of: "+firstWhere['constraintColumn']+" ("+desiredType+")")
return False
#Open table hash file
tableHashFile = open(r'./db/'+currentDatabase+'/'+tableName+'.hash', 'r')
tableHash = json.load(tableHashFile)
#Get hash for specific column
columnHash = tableHash[firstWhere['constraintColumn']]
#Get keys of column
columnKeys = columnHash.keys()
# print("Column keys PRECAST: ")
# print(columnKeys)
#Cast keys to respective type
if(desiredType == 'int'):
columnKeys = map(int, columnKeys)
elif(desiredType == 'float'):
columnKeys = map(float, columnKeys)
# print("Column keys POSTCAST: ")
# print(columnKeys)
# print("compareTo: "+str(compareTo)+str(type(compareTo)))
#Get matching keys
matchingKeys = []
if(firstWhere['operation'] == '='):
for key in columnKeys:
if key == compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '<'):
for key in columnKeys:
if key < compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '<='):
for key in columnKeys:
if key <= compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '>'):
for key in columnKeys:
if key > compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '>='):
for key in columnKeys:
if key >= compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == 'not'):
for key in columnKeys:
if key != compareTo:
matchingKeys.append(key)
#Get row indexes
rowIndexes = []
for key in matchingKeys:
rowIndexes = list(set(rowIndexes) | set(columnHash[str(key)]))
# print("Row indexes")
# print(rowIndexes)
#Open table data file
tableFile = open(r'./db/'+currentDatabase+'/'+tableName+'.json', 'r')
table = json.load(tableFile)
#Generate resulting set of rows
resultData = []
for index in rowIndexes:
# print(table[str(key)])
resultData.append(table[str(index)])
# print(resultData)
resultData = map((lambda x : x.split(",")), resultData)
# print(resultData)
return resultData
elif(operation == "AND"):
#Filter childs
firstWhereResults = filterOverSingleTable(tableName, firstWhere['operation'], firstWhere['firstWhere'], firstWhere['secondWhere'], currentDatabase)
secondWhereResults = filterOverSingleTable(tableName, secondWhere['operation'], secondWhere['firstWhere'], secondWhere['secondWhere'], currentDatabase)
#AND results of childs
resultData = []
for result in firstWhereResults:
if result in secondWhereResults:
resultData.append(result)
return resultData
elif(operation == "OR"):
#Filter childs
firstWhereResults = filterOverSingleTable(tableName, firstWhere['operation'], firstWhere['firstWhere'], firstWhere['secondWhere'], currentDatabase)
secondWhereResults = filterOverSingleTable(tableName, secondWhere['operation'], secondWhere['firstWhere'], secondWhere['secondWhere'], currentDatabase)
#OR results of childs
for result in secondWhereResults:
if result not in firstWhereResults:
firstWhereResults.append(result)
return firstWhereResults
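# The .hash file consulted above maps column -> value -> [row indexes], e.g. (hypothetical):
#   {"column2": {"12": [0], "15": [1, 4]}, "column1": {"12-12-1212": [0, 1, 4]}}
# which lets a WHERE clause resolve matching row indexes without scanning the whole data file.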
def filterOverSingleTableWithIndexes(tableName, operation, firstWhere, secondWhere, currentDatabase):
if(operation == "NULL"):
#Check type compatibility
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
for column in metadata['tables'][tableName]['columns']:
# print("Comparing: "+column['columnName']+" with "+firstWhere['constraintColumn'])
if column['columnName'] == firstWhere['constraintColumn']:
desiredType = column['type']
compareTo = firstWhere['compareTo']
try:
if((desiredType == 'string') or (desiredType == 'date')):
compareTo = str(compareTo)
elif(desiredType == 'int'):
compareTo = int(compareTo)
elif(desiredType == 'float'):
compareTo = float(compareTo)
# print("Cast complete")
except:
print("Error, "+str(compareTo)+" couldnt be casted to the type of: "+firstWhere['constraintColumn']+" ("+desiredType+")")
return False
#Open table hash file
tableHashFile = open(r'./db/'+currentDatabase+'/'+tableName+'.hash', 'r')
tableHash = json.load(tableHashFile)
#Get hash for specific column
columnHash = tableHash[firstWhere['constraintColumn']]
#Get keys of column
columnKeys = columnHash.keys()
# print("Column keys PRECAST: ")
# print(columnKeys)
#Cast keys to respective type
if(desiredType == 'int'):
columnKeys = map(int, columnKeys)
elif(desiredType == 'float'):
columnKeys = map(float, columnKeys)
# print("Column keys POSTCAST: ")
# print(columnKeys)
# print("compareTo: "+str(compareTo)+str(type(compareTo)))
#Get matching keys
matchingKeys = []
if(firstWhere['operation'] == '='):
for key in columnKeys:
if key == compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '<'):
for key in columnKeys:
if key < compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '<='):
for key in columnKeys:
if key <= compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '>'):
for key in columnKeys:
if key > compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == '>='):
for key in columnKeys:
if key >= compareTo:
matchingKeys.append(key)
elif(firstWhere['operation'] == 'not'):
for key in columnKeys:
if key != compareTo:
matchingKeys.append(key)
#Get row indexes
rowIndexes = []
for key in matchingKeys:
rowIndexes = list(set(rowIndexes) | set(columnHash[str(key)]))
# print("Row indexes")
# print(rowIndexes)
#Open table data file
tableFile = open(r'./db/'+currentDatabase+'/'+tableName+'.json', 'r')
table = json.load(tableFile)
#Generate resulting set of rows
resultData = []
for index in rowIndexes:
# print(table[str(key)])
resultData.append(table[str(index)])
# print(resultData)
resultData = map((lambda x : x.split(",")), resultData)
# print(resultData)
#Return indexes and data
return rowIndexes, resultData
elif(operation == "AND"):
#Filter childs
firstWhereIndexes, firstWhereResults = filterOverSingleTableWithIndexes(tableName, firstWhere['operation'], firstWhere['firstWhere'], firstWhere['secondWhere'], currentDatabase)
secondWhereIndexes, secondWhereResults = filterOverSingleTableWithIndexes(tableName, secondWhere['operation'], secondWhere['firstWhere'], secondWhere['secondWhere'], currentDatabase)
#AND indexes of childs
rowIndexes = []
for index in firstWhereIndexes:
if index in secondWhereIndexes:
rowIndexes.append(index)
#AND results of childs
resultData = []
for result in firstWhereResults:
if result in secondWhereResults:
resultData.append(result)
return rowIndexes, resultData
elif(operation == "OR"):
#Filter childs
firstWhereIndexes, firstWhereResults = filterOverSingleTableWithIndexes(tableName, firstWhere['operation'], firstWhere['firstWhere'], firstWhere['secondWhere'], currentDatabase)
secondWhereIndexes, secondWhereResults = filterOverSingleTableWithIndexes(tableName, secondWhere['operation'], secondWhere['firstWhere'], secondWhere['secondWhere'], currentDatabase)
#OR indexes of childs
for index in secondWhereIndexes:
if index not in firstWhereIndexes:
firstWhereIndexes.append(index)
#OR results of childs
for result in secondWhereResults:
if result not in firstWhereResults:
firstWhereResults.append(result)
return firstWhereIndexes, firstWhereResults
def select(selectInfo, currentDatabase):
#Check if cartesian product is needed
if(len(selectInfo['from']) > 1):
#Perform FROM, cartesian product
cartesianProductSchema, cartesianProductResult = cartesianProduct(selectInfo['from'], currentDatabase)
#Perform WHERE, row filtering
filterResult = filterOverCartesianProduct(cartesianProductSchema, cartesianProductResult, selectInfo['where']['operation'], selectInfo['where']['firstWhere'], selectInfo['where']['secondWhere'])
#Perform SELECT, column selection
selectedColumns = []
# print(cartesianProductSchema)
for columnName in selectInfo['select']:
for i in range(len(cartesianProductSchema['columns'])):
if cartesianProductSchema['columns'][i]['columnName'] == columnName:
selectedColumns.append(i)
# print(selectedColumns)
finalResult = [selectInfo['select']]
for row in filterResult:
tempRow = []
for column in selectedColumns:
tempRow.append(row[column])
finalResult.append(tempRow)
return finalResult
else:
#Continue select using the hash
#Perform WHERE, row filtering
filterResult = filterOverSingleTable(selectInfo['from'][0], selectInfo['where']['operation'], selectInfo['where']['firstWhere'], selectInfo['where']['secondWhere'], currentDatabase)
#Perform SELECT, column selection
#Open metadata file
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
selectedColumns = []
for columnName in selectInfo['select']:
for i in range(len(metadata['tables'][selectInfo['from'][0]]['columns'])):
if metadata['tables'][selectInfo['from'][0]]['columns'][i]['columnName'] == columnName:
selectedColumns.append(i)
finalResult = [selectInfo['select']]
for row in filterResult:
tempRow = []
for column in selectedColumns:
tempRow.append(row[column])
finalResult.append(tempRow)
return finalResult
'''
deleteInfoExample = {
'from':['table1'],
'where':{
'operation':'OR',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column2',
'compareTo':666
},
'secondWhere':{}
},
'secondWhere':{
'operation':'AND',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'>',
'constraintColumn':'column2',
'compareTo':10
},
'secondWhere':{}
},
'secondWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'<',
'constraintColumn':'column2',
'compareTo':20
},
'secondWhere':{}
}
}
}
}
'''
def delete(deleteInfo, currentDatabase):
#Open metadata file
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
#Check references to this table
referencedValues = {}
#Collect references to this table
for tableName, tableData in metadata['tables'].items():
for column in tableData['columns']:
if column['key'] == FOREIGN_KEY:
if column['constraintTable'] == deleteInfo['from'][0]:
#Load table hash to retrieve values in column
tableHashFile = open('./db/'+currentDatabase+'/'+tableName+'.hash', 'r')
tableHash = json.load(tableHashFile)
referencedValues[column['constraintColumn']] = tableHash[column['columnName']].keys()
# print(referencedValues)
#Perform WHERE, generate indexes to be deleted in table
indexesToDelete, rowsToDelete = filterOverSingleTableWithIndexes(deleteInfo['from'][0], deleteInfo['where']['operation'], deleteInfo['where']['firstWhere'], deleteInfo['where']['secondWhere'], currentDatabase)
# print(rowsToDelete)
#Check if we're attempting to delete a referenced row
for columnName, values in referencedValues.items():
for i in range(len(metadata['tables'][deleteInfo['from'][0]]['columns'])):
if metadata['tables'][deleteInfo['from'][0]]['columns'][i]['columnName'] == columnName:
currentIndex = i
for row in rowsToDelete:
if row[currentIndex] in values:
print("Error, attempting to delete rows were values are being referenced to in another table.")
return False
#If its all clear proceed to delete rows
#Delete from table data
#Open table file
tableFile = open(r'./db/'+currentDatabase+'/'+deleteInfo['from'][0]+'.json', 'r')
table = json.load(tableFile)
for indexToDelete in indexesToDelete:
table.pop(str(indexToDelete))
#Write back file data
tableFile = open(r'./db/'+currentDatabase+'/'+deleteInfo['from'][0]+'.json', 'w')
json.dump(table, tableFile)
#Delete from hash
#Open table hash file
tableHashFile = open(r'./db/'+currentDatabase+'/'+deleteInfo['from'][0]+'.hash', 'r')
tableHash = json.load(tableHashFile)
for column, columnHash in tableHash.items():
for value, indexes in columnHash.items():
newIndexes = []
for index in indexes:
if index not in indexesToDelete:
newIndexes.append(index)
if(len(newIndexes) == 0):
tableHash[column].pop(value)
else:
tableHash[column][value] = newIndexes
#Write back hash info
tableHashFile = open(r'./db/'+currentDatabase+'/'+deleteInfo['from'][0]+'.hash', 'w')
json.dump(tableHash, tableHashFile)
print("Succesfully deleted "+str(len(indexesToDelete))+" rows.")
'''
updateInfoExample = {
'tableName':'table1',
'columnsToUpdate':{
'column1':'newValue1',
'column2':'newValue2'
},
'where':{
'operation':'OR',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'>',
'constraintColumn':'column2',
'compareTo':20
},
'secondWhere':{}
},
'secondWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'<',
'constraintColumn':'column2',
'compareTo':10
},
'secondWhere':{}
}
}
}
'''
def update(updateInfo, currentDatabase):
#Open metadata file
metadataFile = open('./db/'+currentDatabase+'/'+currentDatabase+'Metadata.json', 'r')
metadata = json.load(metadataFile)
#Check if newValue can be casted to the type of the column it wants to set
for columnToUpdate, newValue in updateInfo['columnsToUpdate'].items():
for column in metadata['tables'][updateInfo['tableName']]['columns']:
if column['columnName'] == columnToUpdate:
desiredType = column['type']
try:
if(desiredType == 'int'):
int(newValue)
elif(desiredType == 'float'):
float(newValue)
elif(desiredType == 'date'):
dateExpresion = re.compile('^\d\d-\d\d-\d\d\d\d$')
if not dateExpresion.match(newValue):
print("Type Error. Couldnt cast "+str(newValue)+" to "+desiredType)
return False
except:
print("Type error. Couldnt cast "+str(newValue)+" to "+desiredType)
return False
#If table to update contains a foreign key, check if the newValue is already a primary key in the constraint table
for column in metadata['tables'][updateInfo['tableName']]['columns']:
if column['key'] == FOREIGN_KEY:
if column['columnName'] in updateInfo['columnsToUpdate']:
#Open constraint table hash
constraintTableFile = open(r'./db/'+currentDatabase+'/'+column['constraintTable']+'.hash', 'r')
constraintTable = json.load(constraintTableFile)
if updateInfo['columnsToUpdate'][column['columnName']] not in constraintTable[column['constraintColumn']]:
print("Error, trying to update "+column['columnName']+" to "+updateInfo['columnsToUpdate'][column['columnName']]+" which doesnt exists yet as a primary key in constraint column "+column['constraintColumn']+" in constraint table "+column['constraintTable'])
return False
#Check references to this table
referencedValues = {}
#Collect references to this table
for tableName, tableData in metadata['tables'].items():
for column in tableData['columns']:
if column['key'] == FOREIGN_KEY:
if column['constraintTable'] == updateInfo['tableName']:
#Load table hash to retrieve values in column
tableHashFile = open('./db/'+currentDatabase+'/'+tableName+'.hash', 'r')
tableHash = json.load(tableHashFile)
referencedValues[column['constraintColumn']] = tableHash[column['columnName']].keys()
#Perform WHERE, generate indexes to be updated in table
indexesToUpdate, rowsToUpdate = filterOverSingleTableWithIndexes(updateInfo['tableName'], updateInfo['where']['operation'], updateInfo['where']['firstWhere'], updateInfo['where']['secondWhere'], currentDatabase)
#Check if we're attempting to update a referenced value in a row
for columnName, values in referencedValues.items():
if columnName in updateInfo['columnsToUpdate']:
print("Error, trying to update rows were values are being referenced to in another column")
return False
#If its all clear, proceed to update rows
#Open table file
tableFile = open(r'./db/'+currentDatabase+'/'+updateInfo['tableName']+'.json', 'r')
table = json.load(tableFile)
#Open table hash file
tableHashFile = open(r'./db/'+currentDatabase+'/'+updateInfo['tableName']+'.hash', 'r')
tableHash = json.load(tableHashFile)
# print("Indexes to update:")
# print(indexesToUpdate)
for indexToUpdate in indexesToUpdate:
rowSplitted = table[str(indexToUpdate)].split(",")
for columnToUpdate, newValue in updateInfo['columnsToUpdate'].items():
for i in range(len(metadata['tables'][updateInfo['tableName']]['columns'])):
if metadata['tables'][updateInfo['tableName']]['columns'][i]['columnName'] == columnToUpdate:
#Update info in hash
# print("Trying to retrieve hash from column: "+columnToUpdate+", value: "+str(rowSplitted[i]))
tempArray = tableHash[columnToUpdate][str(rowSplitted[i])]
tempArray.remove(indexToUpdate)
tableHash[columnToUpdate][str(rowSplitted[i])] = tempArray
if (tableHash[columnToUpdate][str(rowSplitted[i])] == None) or len(tableHash[columnToUpdate][str(rowSplitted[i])]) == 0:
# print("Removed all, proceeding to pop key "+str(rowSplitted[i]))
tableHash[columnToUpdate].pop(str(rowSplitted[i]))
if str(newValue) in tableHash[columnToUpdate]:
# print("IT IS:")
# print(tableHash[columnToUpdate])
# print(tableHash[columnToUpdate][str(newValue)])
# print("Trying to add "+str(indexToUpdate)+" to "+str(tableHash[columnToUpdate][str(newValue)])+" attached to key "+str(newValue))
tempArray = tableHash[columnToUpdate][str(newValue)]
# print("Temp array:")
# print(str(tempArray))
tempArray.append(indexToUpdate)
tableHash[columnToUpdate][str(newValue)] = tempArray
# print("Resulting append:")
# print(str(tableHash[columnToUpdate][str(newValue)]))
else:
# print("ITS NOT:")
# print(tableHash[columnToUpdate])
# print("Trying to add "+str(indexToUpdate)+" to key "+str(newValue))
tableHash[columnToUpdate][str(newValue)] = [indexToUpdate]
#Update value in data
rowSplitted[i] = newValue
rowSplitted = map(str, rowSplitted)
rowJoined = ','.join(rowSplitted)
table[indexToUpdate] = rowJoined
#Write back table file
tableFile = open(r'./db/'+currentDatabase+'/'+updateInfo['tableName']+'.json', 'w')
json.dump(table, tableFile)
#Write back hash file
tableHashFile = open(r'./db/'+currentDatabase+'/'+updateInfo['tableName']+'.hash', 'w')
json.dump(tableHash, tableHashFile)
print("Succesfully updated "+str(len(indexesToUpdate))+" rows.")
# Testing area
# dropDatabase('database1')
createDatabase('database1')
createDatabase('databaseCHANGE')
# print(changeDatabaseName('database1', 'databaseCHANGE'))
useDatabase('database1')
# print(showTables())
tableSchemaExample = {'tableName':'table2', 'columns':[{'columnName':'column3', 'key':2, 'type':'date'},{'columnName':'column4', 'key':0, 'type':'string'}]}
createTable(tableSchemaExample, currentDatabase)
tableSchemaExample = {'tableName':'table1', 'columns':[{'columnName':'column1', 'key':1, 'constraintTable':'table2', 'constraintColumn':'column3', 'type':'date'},{'columnName':'column2', 'key':0, 'type':'int'}]}
createTable(tableSchemaExample, currentDatabase)
print(showTables(currentDatabase))
# print("Inserting into table 2")
# insertRecord({'tableName': 'table2', 'columns':['column3', 'column4'], 'values':['12-12-1212', 'Bryan Chan']})
# print("Inserting into table 2")
# insertRecord({'tableName': 'table2', 'columns':['column3', 'column4'], 'values':['24-24-2424', 'Alejandro Cortes']})
# print("Inserting into table 1")
# insertRecord({'tableName': 'table1', 'columns':['column2', 'column1'], 'values':[12, '12-12-1212']})
# print("Inserting into table 1")
# insertRecord({'tableName': 'table1', 'columns':['column2', 'column1'], 'values':[15, '12-12-1212']})
# print("Inserting into table 1")
# insertRecord({'tableName': 'table1', 'columns':['column2', 'column1'], 'values':[19, '12-12-1212']})
# print("Inserting into table 1")
# insertRecord({'tableName': 'table1', 'columns':['column2', 'column1'], 'values':[24, '12-12-1212']})
# print("Inserting into table 1")
# insertRecord({'tableName': 'table1', 'columns':['column2', 'column1'], 'values':[36, '12-12-1212']})
# for i in range(1000):
# print("Inserting into table 1: "+str(i))
# insertRecord({'tableName': 'table1', 'columns':['column2', 'column1'], 'values':[random.randint(0,50), '12-12-1212']})
'''
selectInfo = {
'select':['column2','column4'],
'from':['table1','table2'],
'where':{
'operation':'OR',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column4',
'compareTo':'Bryan Chan'
},
'secondWhere':{}
},
'secondWhere':{
'operation':'AND',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column4',
'compareTo':'Alejandro Cortes'
},
'secondWhere':{}
},
'secondWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'<',
'constraintColumn':'column2',
'compareTo':100
},
'secondWhere':{}
}
}
}
}
print(select(selectInfo))
'''
selectInfo = {
'select':['column2','column1'],
'from':['table1'],
'where':{
'operation':'OR',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column2',
'compareTo':50
},
'secondWhere':{}
},
'secondWhere':{
'operation':'AND',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'>',
'constraintColumn':'column2',
'compareTo':10
},
'secondWhere':{}
},
'secondWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'<',
'constraintColumn':'column2',
'compareTo':20
},
'secondWhere':{}
}
}
}
}
# print(select(selectInfo))
deleteInfoExample = {
'from':['table1'],
'where':{
'operation':'AND',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column2',
'compareTo':666
},
'secondWhere':{}
},
'secondWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column2',
'compareTo':666
},
'secondWhere':{}
}
}
}
# delete(deleteInfoExample)
updateInfoExample = {
'tableName':'table1',
'columnsToUpdate':{
'column1':'24-24-2424',
'column2':666
},
'where':{
'operation':'AND',
'firstWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column2',
'compareTo':100
},
'secondWhere':{}
},
'secondWhere':{
'operation':'NULL',
'firstWhere':{
'operation':'=',
'constraintColumn':'column2',
'compareTo':100
},
'secondWhere':{}
}
}
}
# update(updateInfoExample)
# print(showDatabases())
# for i in range(10000):
# tableFile = open('./db/'+currentDatabase+'/'+'table1'+'.json', 'r')
# table = json.load(tableFile)
# print("Writing "+str(i))
# tableFile = open('./db/'+currentDatabase+'/'+'table1'+'.json', 'w')
# json.dump(table, tableFile)
| cor14095/probases1 | Back-end/csvHandler.py | Python | mit | 41,975 |
import os, sys, re
import optparse
import shutil
import pandas
import numpy
import gc
import subprocess
#####################################
#This is a script to combine the output reports from
#Skyline, in preparation for MSstats! Let's get started.
#
#VERSION 0.70A
version="0.70A"
#DATE: 10/11/2016
date="10/11/2016"
#####################################
print "-----------------------------------------------------------------------"
print "Welcome to the MSstats wrapper for Galaxy, Wohlschlegel Lab UCLA"
print "Written by William Barshop"
print "Version: ",version
print "Date: ",date
basedir=os.getcwd()
####################################
#Argument parsing! So much fun!
#We'll use OptParse even though some
#people really rave about argparse...
#
#
# NB: With Optparse, if an option is
# not specified, it will take a
# value of None
####################################
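# e.g. if the user omits --significance on the command line, options.significance
# will simply be None rather than raising an error, so the code below has to be
# prepared to handle None values for any optional flag.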
parser = optparse.OptionParser()
parser.add_option("--experiment_file",action="store",type="string",dest="experiment_file")
parser.add_option("--folder",action="store",type="string",dest="operation_folder",default=".")
parser.add_option("--msstats-image-RData",action="store",type="string",dest="image_RData")
parser.add_option("--msstats-comparison-csv",action="store",type="string",dest="comparison_csv")
################# OUTPUTS ################################
parser.add_option("--comparisonPlotOutput",action="store",type="string",dest="comparisonPlotOutput")
parser.add_option("--heatmapOutput",action="store",type="string",dest="heatmapOutput")
parser.add_option("--volcanoPlotOutput",action="store",type="string",dest="volcanoPlotOutput")
parser.add_option("--RScriptOutput",action="store",type="string",dest="RScriptOutput")
################## BELOW THIS ARE PLOTTING OPTIONS ############################## These are actually all going to be moved into a separate tool
#general options
parser.add_option("--significance",action="store",type="float",dest="significance") # For the volcano plots...
parser.add_option("--FCthreshold",action="store",type="float",dest="FCthreshold") # FC threshold For the volcano plots...
parser.add_option("--ylimUp",action="store",type="float",dest="ylimUp") # ylimUp threshold for the plots
parser.add_option("--ylimDown",action="store",type="float",dest="ylimDown") # ylimDown threshold for plots
parser.add_option("--xlimUp",action="store",type="float",dest="xlimUp") # xlimUp threshold for Volcano plots
parser.add_option("--autoAxes",action="store_true",dest="autoAxes")
parser.add_option("--xAxisSize",action="store",type="int",dest="xAxisSize")
parser.add_option("--yAxisSize",action="store",type="int",dest="yAxisSize")
parser.add_option("--width",action="store",type="int",dest="width",default=10)
parser.add_option("--height",action="store",type="int",dest="height",default=10)
#HeatMap
parser.add_option("--numProtein",action="store",type="int",dest="numProtein",default=180) # Number of proteins per heatmap... Max is 180
parser.add_option("--clustering",action="store",type="string",dest="clustering",default="protein") # clustering type for heatmap... Can be "protein", "comparison", "both"
#VolcanoPlot
parser.add_option("--dotSize",action="store",type="int",dest="dotSize",default=3)#volcanoplot
parser.add_option("--textSize",action="store",type="int",dest="textSize",default=4)#volcanoplot
parser.add_option("--proteinName",action="store_true",dest="proteinName") # On volcano plot, draw protein names?
parser.add_option("--legendSize",action="store",type="int",dest="legendSize",default=7)
(options,args) = parser.parse_args()
if options.autoAxes:
xlimUp="FALSE"
ylimUp="FALSE"
ylimDown="FALSE"
else:
xlimUp=options.xlimUp
ylimUp=options.ylimUp
ylimDown=options.ylimDown
if options.proteinName:
proteinName="TRUE"
else:
proteinName="FALSE"
print "Now we're going to prepare the R script for MSstats graphing..."
#Let's start by reading in the experiment structure.
group_information = pandas.read_csv(options.experiment_file,sep='\t')
comparison_df = pandas.read_csv(options.comparison_csv)
with open("MSstats_Script.R",'wb') as script_writer:
script_writer.write("library(MSstats)\n")
script_writer.write("setwd(\""+str(basedir)+"\")\n") #We're going to set the current directory...
script_writer.write("load(\""+str(options.image_RData)+"\")\n")
#script_writer.write("comparisonResult<-read.csv(\""+str(options.comparison_csv)+"\")\n") #We will load in the input CSV file! (In this case by absolute path, though that's not necessary...)
#script_writer.write("write.csv(comparisonResult$ComparisonResult,file=\"comparisonResult_output.csv\")\n")
#OKAY! So, now we're going to write out the plots... This may take a bit...
    #So, first, let's check if we can output a heatmap (requires at least 2 comparisons)
    if len(comparison_df['Label'].unique().tolist())>=2:
#script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,type=\"Heatmap\", logBase.pvalue=2, sig="+str(options.significance)+", FCcutoff="+str(options.FCthreshold)+",ylimUp="+str(ylimUp)+",ylimDown="+str(ylimDown)+",xlimUp="+str(xlimUp)+",x.axis.size="+str(options.xAxisSize)+",y.axis.size="+str(options.yAxisSize)+",numProtein="+str(options.numProtein)+",clustering=\""+options.clustering+"\",width="+str(options.width)+",height="+str(options.height)+")\n") #add width, height, address
script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,type=\"Heatmap\", logBase.pvalue=2,x.axis.size="+str(options.xAxisSize)+",y.axis.size="+str(options.yAxisSize)+",numProtein="+str(options.numProtein)+",clustering=\""+options.clustering+"\",width="+str(options.width)+",height="+str(options.height)+")\n") #add width, height, address
#pass
script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,ProteinName=\""+proteinName+"\",type=\"VolcanoPlot\", logBase.pvalue=2, sig="+str(options.significance)+", FCcutoff="+str(options.FCthreshold)+",ylimUp="+str(ylimUp)+",ylimDown="+str(ylimDown)+",xlimUp="+str(xlimUp)+",x.axis.size="+str(options.xAxisSize)+",dot.size="+str(options.dotSize)+",text.size="+str(options.textSize)+",legend.size="+str(options.legendSize)+",width="+str(options.width)+",height="+str(options.height)+",which.Comparison=\"all\")\n")
script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,type=\"ComparisonPlot\", sig="+str(options.significance)+",x.axis.size="+str(options.xAxisSize)+",dot.size="+str(options.dotSize)+",legend.size="+str(options.legendSize)+",width="+str(options.width)+",height="+str(options.height)+",which.Comparison=\"all\")\n")
#OKAY.... The R Script has been written!
#We're going to execute the R script now!
print "Copying RScript back to Galaxy..."
shutil.copy('MSstats_Script.R',options.RScriptOutput)
subprocess.check_call(['Rscript', 'MSstats_Script.R'],shell=False,stderr=sys.stdout.fileno())
print "Moving files to final output locations...."
#print os.listdir(os.getcwd())
#shutil.copy('TMP_dataProcess_output.csv',options.processedOutput)
#shutil.copy('comparisonResult_output.csv',options.comparisonOutput)
shutil.copy('VolcanoPlot.pdf',options.volcanoPlotOutput)
if len(comparison_df['Label'].unique().tolist())>=2: # same condition used when the heatmap was generated above
shutil.copy('Heatmap.pdf',options.heatmapOutput)
shutil.copy('ComparisonPlot.pdf',options.comparisonPlotOutput)
print "All done!"
| wohllab/milkyway_proteomics | galaxy_milkyway_files/tools/wohl-proteomics/wohl_skyline/msstats_plots_wrapper.py | Python | mit | 7,436 |
# Python3
import base64
def weirdEncoding(encoding, message):
return base64.b64decode(message, encoding).decode()
| RevansChen/online-judge | Codefights/arcade/python-arcade/level-13/92.Weird-Encoding/Python/solution1.py | Python | mit | 120 |
from qdec_partial import get_ip_name
from qdec_partial import QDEC | hakehuang/pycpld | ips/ip/qdec/__init__.py | Python | mit | 66 |
import os
import sys
## Make sure pyqtgraph is importable
p = os.path.dirname(os.path.abspath(__file__))
p = os.path.join(p, '..', '..')
sys.path.insert(0, p)
from pyqtgraph.Qt import QtCore, QtGui
from DockArea import *
from Dock import *
app = QtGui.QApplication([])
win = QtGui.QMainWindow()
area = DockArea()
win.setCentralWidget(area)
win.resize(800,800)
from Dock import Dock
d1 = Dock("Dock1", size=(200,200))
d2 = Dock("Dock2", size=(100,100))
d3 = Dock("Dock3", size=(1,1))
d4 = Dock("Dock4", size=(50,50))
d5 = Dock("Dock5", size=(100,100))
d6 = Dock("Dock6", size=(300,300))
area.addDock(d1, 'left')
area.addDock(d2, 'right')
area.addDock(d3, 'bottom')
area.addDock(d4, 'right')
area.addDock(d5, 'left', d1)
area.addDock(d6, 'top', d4)
area.moveDock(d6, 'above', d4)
d3.hideTitleBar()
print "===build complete===="
for d in [d1, d2, d3, d4, d5]:
w = QtGui.QWidget()
l = QtGui.QVBoxLayout()
w.setLayout(l)
btns = []
for i in range(4):
btns.append(QtGui.QPushButton("%s Button %d"%(d.name(), i)))
l.addWidget(btns[-1])
d.w = (w, l, btns)
d.addWidget(w)
import pyqtgraph as pg
p = pg.PlotWidget()
d6.addWidget(p)
print "===widgets added==="
#s = area.saveState()
#print "\n\n-------restore----------\n\n"
#area.restoreState(s)
s = None
def save():
global s
s = area.saveState()
def load():
global s
area.restoreState(s)
#d6.container().setCurrentIndex(0)
#d2.label.setTabPos(40)
#win2 = QtGui.QMainWindow()
#area2 = DockArea()
#win2.setCentralWidget(area2)
#win2.resize(800,800)
win.show()
#win2.show()
| robertsj/poropy | pyqtgraph/dockarea/__main__.py | Python | mit | 1,587 |
from scrapy import Spider
from scrapy.http import Request
from firmware.items import FirmwareImage
from firmware.loader import FirmwareLoader
class FoscamSpider(Spider):
name = "foscam"
allowed_domains = ["foscam.com"]
start_urls = [
"http://www.foscam.com/download-center/firmware-downloads.html"]
def start_requests(self):
for url in self.start_urls:
yield Request(url, cookies={'loginEmail': "@.com"}, dont_filter=True)
def parse(self, response):
for i in range(0, len(response.xpath("//div[@id='main_right']/span[1]/p")), 7):
prods = response.xpath("//div[@id='main_right']/span[1]//p[%d]/text()" % (i + 2)).extract()[0].split("\r\n")
            for product in prods:
item = FirmwareLoader(item=FirmwareImage(), response=response)
item.add_xpath("version", "//div[@id='main_right']/span[1]//p[%d]/text()" % (i + 3))
item.add_xpath("url", "//div[@id='main_right']/span[1]//p[%d]/a/@href" % (i + 7))
item.add_value("product", product)
item.add_value("vendor", self.name)
yield item.load_item()
for i in range(0, len(response.xpath("//div[@id='main_right']/span[2]/p")), 5):
prods = response.xpath("//div[@id='main_right']/span[2]//p[%d]/text()" % (i + 2)).extract()[0].split(",")
            for product in prods:
item = FirmwareLoader(item=FirmwareImage(), response=response)
item.add_xpath("version", "//div[@id='main_right']/span[2]//p[%d]/text()" % (i + 3))
item.add_xpath("url", "//div[@id='main_right']/span[2]//p[%d]/a/@href" % (i + 5))
item.add_value("product", product)
item.add_value("vendor", self.name)
yield item.load_item()
| firmadyne/scraper | firmware/spiders/foscam.py | Python | mit | 1,854 |
from django.utils import simplejson
from dajaxice.decorators import dajaxice_register
from django.utils.translation import ugettext as _
from django.template.loader import render_to_string
from dajax.core import Dajax
from django.db import transaction
from darkoob.book.models import Book, Review
@dajaxice_register(method='POST')
@transaction.commit_manually
def rate(request, rate, book_id):
    done = False
    book = ''
    errors = []
    try:
        book = Book.objects.get(id=book_id)
        book.rating.add(score=rate, user=request.user, ip_address=request.META['REMOTE_ADDR'])
    except:
        errors.append('An error occurred while recording the rating in the database')
        transaction.rollback()
else:
done = True
transaction.commit()
return simplejson.dumps({'done':done})
@dajaxice_register(method='POST')
@transaction.commit_manually
def review_rate(request, rate, review_id):
print "review id",review_id
    done = False
    errors = []
    try:
        review = Review.objects.get(id=review_id)
        review.rating.add(score=rate, user=request.user, ip_address=request.META['REMOTE_ADDR'])
    except:
        errors.append('An error occurred while recording the rating in the database')
        transaction.rollback()
else:
done = True
transaction.commit()
return simplejson.dumps({'done': done})
@dajaxice_register(method='POST')
def submit_review(request, book_id, title, text):
dajax = Dajax()
#TODO: checks if you have permission for posting review
try:
book = Book.objects.get(id=book_id)
except Book.DoesNotExist:
dajax.script('''
$.pnotify({
title: 'Review',
type:'error',
                text: 'This book doesn\'t exist.',
opacity: .8
});
$('#id_text').val('');
$('#id_title').val('');
''')
else:
if len(text) < 200:
transaction.rollback()
dajax.script('''
$.pnotify({
title: 'Review',
type:'error',
                    text: 'Your review is too short. Please write a bit more and resubmit.',
opacity: .8
});
$('#id_text').val('');
$('#id_title').val('');
''')
else:
review = Review.objects.create(book=book, user=request.user, title=title, text=text)
t_rendered = render_to_string('book/review.html', {'review': review})
dajax.prepend('#id_new_post_position', 'innerHTML', t_rendered)
dajax.script('''
$.pnotify({
title: 'Review',
type:'success',
text: 'Your review record',
opacity: .8
});
$('#id_text').val('');
$('#id_title').val('');
''')
return dajax.json()
@dajaxice_register(method='POST')
def ha(request, book_name):
print "book_name", book_name
return simplejson.dumps({'done': True})
| s1na/darkoob | darkoob/book/ajax.py | Python | mit | 2,980 |
from osmcache.cli import base
if __name__ == '__main__':
base()
| kirchenreich/osm-api-cache | osmcache.py | Python | mit | 69 |
"""
Run hugs pipeline.
"""
from __future__ import division, print_function
import os, shutil
from time import time
import mpi4py.MPI as MPI
import schwimmbad
from hugs.pipeline import next_gen_search, find_lsbgs
from hugs.utils import PatchMeta
import hugs
def ingest_data(args):
"""
Write data to database with the master process.
"""
timer = time()
success, sources, meta_data, synth_ids = args
run_name, tract, patch, patch_meta = meta_data
db_ingest = hugs.database.HugsIngest(session, run_name)
if success:
db_ingest.add_all(tract, patch, patch_meta, sources)
if synth_ids is not None:
db_ingest.add_injected_synths(synth_ids)
else:
db_ingest.add_tract(tract)
db_ingest.add_patch(patch, patch_meta)
delta_time = time() - timer
hugs.log.logger.info('time to ingest = {:.2f} seconds'.format(delta_time))
def worker(p):
"""
Workers initialize pipe configuration and run pipeline.
"""
rank = MPI.COMM_WORLD.Get_rank()
if p['seed'] is None:
tract, p1, p2 = p['tract'], int(p['patch'][0]), int(p['patch'][-1])
seed = [int(time()), tract, p1, p2, rank]
else:
seed = p['seed']
config = hugs.PipeConfig(run_name=p['run_name'],
config_fn=p['config_fn'],
random_state=seed,
log_fn=p['log_fn'],
rerun_path=p['rerun_path'])
config.set_patch_id(p['tract'], p['patch'])
config.logger.info('random seed set to {}'.format(seed))
if p['use_old_pipeline']:
results = find_lsbgs.run(config)
else:
results = next_gen_search.run(config, False)
pm = results.hugs_exp.patch_meta
if (results.synths is not None) and results.success:
if len(results.synths) > 0:
synth_ids = results.synths.to_pandas().loc[:, ['synth_id']]
for plane in config.synth_check_masks:
masked = hugs.synths.find_masked_synths(results.synths,
results.exp_clean,
planes=plane)
synth_ids['mask_' + plane.lower()] = masked
else:
synth_ids = None
else:
synth_ids = None
patch_meta = PatchMeta(
x0 = pm.x0,
y0 = pm.y0,
small_frac = pm.small_frac,
cleaned_frac = pm.cleaned_frac,
bright_obj_frac = pm.bright_obj_frac,
good_data_frac = pm.good_data_frac
)
meta_data = [
config.run_name,
config.tract,
config.patch,
patch_meta,
]
if results.success:
df = results.sources.to_pandas()
df['flags'] = df['flags'].astype(int)
else:
df = None
config.reset_mask_planes()
config.logger.info('writing results to database')
return results.success, df, meta_data, synth_ids
if __name__=='__main__':
from argparse import ArgumentParser
from astropy.table import Table
rank = MPI.COMM_WORLD.Get_rank()
# parse command-line arguments
parser = ArgumentParser('Run hugs pipeline')
parser.add_argument('-t', '--tract', type=int, help='HSC tract')
parser.add_argument('-p', '--patch', type=str, help='HSC patch')
parser.add_argument('-c', '--config_fn', help='hugs config file',
default=hugs.utils.default_config_fn)
parser.add_argument('--patches_fn', help='patches file')
parser.add_argument('--use-old-pipeline', action="store_true")
parser.add_argument('-r', '--run_name', type=str, default='hugs-pipe-run')
parser.add_argument('--seed', help='rng seed', default=None)
parser.add_argument('--rerun_path', help='full rerun path', default=None)
parser.add_argument('--overwrite', type=bool,
help='overwrite database', default=True)
group = parser.add_mutually_exclusive_group()
group.add_argument('--ncores', default=1, type=int,
help='Number of processes (uses multiprocessing).')
group.add_argument('--mpi', default=False, action="store_true",
help="Run with MPI.")
args = parser.parse_args()
config_params = hugs.utils.read_config(args.config_fn)
outdir = config_params['hugs_io']
#######################################################################
# run on a single patch
#######################################################################
if args.tract is not None:
assert args.patch is not None
tract, patch = args.tract, args.patch
patches = Table([[tract], [patch]], names=['tract', 'patch'])
run_dir_name = '{}-{}-{}'.format(args.run_name, tract, patch)
outdir = os.path.join(outdir, run_dir_name)
hugs.utils.mkdir_if_needed(outdir)
log_fn = os.path.join(outdir, 'hugs-pipe.log')
patches['outdir'] = outdir
patches['log_fn'] = log_fn
#######################################################################
# OR run on all patches in file
#######################################################################
elif args.patches_fn is not None:
patches = Table.read(args.patches_fn)
if rank==0:
time_label = hugs.utils.get_time_label()
outdir = os.path.join(
outdir, '{}-{}'.format(args.run_name, time_label))
hugs.utils.mkdir_if_needed(outdir)
log_dir = os.path.join(outdir, 'log')
hugs.utils.mkdir_if_needed(log_dir)
log_fn = []
for tract, patch in patches['tract', 'patch']:
fn = os.path.join(log_dir, '{}-{}.log'.format(tract, patch))
log_fn.append(fn)
patches['outdir'] = outdir
patches['log_fn'] = log_fn
else:
print('\n**** must give tract and patch --or-- a patch file ****\n')
parser.print_help()
exit()
patches['rerun_path'] = args.rerun_path
patches['seed'] = args.seed
patches['config_fn'] = args.config_fn
patches['run_name'] = args.run_name
patches['use_old_pipeline'] = args.use_old_pipeline
if rank==0:
# open database session with master process
db_fn = os.path.join(outdir, args.run_name+'.db')
engine = hugs.database.connect(db_fn, args.overwrite)
session = hugs.database.Session()
shutil.copyfile(args.config_fn, os.path.join(outdir, 'config.yml'))
pool = schwimmbad.choose_pool(mpi=args.mpi, processes=args.ncores)
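    # pool.map distributes the patches to worker(); the callback runs in this master
    # process for every result, which is why only rank 0 opens the database session.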
list(pool.map(worker, patches, callback=ingest_data))
pool.close()
| johnnygreco/hugs | scripts/runner.py | Python | mit | 6,693 |
import numpy as np
import networkx
from zephyr.Problem import SeisFDFDProblem
# Plotting configuration
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
# System / modelling configuration
cellSize = 1 # m
freqs = [2e2] # Hz
density = 2700 # units of density
Q = np.inf # can be inf
nx = 164 # count
nz = 264 # count
freeSurf = [False, False, False, False] # t r b l
dims = (nx,nz) # tuple
nPML = 32
rho = np.fliplr(np.ones(dims) * density)
nfreq = len(freqs) # number of frequencies
nky = 48 # number of y-directional plane-wave components
nsp = nfreq * nky # total number of 2D subproblems
velocity = 2500 # m/s
vanom = 500 # m/s
cPert = np.zeros(dims)
cPert[(nx/2)-20:(nx/2)+20,(nz/2)-20:(nz/2)+20] = vanom
c = np.fliplr(np.ones(dims) * velocity)
cFlat = c
c += np.fliplr(cPert)
cTrue = c
srcs = np.array([np.ones(101)*32, np.zeros(101), np.linspace(32, 232, 101)]).T
recs = np.array([np.ones(101)*132, np.zeros(101), np.linspace(32, 232, 101)]).T
nsrc = len(srcs)
nrec = len(recs)
recmode = 'fixed'
geom = {
'src': srcs,
'rec': recs,
'mode': 'fixed',
}
cache = False
cacheDir = '.'
# Base configuration for all subproblems
systemConfig = {
'dx': cellSize, # m
'dz': cellSize, # m
'c': c.T, # m/s
'rho': rho.T, # density
'Q': Q, # can be inf
'nx': nx, # count
'nz': nz, # count
'freeSurf': freeSurf, # t r b l
'nPML': nPML,
'geom': geom,
'cache': cache,
'cacheDir': cacheDir,
'freqs': freqs,
'nky': nky,
}
sp = SeisFDFDProblem(systemConfig)
jobs = sp.forwardAccumulate()
def trackprogress(sp, jobs, interval=1.0):
systemJobs = jobs['systemJobs']
jobkeys = systemJobs.keys()
jobkeys.sort()
fig = plt.figure()
ax1 = fig.add_axes([0.1,0.10,0.15,0.85], xlabel='Subproblem', ylabel='Source')
ax1.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
ax2 = fig.add_axes([0.25,0.10,0.75,0.85], xlabel='Receiver')
im1 = ax2.imshow(np.zeros((nsrc, nrec)), vmin=-50*nky, vmax=50*nky, cmap=cm.bwr)
im2 = ax1.imshow(np.zeros((nsrc, nsp)), vmin=0, vmax=2, interpolation='nearest', aspect='auto')
plt.show()
def update():
#try:
# res = reduce(np.add, sp.par['dview']['resultTracker'])
#except:
# res = {}
#keys = [(freqs[0], i) for i in range(nrec)]
#resarr = np.array([res[key] if key in res.keys() else np.zeros(nrec) for key in keys])
status = np.zeros((len(jobkeys),nsrc))
for i, key in enumerate(jobkeys):
status[i,:] = 1. * systemJobs[key][0].ready()#np.array([systemJobs[key][j].ready() for j in xrange(1)])
if systemJobs[key][0].ready():#for j in np.argwhere(status[i,:]):
status[i,:] += not systemJobs[key][0].successful()
#im1.set_data(resarr.real)
im2.set_data(status.T)
fig.canvas.draw()
fig.canvas.flush_events()
while True:
try:
plt.pause(interval)
update()
except KeyboardInterrupt:
print('Exiting loop...')
break
finally:
if not reduce(np.add, sp.par['dview']['resultTracker.interactcounter']) < (nsp * nsrc):
break
trackprogress(sp, jobs)
| bsmithyman/zephyr | LiveDataDemoBigJobs.py | Python | mit | 3,762 |
import os.path as osp
import os
USE_GPU = False
AWS_REGION_NAME = 'us-east-2'
if USE_GPU:
DOCKER_IMAGE = 'dementrock/rllab3-shared-gpu'
else:
# DOCKER_IMAGE = 'dwicke/jump:docker'
DOCKER_IMAGE = 'dwicke/parameterized:latest'
DOCKER_LOG_DIR = '/tmp/expt'
AWS_S3_PATH = 's3://pytorchrl/pytorchrl/experiments'
AWS_CODE_SYNC_S3_PATH = 's3://pytorchrl/pytorchrl/code'
ALL_REGION_AWS_IMAGE_IDS = {
'ap-northeast-1': 'ami-002f0167',
'ap-northeast-2': 'ami-590bd937',
'ap-south-1': 'ami-77314318',
'ap-southeast-1': 'ami-1610a975',
'ap-southeast-2': 'ami-9dd4ddfe',
'eu-central-1': 'ami-63af720c',
'eu-west-1': 'ami-41484f27',
'sa-east-1': 'ami-b7234edb',
'us-east-1': 'ami-83f26195',
'us-east-2': 'ami-66614603',
'us-west-1': 'ami-576f4b37',
'us-west-2': 'ami-b8b62bd8'
}
AWS_IMAGE_ID = ALL_REGION_AWS_IMAGE_IDS[AWS_REGION_NAME]
if USE_GPU:
AWS_INSTANCE_TYPE = 'g2.2xlarge'
else:
AWS_INSTANCE_TYPE = 'c4.2xlarge'
ALL_REGION_AWS_KEY_NAMES = {
"us-east-2": "pytorchrl-us-east-2",
"us-east-1": "pytorchrl-us-east-1"
}
AWS_KEY_NAME = ALL_REGION_AWS_KEY_NAMES[AWS_REGION_NAME]
AWS_SPOT = True
AWS_SPOT_PRICE = '0.1'
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY', None)
AWS_ACCESS_SECRET = os.environ.get('AWS_ACCESS_SECRET', None)
AWS_IAM_INSTANCE_PROFILE_NAME = 'pytorchrl'
AWS_SECURITY_GROUPS = ['pytorchrl-sg']
ALL_REGION_AWS_SECURITY_GROUP_IDS = {
"us-east-2": [
"sg-18009370"
],
"us-east-1": [
"sg-46308b34"
]
}
AWS_SECURITY_GROUP_IDS = ALL_REGION_AWS_SECURITY_GROUP_IDS[AWS_REGION_NAME]
FAST_CODE_SYNC_IGNORES = [
'.git',
'data/local',
'data/s3',
'data/video',
'src',
'.idea',
'.pods',
'tests',
'examples',
'docs',
'.idea',
'.DS_Store',
'.ipynb_checkpoints',
'blackbox',
'blackbox.zip',
'*.pyc',
'*.ipynb',
'scratch-notebooks',
'conopt_root',
'private/key_pairs',
]
FAST_CODE_SYNC = True
| nosyndicate/pytorchrl | pytorchrl/config_personal.py | Python | mit | 1,995 |
"""
load score-level asroutput files into a pandas df
"""
import pickle
import re
import pandas as pd
def mr_csvs():
"""
load data/mr.p and generate two csv files.
:return:
"""
x = pickle.load(open('data/mr.p', "rb"))
revs, W, W2, word_idx_map, vocab = x[0], x[1], x[2], x[3], x[4]
print("mr.p has been loaded!")
# focusing on revs.
texts, labels = [], []
for rev in revs:
texts.append(rev["text"])
labels.append(rev["y"])
df = pd.DataFrame({'label': labels, 'text': texts})
print(df.head())
def read_asrout(data_folder, csvFile, clean_string=True):
"""
read per-score level asrout files
"""
txts = []
scores = []
for score in [0, 1, 2, 3]:
afile = data_folder[score]
print "score:" + str(score) + data_folder[score]
with open(afile, "rb") as f:
for line in f:
rev = []
rev.append(line.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
scores.append(score)
txts.append(orig_rev)
df = pd.DataFrame({'text': txts, 'score': scores})
df.to_csv(open(csvFile, 'wb'))
def clean_str(string, TREC=False):
"""
Tokenization/string cleaning for all datasets except for SST.
Every dataset is lower cased except for TREC
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip() if TREC else string.strip().lower()
if __name__=="__main__":
data_folder = ["data/train1.txt", "data/train2.txt", "data/train3.txt", "data/train4.txt"]
read_asrout(data_folder, csvFile = "data/train.csv")
data_folder = ["data/test1.txt", "data/test2.txt", "data/test3.txt", "data/test4.txt"]
read_asrout(data_folder, csvFile = "data/test.csv")
| leocnj/dl_response_rater | archv/pd_load.py | Python | mit | 2,418 |
#!/usr/bin/env python3
"""
Wikipedia lookup plugin for Botty.
Example invocations:
#general | Me: what is fire
#general | Botty: wikipedia says, "Fire is the rapid oxidation of a material in the exothermic chemical process of combustion, releasing heat, light, and various reaction products. Slower oxidative processes like rusting or digestion are not included by this definition."
#general | Me: what's bismuth?
#general | Botty: wikipedia says, "Bismuth is a chemical element with symbol Bi and atomic number 83. Bismuth, a pentavalent post-transition metal, chemically resembles arsenic and antimony. Elemental bismuth may occur naturally, although its sulfide and oxide form important commercial ores."
#general | Me: wtf is water
#general | Botty: wikipedia says, "Water (chemical formula: H2O) is a transparent fluid which forms the world's streams, lakes, oceans and rain, and is the major constituent of the fluids of organisms. As a chemical compound, a water molecule contains one oxygen and two hydrogen atoms that are connected by covalent bonds."
"""
import re
import wikipedia
from .utilities import BasePlugin
class WikiPlugin(BasePlugin):
def __init__(self, bot):
super().__init__(bot)
def on_message(self, message):
text = self.get_message_text(message)
if text is None: return False
match = re.search(r"^\s*\b(?:what\s+is|what's|wtf\s+(?:is|are))\s+([^,\?]+|\"[^\"]+\")", text, re.IGNORECASE)
if not match: return False
query = self.sendable_text_to_text(match.group(1)) # get query as plain text in order to make things like < and > work (these are usually escaped)
if query in {"this", "that", "going on", "up"}: return False # ignore these common false positive expressions
# perform Wikipedia lookup
try:
self.respond_raw("wikipedia says, \"{}\"".format(wikipedia.summary(query, sentences=2)))
except wikipedia.exceptions.DisambiguationError as e: # disambiguation page, list possibilities
self.respond_raw("could be one of the following: {}".format("; ".join(e.args[1])))
except:
self.respond_raw("dunno")
return True
| DanielHopper/botty-bot-bot-bot | src/plugins/wiki.py | Python | mit | 2,233 |
import logging
import rethinkdb as r
log = logging.getLogger(__name__)
class Database():
def __init__(self, bot):
self.bot = bot
self.db_name = self.bot.config.rname
self.db = None
r.set_loop_type("asyncio")
self.ready = False
def get_db(self):
"""
Returns the RethinkDB module/instance
"""
return r
async def insert(self, table, data):
"""
Insert a document into a table
"""
log.debug(
"Saving document to table {} with data: {}".format(table, data))
return await r.table(table).insert(data, conflict="update").run(self.db)
async def delete(self, table, primary_key=None):
"""
Deletes a document(s) from a table
"""
log.debug(
"Deleting document from table {} with primary key {}".format(table, primary_key))
if primary_key is not None:
# Delete one document with the key name
return await r.table(table).get(primary_key).delete().run(self.db)
else:
# Delete all documents in the table
return await r.table(table).delete().run(self.db)
async def connect(self, host, port, user, password):
"""
Establish a database connection
"""
log.info("Connecting to database: {}".format(self.db_name))
try:
self.db = await r.connect(db=self.db_name, host=host, port=port, user=user, password=password)
except r.errors.ReqlDriverError as e:
log.error(e)
return False
info = await self.db.server()
# Create the database if it does not exist
try:
await r.db_create(self.db_name).run(self.db)
log.info("Created database: {}".format(self.db_name))
except r.errors.ReqlOpFailedError:
log.debug(
"Database {} already exists, skipping creation".format(self.db_name))
return True
async def create_table(self, name, primary='id'):
"""
Creates a new table in the database
"""
try:
await r.table_create(name, primary_key=primary).run(self.db)
log.info("Created table: {}".format(name))
except r.errors.ReqlOpFailedError:
log.debug(
"Table {} already exists, skipping creation".format(name))
| jaydenkieran/Turbo | turbo/database.py | Python | mit | 2,402 |
import pygame
import sys
from game import constants, gamestate
from game.ai.easy import EasyAI
from game.media import media
from game.scene import Scene
# List of menu options (text, action_method, condition) where condition is None or a callable.
# If it is a callable that returns False, the option is not shown.
CONTINUE = 0
NEW_GAME = 1
QUIT = 2
OPTIONS = [
('Continue', 'opt_continue', lambda scene: scene.game_running),
('2 Player', 'start_2_player', None),
('Vs CPU', 'start_vs_cpu', None),
('Computer Battle!', 'start_cpu_vs_cpu', None),
('Quit', 'opt_quit', None),
]
class MenuScene(Scene):
def load(self):
self.font = pygame.font.Font(constants.MENU_FONT, constants.MENU_FONT_SIZE)
self.active_font = pygame.font.Font(constants.MENU_FONT, constants.MENU_FONT_SIZE_ACTIVE)
media.play_music('intro')
def setup(self, first_time=False):
# Selected menu choice - if "Continue" is there, have that selected
self._current_option = NEW_GAME if first_time else CONTINUE
self.game_running = self.manager.get_state('main', 'running')
def render_options(self, screen):
x, y = 30, 30
for index, (text, action, show) in enumerate(OPTIONS):
if show is not None and not show(self):
continue
active = index == self._current_option
font = self.active_font if active else self.font
surf = font.render(text, True, constants.MENU_FONT_COLOR)
screen.blit(surf, (x, y))
if active:
screen.blit(media['img.arrow'], (x - 25, y + 12))
y += surf.get_height() + 10
def render(self, screen):
screen.blit(media['img.title'], (0, 0))
self.render_options(screen)
def opt_continue(self):
self.manager.switch_scene('main')
return True
def new_match(self, player1, player2):
media.fade_music(1000)
gamestate.new_game(player1, player2)
self.manager.switch_scene('main')
return True
def start_2_player(self):
self.new_match(gamestate.HUMAN, gamestate.HUMAN)
def start_vs_cpu(self):
self.new_match(gamestate.HUMAN, EasyAI())
def start_cpu_vs_cpu(self):
self.new_match(EasyAI(), EasyAI())
def opt_quit(self):
sys.exit()
def do_event(self, event):
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
if self.game_running:
self.manager.switch_scene('main')
return
elif event.key in (pygame.K_UP, pygame.K_DOWN):
media['snd.button'].play()
move = -1 if event.key == pygame.K_UP else 1
self._current_option = (self._current_option + move) % len(OPTIONS)
if self._current_option == CONTINUE and not self.game_running:
self._current_option = NEW_GAME if event.key == pygame.K_DOWN else (len(OPTIONS) - 1)
elif event.key == pygame.K_RETURN:
if self._current_option != NEW_GAME:
media['snd.button_press'].play()
action = OPTIONS[self._current_option][1]
return getattr(self, action)()
return False
| dbreen/connectfo | game/scenes/menu.py | Python | mit | 3,374 |
"""
Qizx Python API bindings
:copyright: (c) 2015 by Michael Paddon
:license: MIT, see LICENSE for more details.
"""
from .qizx import (
Client, QizxError, QizxBadRequestError, QizxServerError,
QizxNotFoundError, QizxAccessControlError, QizxXMLDataError,
QizxCompilationError, QizxEvaluationError, QizxTimeoutError,
QizxImportError, UnexpectedResponseError, TransactionError
)
__title__ = 'qizx'
__version__ = '1.0.2'
__author__ = "Michael Paddon"
__license__ = 'MIT'
__copyright__ = "Copyright 2015 Michael Paddon"
| qizxdb/qizx-python | qizx/__init__.py | Python | mit | 534 |
# -*- coding: utf-8 -*-
# flake8: noqa
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('text', models.CharField(max_length=32, default='')),
],
),
]
| nsdont/gogs_ci_demo | superlists/lists/migrations/0001_initial.py | Python | mit | 506 |
import unittest
"""
Given an unordered array of integers, find the length of longest increasing subsequence.
Input: 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
Output: 6 (0, 2, 6, 9, 11, 15)
"""
"""
A great explanation of the approach appears here:
http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/
"""
def find_ceil_index(list_of_numbers, ele):
"""
Returns the smallest element in list_of_numbers greater than or equal to ele.
"""
low = 0
high = len(list_of_numbers)-1
ans = -1
while low <= high:
        mid = (low + high) // 2  # floor division so mid stays an int on Python 3 as well
if list_of_numbers[mid] >= ele:
ans = mid
high = mid - 1
else:
low = mid + 1
return ans
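# e.g. find_ceil_index([1, 3, 5, 7], 4) == 2, because 5 is the smallest element >= 4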
def find_longest_increasing_subsequence_length(list_of_numbers):
LCS = [list_of_numbers[0]]
for i in range(1, len(list_of_numbers)):
cur_ele = list_of_numbers[i]
k = find_ceil_index(LCS, cur_ele)
if k == -1:
LCS.append(cur_ele)
else:
LCS[k] = cur_ele
return len(LCS)
class TestLIS(unittest.TestCase):
def test_longest_increasing_subsequence(self):
list_of_numbers = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [2, 5, 3, 1, 2, 3, 4, 5, 6]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [5, 4, 3, 2, 1]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 1)
| prathamtandon/g4gproblems | Arrays/longest_increasing_subsequence_nlogn.py | Python | mit | 1,625 |
# -*- coding: latin-1 -*-
#retriever
"""Retriever script for direct download of vertnet-mammals data"""
from builtins import str
from retriever.lib.models import Table
from retriever.lib.templates import Script
import os
from pkg_resources import parse_version
try:
from retriever.lib.defaults import VERSION
except ImportError:
from retriever import VERSION
class main(Script):
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
self.title = "Vertnet Mammals"
self.name = "vertnet-mammals"
self.retriever_minimum_version = '2.0.dev'
self.version = '1.1.1'
self.ref = "http://vertnet.org/resources/datatoolscode.html"
self.urls = {
'mammals': 'https://de.iplantcollaborative.org/anon-files//iplant/home/shared/commons_repo/curated/Vertnet_Mammalia_Sep2016/VertNet_Mammalia_Sept2016.zip',
}
self.citation = "Bloom, D., Wieczorek J., Russell, L. (2016). VertNet_Mammals_Sept. 2016. CyVerse Data Commons. http://datacommons.cyverse.org/browse/iplant/home/shared/commons_repo/curated/VertNet_Mammals_Sep2016"
self.description = "Compilation of digitized museum records of mammals including locations, dates of collection, and some trait data."
self.keywords = ['mammals']
if parse_version(VERSION) <= parse_version("2.0.0"):
self.shortname = self.name
self.name = self.title
self.tags = self.keywords
def download(self, engine=None, debug=False):
Script.download(self, engine, debug)
engine = self.engine
filename = 'vertnet_latest_mammals.csv'
tablename = 'mammals'
table = Table(str(tablename), delimiter=',')
table.columns = [
("record_id", ("pk-auto",)),
("beginrecord", ("char",)),
("icode", ("char",)),
("title", ("char",)),
("citation", ("char",)),
("contact", ("char",)),
("email", ("char",)),
("emlrights", ("char",)),
("gbifdatasetid", ("char",)),
("gbifpublisherid", ("char",)),
("doi", ("char",)),
("migrator", ("char",)),
("networks", ("char",)),
("orgcountry", ("char",)),
("orgname", ("char",)),
("orgstateprovince", ("char",)),
("pubdate", ("char",)),
("source_url", ("char",)),
("iptrecordid", ("char",)),
("associatedmedia", ("char",)),
("associatedoccurrences", ("char",)),
("associatedorganisms", ("char",)),
("associatedreferences", ("char",)),
("associatedsequences", ("char",)),
("associatedtaxa", ("char",)),
("bed", ("char",)),
("behavior", ("char",)),
("catalognumber", ("char",)),
("continent", ("char",)),
("coordinateprecision", ("char",)),
("coordinateuncertaintyinmeters", ("char",)),
("country", ("char",)),
("countrycode", ("char",)),
("county", ("char",)),
("dateidentified", ("char",)),
("day", ("char",)),
("decimallatitude", ("char",)),
("decimallongitude", ("char",)),
("disposition", ("char",)),
("earliestageorloweststage", ("char",)),
("earliesteonorlowesteonothem", ("char",)),
("earliestepochorlowestseries", ("char",)),
("earliesteraorlowesterathem", ("char",)),
("earliestperiodorlowestsystem", ("char",)),
("enddayofyear", ("char",)),
("establishmentmeans", ("char",)),
("eventdate", ("char",)),
("eventid", ("char",)),
("eventremarks", ("char",)),
("eventtime", ("char",)),
("fieldnotes", ("char",)),
("fieldnumber", ("char",)),
("footprintspatialfit", ("char",)),
("footprintsrs", ("char",)),
("footprintwkt", ("char",)),
("formation", ("char",)),
("geodeticdatum", ("char",)),
("geologicalcontextid", ("char",)),
("georeferencedby", ("char",)),
("georeferenceddate", ("char",)),
("georeferenceprotocol", ("char",)),
("georeferenceremarks", ("char",)),
("georeferencesources", ("char",)),
("georeferenceverificationstatus", ("char",)),
("group", ("char",)),
("habitat", ("char",)),
("highergeography", ("char",)),
("highergeographyid", ("char",)),
("highestbiostratigraphiczone", ("char",)),
("identificationid", ("char",)),
("identificationqualifier", ("char",)),
("identificationreferences", ("char",)),
("identificationremarks", ("char",)),
("identificationverificationstatus", ("char",)),
("identifiedby", ("char",)),
("individualcount", ("char",)),
("island", ("char",)),
("islandgroup", ("char",)),
("latestageorhigheststage", ("char",)),
("latesteonorhighesteonothem", ("char",)),
("latestepochorhighestseries", ("char",)),
("latesteraorhighesterathem", ("char",)),
("latestperiodorhighestsystem", ("char",)),
("lifestage", ("char",)),
("lithostratigraphicterms", ("char",)),
("locality", ("char",)),
("locationaccordingto", ("char",)),
("locationid", ("char",)),
("locationremarks", ("char",)),
("lowestbiostratigraphiczone", ("char",)),
("materialsampleid", ("char",)),
("maximumdepthinmeters", ("char",)),
("maximumdistanceabovesurfaceinmeters", ("char",)),
("maximumelevationinmeters", ("char",)),
("member", ("char",)),
("minimumdepthinmeters", ("char",)),
("minimumdistanceabovesurfaceinmeters", ("char",)),
("minimumelevationinmeters", ("char",)),
("month", ("char",)),
("municipality", ("char",)),
("occurrenceid", ("char",)),
("occurrenceremarks", ("char",)),
("occurrencestatus", ("char",)),
("organismid", ("char",)),
("organismname", ("char",)),
("organismremarks", ("char",)),
("organismscope", ("char",)),
("othercatalognumbers", ("char",)),
("pointradiusspatialfit", ("char",)),
("preparations", ("char",)),
("previousidentifications", ("char",)),
("recordedby", ("char",)),
("recordnumber", ("char",)),
("reproductivecondition", ("char",)),
("samplingeffort", ("char",)),
("samplingprotocol", ("char",)),
("sex", ("char",)),
("startdayofyear", ("char",)),
("stateprovince", ("char",)),
("typestatus", ("char",)),
("verbatimcoordinates", ("char",)),
("verbatimcoordinatesystem", ("char",)),
("verbatimdepth", ("char",)),
("verbatimelevation", ("char",)),
("verbatimeventdate", ("char",)),
("verbatimlatitude", ("char",)),
("verbatimlocality", ("char",)),
("verbatimlongitude", ("char",)),
("verbatimsrs", ("char",)),
("waterbody", ("char",)),
("year", ("char",)),
("dctype", ("char",)),
("modified", ("char",)),
("language", ("char",)),
("license", ("char",)),
("rightsholder", ("char",)),
("accessrights", ("char",)),
("bibliographiccitation", ("char",)),
("dc_references", ("char",)),
("institutionid", ("char",)),
("collectionid", ("char",)),
("datasetid", ("char",)),
("institutioncode", ("char",)),
("collectioncode", ("char",)),
("datasetname", ("char",)),
("ownerinstitutioncode", ("char",)),
("basisofrecord", ("char",)),
("informationwithheld", ("char",)),
("datageneralizations", ("char",)),
("dynamicproperties", ("char",)),
("scientificnameid", ("char",)),
("namepublishedinid", ("char",)),
("scientificname", ("char",)),
("acceptednameusage", ("char",)),
("originalnameusage", ("char",)),
("namepublishedin", ("char",)),
("namepublishedinyear", ("char",)),
("higherclassification", ("char",)),
("kingdom", ("char",)),
("phylum", ("char",)),
("class", ("char",)),
("order", ("char",)),
("family", ("char",)),
("genus", ("char",)),
("subgenus", ("char",)),
("specificepithet", ("char",)),
("infraspecificepithet", ("char",)),
("taxonrank", ("char",)),
("verbatimtaxonrank", ("char",)),
("scientificnameauthorship", ("char",)),
("vernacularname", ("char",)),
("nomenclaturalcode", ("char",)),
("taxonomicstatus", ("char",)),
("keyname", ("char",)),
("haslicense", ("int",)),
("vntype", ("char",)),
("rank", ("int",)),
("mappable", ("int",)),
("hashid", ("char",)),
("hastypestatus", ("int",)),
("wascaptive", ("int",)),
("wasinvasive", ("int",)),
("hastissue", ("int",)),
("hasmedia", ("int",)),
("isfossil", ("int",)),
("haslength", ("int",)),
("haslifestage", ("int",)),
("hasmass", ("int",)),
("hassex", ("int",)),
("lengthinmm", ("double",)),
("massing", ("double",)),
("lengthunitsinferred", ("char",)),
("massunitsinferred", ("char",)),
("underivedlifestage", ("char",)),
("underivedsex", ("char",))]
engine.table = table
if not os.path.isfile(engine.format_filename(filename)):
engine.download_files_from_archive(self.urls[tablename], [filename], filetype="zip", archivename="vertnet_latest_" + str(tablename))
engine.create_table()
engine.insert_data_from_file(engine.format_filename(str(filename)))
SCRIPT = main()
| goelakash/retriever | scripts/vertnet_mammals.py | Python | mit | 10,514 |
from delta_variance import DeltaVariance, DeltaVariance_Distance
| keflavich/TurbuStat | turbustat/statistics/delta_variance/__init__.py | Python | mit | 66 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckNameRequest(Model):
"""CheckNameRequest.
:param name: Workspace collection name
:type name: str
:param type: Resource type. Default value:
"Microsoft.PowerBI/workspaceCollections" .
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, name=None, type="Microsoft.PowerBI/workspaceCollections"):
self.name = name
self.type = type
| rjschwei/azure-sdk-for-python | azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/check_name_request.py | Python | mit | 1,018 |
#!/usr/bin/python
blank_datafile = '/home/kyleb/Dropbox/UCSF/cas9/FCS/150916-3.1/kyleb/150916-rfp-cas9/96 Well - Flat bottom_002/Specimen_001_F1_F01_046.fcs'
script_output_dir = 'script_output'
sample_directory = '/home/kyleb/Dropbox/UCSF/cas9/FCS/150916-3.1/kyleb/150916-rfp-cas9/96 Well - Flat bottom_002'
rows_in_plate = 'ABCDEFGH'
cols_in_plate = list(range(1, 13))
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from FlowCytometryTools import FCMeasurement, PolyGate, ThresholdGate
# CompositeGate is used in output_medians_and_sums() below but is not exported at
# the package top level in every FlowCytometryTools release, so import it defensively.
try:
    from FlowCytometryTools import CompositeGate
except ImportError:
    from FlowCytometryTools.core.gates import CompositeGate
import os, FlowCytometryTools
import pylab as P
import numpy as np
import scipy
import scipy.optimize # scipy.optimize.minimize is used in find_perpendicular_gating_line
import scipy.stats # linregress, sem, and the t distribution are used in the helpers below
use_multiprocessing = True
if use_multiprocessing:
import multiprocessing as mp
class PlatePos:
def __init__ (self, plate_position_str):
self.row = plate_position_str[0]
assert( self.row in rows_in_plate )
self.col = int(plate_position_str[1:])
# Returns the next position on the plate
@property
def next_pos(self):
if self.row_index == len(rows_in_plate)-1:
if self.col == cols_in_plate[-1]:
return None
if self.col == cols_in_plate[-1]:
next_pos_row = rows_in_plate[ self.row_index+1 ]
next_pos_col = 1
else:
next_pos_row = self.row
next_pos_col = self.col + 1
return PlatePos( '%s%d' % (next_pos_row, next_pos_col) )
@property
def row_index(self):
return rows_in_plate.index(self.row)
def __repr__(self):
return '%s%02d' % (self.row, self.col)
def __lt__ (self, other):
if self.row == other.row:
return self.col < other.col
else:
return self.row < other.row
def __hash__(self):
return hash( str(self) )
def __eq__(self, other):
return self.row == other.row and self.col == other.col
def __ne__(self, other):
return not self.__eq__(other)
class PlateInfo:
def __init__ (self, name, value, new_positions):
self.name = name
if value == None:
self.value = np.nan
else:
self.value = value
self.positions = []
if isinstance(new_positions, list):
for new_position_range in new_positions:
self.add_position_range(new_position_range)
elif isinstance(new_positions, str):
self.add_position_range(new_positions)
else:
raise Exception('Input new positions must be a list or string')
def add_position_range(self, pos_range):
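        # e.g. 'A1-B3' expands to A1, A2, A3, B1, B2, B3 (inclusive on both ends);
        # a single position such as 'C5' falls through to add_position() below.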
if '-' in pos_range:
first_pos_str, second_pos_str = pos_range.split('-')
first_pos = PlatePos(first_pos_str)
second_pos = PlatePos(second_pos_str)
first_pos_char_index = rows_in_plate.index(first_pos.row)
second_pos_char_index = rows_in_plate.index(second_pos.row)
for char_index in range(first_pos_char_index, second_pos_char_index + 1):
row = rows_in_plate[char_index]
for col in range(first_pos.col, second_pos.col + 1):
self.add_position( '%s%d' % (row, col) )
else:
self.add_position(pos_range)
def add_position(self, pos_str):
pos = PlatePos(pos_str)
if pos not in self.positions:
self.positions.append(pos)
self.positions.sort()
@property
def position_set(self):
return_set = set()
for pos in self.positions:
return_set.add(pos)
return return_set
def __repr__(self):
return str( self.positions )
class Plate:
def __init__ (self, plate_info_list, sample_dir=None, verbose=False, name=None):
self.name = name
self.info_dict = {}
self.samples = {}
self.sample_dir = sample_dir
for plate_info in plate_info_list:
if plate_info.name not in self.info_dict:
self.info_dict[plate_info.name] = {}
assert( plate_info.value not in self.info_dict[plate_info.name] )
self.info_dict[plate_info.name][plate_info.value] = plate_info
if sample_dir != None:
self.load_fcs_dir(sample_dir, verbose=verbose)
def __repr__(self):
return str(self.info_dict)
@property
def all_position_set(self):
s = set()
for name in self.info_dict:
for value in self.info_dict[name]:
s = s.union(self.info_dict[name][value].position_set)
return s
def get_by_well(self, well_pos):
search_pos = PlatePos(well_pos)
for pos in self.all_position_set:
if pos == search_pos:
return self.samples[pos]
def parameter_values(self, parameter_name):
return sorted( self.info_dict[parameter_name].keys() )
def well_set(self, parameter_name, parameter_value=np.nan):
if parameter_name not in self.info_dict or parameter_value not in self.info_dict[parameter_name]:
return set()
else:
return self.info_dict[parameter_name][parameter_value].position_set
def single_well_from_set(self, well_set):
well_list = list(well_set)
assert( len(well_list) == 1 )
return self.samples[well_list[0]]
@property
def experimental_parameters(self):
experimental_parameters = []
for parameter_name in list(self.info_dict.keys()):
if 'blank' not in parameter_name.lower():
if len(self.info_dict[parameter_name]) == 1 and np.nan in self.info_dict[parameter_name]:
experimental_parameters.append(parameter_name)
return experimental_parameters
def gate(self, gate):
if use_multiprocessing:
pool = mp.Pool()
for pos in self.samples:
pool.apply_async(gate_data, (pos, self.samples[pos], gate), callback=self.set_gate)
pool.close()
pool.join()
else:
for pos in self.samples:
self.samples[pos] = self.samples[pos].gate(gate)
def gate_sample(self, gate, pos):
self.samples[pos] = self.samples[pos].gate(gate)
def set_gate(self, tup):
pos, fcs_data = tup
self.samples[pos] = fcs_data
def load_fcs_dir(self, sample_directory, verbose=False):
fcs_files = find_fcs_files(sample_directory)
for plate_pos, filepath in fcs_files:
assert(plate_pos not in self.samples)
self.samples[plate_pos] = FCMeasurement(ID=str(plate_pos), datafile=filepath)
if verbose:
print('Loaded %d FCS files from directory %s' % (len(fcs_files), sample_directory))
def gate_data(pos, fcs_data, gate):
return (pos, fcs_data.gate(gate))
class FCSFile:
def __init__ (self, filepath, plate_position_str):
self.filepath = filepath
self.plate_position_obj = PlatePos(plate_position_str)
@property
def plate_position(self):
return str( self.plate_position_obj )
@property
def plate_row(self):
return self.plate_position_obj.row
@property
def plate_col(self):
return self.plate_position_obj.col
def __lt__ (self, other):
return self.plate_position < other.plate_position
def __repr__(self):
return self.plate_position
def find_fcs_files(sample_directory):
fcs_files = []
for filename in os.listdir(sample_directory):
if filename.endswith('.fcs'):
full_filename = os.path.join(sample_directory, filename)
fcs_files.append( (PlatePos(filename.split('_')[2]), full_filename) )
fcs_files.sort()
return fcs_files
def ticks_format(value, index):
"""
get the value and returns the value as:
integer: [0,99]
1 digit float: [0.1, 0.99]
n*10^m: otherwise
    To keep all the labels a consistent size, they are all returned as LaTeX strings.
http://stackoverflow.com/questions/17165435/matplotlib-show-labels-for-minor-ticks-also
"""
exp = np.floor(np.log10(value))
base = value/10**exp
if exp == 0 or exp == 1:
return '${0:d}$'.format(int(value))
if exp == -1:
return '${0:.1f}$'.format(value)
else:
return '${0:d}\\times10^{{{1:d}}}$'.format(int(base), int(exp))
def output_medians_and_sums():
fsc_gate = ThresholdGate(10000.0, 'FSC-A', region='above')
ssc_gate = ThresholdGate(9000.0, 'SSC-A', region='above')
fsc_ssc_gate = CompositeGate(fsc_gate, 'and', ssc_gate)
# Load blank data
blank_sample = FCMeasurement(ID='blank', datafile=blank_datafile).gate(fsc_gate)
fcs_files = find_fcs_files(sample_directory)
channel_medians = {channel_name : {} for channel_name in blank_sample.channel_names}
channel_sums = {channel_name : {} for channel_name in blank_sample.channel_names}
for plate_pos, filepath in fcs_files:
sample = FCMeasurement(ID='sample', datafile=filepath).gate(fsc_gate)
for channel_name in sample.channel_names:
if plate_pos.row not in channel_medians[channel_name]:
channel_medians[channel_name][plate_pos.row] = {}
channel_sums[channel_name][plate_pos.row] = {}
assert( plate_pos.col not in channel_medians[channel_name][plate_pos.row] )
channel_medians[channel_name][plate_pos.row][plate_pos.col] = sample.data[channel_name].median()
channel_sums[channel_name][plate_pos.row][plate_pos.col] = np.sum(sample.data[channel_name])
# if channel_name in ['B-A', 'A-A']:
# print filename, channel_name
# sample.plot(channel_name, bins=100, alpha=0.9, color='green');
# blank_sample.plot(channel_name, bins=100, alpha=0.9, color='blue');
# P.grid(True)
# P.show() # <-- Uncomment when running as a script.
if not os.path.isdir(script_output_dir):
os.makedirs(script_output_dir)
rows = [char for char in 'ABCDEFGH']
cols = list(range(1, 13))
for channel, data_type in [(channel_medians, 'medians'), (channel_sums, 'sums')]:
for channel_name in channel:
filename = os.path.join(script_output_dir, '%s_%s.csv' % (channel_name, data_type))
with open(filename, 'w') as f:
for col in cols:
for row in rows:
if row in channel[channel_name] and col in channel[channel_name][row]:
f.write('%.2f,' % channel[channel_name][row][col])
else:
f.write('NA,')
f.write('\n')
def points_above_line(x_data, y_data, m, b):
# Calculate y-intercepts for all points given slope m
comp_bs = np.subtract(y_data, np.multiply(x_data, m))
# Return number of points whose y intercept is above passed in b
return np.count_nonzero(comp_bs > b)
def find_perpendicular_gating_line(x_data, y_data, threshold):
# Returns the line parameters which give you a certain percentage (threshold) of population
# above the line
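    # Illustrative usage (hypothetical data): m, b = find_perpendicular_gating_line(fsc, ssc, 0.3)
    # returns a line y = m*x + b, perpendicular to the FSC/SSC regression line, positioned
    # so that roughly 30% of the events fall above it.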
x_data = np.sort( x_data )
y_data = np.sort( y_data )
x_max = np.amax(x_data)
y_max = np.amax(y_data)
# y = mx + b
m, b, r, p, stderr = scipy.stats.linregress(x_data, y_data)
inv_m = -1.0 / m
inv_b = np.median( y_data )
percent_above_line = points_above_line(x_data, y_data, inv_m, inv_b) / float(len(x_data))
desired_points_above_line = int(threshold * len(x_data))
def obj_helper(calc_b):
return abs(points_above_line(x_data, y_data, inv_m, calc_b) - desired_points_above_line)
res = scipy.optimize.minimize(obj_helper, inv_b, method='nelder-mead', options={'disp': False, 'maxiter': 1000})
inv_b = res.x[0]
return (inv_m, inv_b)
def mean_confidence_interval(data, confidence=0.95):
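    # Computes mean +/- t * SEM, where t is the (1 + confidence)/2 quantile of
    # Student's t with n-1 degrees of freedom; scipy.stats.t._ppf is the private
    # counterpart of scipy.stats.t.ppf and gives the same value here.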
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
def make_gating_fig(plate_list, gate_val, gate_name, fig_dir, fast_run = False, blank_samples=[], plot_one_sample=False):
gating_fig = plt.figure(figsize=(len(plate_list)*9, 11), dpi=600)
gated_plates_for_return = []
gating_axes = []
mean_diffs = {}
for plate_num, exp in enumerate(plate_list):
nonblank_samples = list(exp.all_position_set)
if len(gating_axes) >= 1:
ax = gating_fig.add_subplot(1, len(plate_list), plate_num+1, sharey=gating_axes[0])
else:
ax = gating_fig.add_subplot(1, len(plate_list), plate_num+1)
gating_axes.append(ax)
ax.set_title(exp.name)
if gate_name.startswith('fsc'):
gate = ThresholdGate(gate_val, 'FSC-A', region='above')
elif gate_name.startswith('poly'):
all_exp_data_fsc = []
all_exp_data_ssc = []
for i, nonblank_sample in enumerate(nonblank_samples):
all_exp_data_fsc.append( exp.samples[nonblank_sample].data['FSC-A'] )
all_exp_data_ssc.append( exp.samples[nonblank_sample].data['SSC-A'] )
if not fast_run:
exp.samples[nonblank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color=np.random.rand(3,1), s=1, alpha=0.1, ax=ax)
gate_m, gate_b = find_perpendicular_gating_line( np.concatenate(all_exp_data_fsc), np.concatenate(all_exp_data_ssc), gate_val)
fsc_ssc_axis_limits = (-50000, 100000)
x_max = np.amax(np.concatenate(all_exp_data_fsc))
x_min = np.amin(np.concatenate(all_exp_data_fsc))
y_max = np.amax(np.concatenate(all_exp_data_ssc))
y_min = np.amin(np.concatenate(all_exp_data_ssc))
ax.set_ylim(fsc_ssc_axis_limits)
ax.set_xlim(fsc_ssc_axis_limits)
fudge = 1.0
polygon_xs = [x_min-fudge, x_min-fudge, (y_min-gate_b)/float(gate_m), x_max+fudge, x_max+fudge]
polygon_ys = [y_max+fudge, gate_m*x_min+gate_b, y_min-fudge, y_min-fudge, y_max+fudge]
gate = PolyGate(np.array([[x,y] for x, y in zip(polygon_xs, polygon_ys)]), ['FSC-A', 'SSC-A'], region='in', name='polygate')
if plot_one_sample and len(nonblank_samples) > 0:
exp.samples[nonblank_samples[0]].plot(['FSC-A', 'SSC-A'], kind='scatter', color='green', s=1, alpha=0.1, ax=ax, gates=[gate])
for i, blank_sample in enumerate(blank_samples):
if i == 0:
exp.samples[blank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color='red', s=2, alpha=1.0/float(len(blank_samples)), gates=[gate], label='Blank media', ax=ax)
else:
if not fast_run:
exp.samples[blank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color='red', s=2, alpha=1.0/float(len(blank_samples)), gates=[gate], ax=ax)
exp.gate(gate)
gated_plates_for_return.append(exp)
ax.grid(True)
if len(blank_samples) > 0:
ax.legend()
gating_fig.savefig(os.path.join(fig_dir, 'gates.png'))
gating_fig.clf()
plt.close(gating_fig)
del gating_fig
return gated_plates_for_return
def make_individual_gating_fig(exp, gate_val, gate_name, fig_dir, fast_run = False, florescence_channel = None, title=None, tight_layout = True):
gated_plates_for_return = []
mean_diffs = {}
nonblank_samples = sorted(list(exp.all_position_set))
samples_per_row = 3
if florescence_channel:
plots_per_sample = 2
else:
plots_per_sample = 1
figs_per_row = samples_per_row * plots_per_sample
num_fig_rows = 1 + ( len(nonblank_samples) - 1 ) / samples_per_row
num_fig_cols = min(samples_per_row * plots_per_sample, len(nonblank_samples) * plots_per_sample)
gating_fig = plt.figure(figsize=(8.2*num_fig_cols, num_fig_rows*5.5), dpi=600)
if title:
plt.title('%s - %s' % (title, exp.name), fontsize=20)
else:
plt.title(exp.name, fontsize=20)
current_fig_row = 1
current_fig_col = 1
current_fig_count = 1
for sample_num, nonblank_sample in enumerate(nonblank_samples):
#### FSC/SSC plot ####
ax = gating_fig.add_subplot(num_fig_rows, num_fig_cols, current_fig_count)
if current_fig_col >= figs_per_row:
current_fig_col = 1
current_fig_row += 1
else:
current_fig_col += 1
current_fig_count += 1
ax.set_title(str(nonblank_sample))
if gate_name.startswith('fsc'):
gate = ThresholdGate(gate_val, 'FSC-A', region='above')
elif gate_name.startswith('poly'):
fsc_data = exp.samples[nonblank_sample].data['FSC-A']
ssc_data = exp.samples[nonblank_sample].data['SSC-A']
gate_m, gate_b = find_perpendicular_gating_line( exp.samples[nonblank_sample].data['FSC-A'], exp.samples[nonblank_sample].data['SSC-A'], gate_val)
fsc_ssc_axis_limits = (-50000, 100000)
x_max = np.amax(fsc_data)
x_min = np.amin(fsc_data)
y_max = np.amax(ssc_data)
y_min = np.amin(ssc_data)
ax.set_ylim(fsc_ssc_axis_limits)
ax.set_xlim(fsc_ssc_axis_limits)
fudge = 1.0
polygon_xs = [x_min-fudge, x_min-fudge, (y_min-gate_b)/float(gate_m), x_max+fudge, x_max+fudge]
polygon_ys = [y_max+fudge, gate_m*x_min+gate_b, y_min-fudge, y_min-fudge, y_max+fudge]
gate = PolyGate(np.array([[x,y] for x, y in zip(polygon_xs, polygon_ys)]), ['FSC-A', 'SSC-A'], region='in', name='polygate')
if not fast_run:
exp.samples[nonblank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color=(0.0, 0.0, 1.0), s=1, alpha=0.05, ax=ax, gates=[gate])
ax.grid(True)
#### Gate sample ####
exp.gate_sample(gate, nonblank_sample)
#### Florescence/Time plot ####
if florescence_channel:
ax = gating_fig.add_subplot(num_fig_rows, num_fig_cols, current_fig_count)
current_fig_count += 1
ax.set_title(str(nonblank_sample))
exp.samples[nonblank_sample].plot(['Time', florescence_channel], kind='scatter', color=(1.0, 0.0, 0.0), s=1, alpha=0.05, ax=ax,)
# #### Singlet plot ####
# ax = gating_fig.add_subplot(num_fig_rows, num_fig_cols, current_fig_count)
# current_fig_count += 1
# ax.set_title(str(nonblank_sample))
# print exp.samples[nonblank_sample].channel_names
# exp.samples[nonblank_sample].plot(['FSC-H', 'FSC-W'], kind='scatter', color=(0.0, 0.0, 1.0), s=1, alpha=0.05, ax=ax,)
if tight_layout:
gating_fig.tight_layout()
gating_fig.savefig(os.path.join(fig_dir, 'gates-%s.png' % exp.name))
gating_fig.clf()
plt.close(gating_fig)
del gating_fig
return exp
if __name__ == '__main__':
output_medians_and_sums()
| Kortemme-Lab/klab | klab/fcm/fcm.py | Python | mit | 18,877 |
#!/usr/bin/env python3
import time
from matrix_client.client import MatrixClient
from matrix_client.api import MatrixRequestError
from requests.exceptions import ConnectionError, Timeout
import argparse
import random
from configparser import ConfigParser
import re
import traceback
import urllib.parse
import logging
import os
import sys
import signal
import queue
import codecs
from database import MarkovDatabaseBrain
COMMANDS = [
'!rate'
]
def sigterm_handler(_signo, _stack_frame):
"""Raises SystemExit(0), causing everything to cleanly shut down."""
sys.exit(0)
class ConfigParser(ConfigParser):
# allow case-sensitive option names
# needed for saving per-room response rates
optionxform = str
class Backend(object):
"""Interface for chat backends."""
def __init__(self, brain_file):
pass
def train_file(self, filename):
"""Trains the chat backend on the given file."""
with codecs.open(filename, encoding='utf8') as train_file:
for line in train_file:
self.learn(line)
def learn(self, line):
"""Updates the chat backend based on the given line of input."""
pass
def save(self):
"""Saves the backend to disk, if needed."""
pass
def reply(self, message):
"""Generates a reply to the given message."""
return "(dummy response)"
class MarkovBackend(Backend):
"""Chat backend using markov chains."""
def __init__(self, brain_file):
self.brain = MarkovDatabaseBrain(brain_file)
def sanitize(self, word):
"""Removes any awkward whitespace characters from the given word.
Removes '\n', '\r', and '\\u2028' (unicode newline character)."""
return word.replace('\n', '').replace('\r', '').replace('\u2028', '')
def train_file(self, filename):
with codecs.open(filename, encoding='utf8') as train_file:
for line in train_file:
self.learn(line)
self.save()
def learn(self, line):
line = line.strip()
words = line.split(' ')
words = [self.sanitize(word) for word in words]
for i in range(len(words) - 2):
prefix = words[i], words[i + 1]
follow = words[i + 2]
self.brain.add(prefix, follow)
def save(self):
self.brain.save()
def get_random_next_link(self, word1, word2):
"""Gives a word that could come after the two provided.
Words that follow the two given words are weighted by how frequently
they appear after them.
"""
possibilities = self.brain.get_followers((word1, word2))
if not possibilities:
return None
total = 0
for p in possibilities:
total += possibilities[p]
num = random.randint(1, total)
total = 0
for p in possibilities:
total += possibilities[p]
if total >= num:
break
return p
def reply(self, message):
if self.brain.is_empty():
return ''
seed = None
# try to seed reply from the message
possible_seed_words = message.split()
while seed is None and possible_seed_words:
message_word = random.choice(possible_seed_words)
seeds = list(self.brain.get_pairs_containing_word_ignoring_case(
message_word))
if seeds:
seed = random.choice(seeds)
else:
possible_seed_words.remove(message_word)
# we couldn't seed the reply from the input
# fall back to random seed
if seed is None:
seed = self.brain.get_three_random_words()
words = list(seed)
while self.brain.contains_pair((words[-2], words[-1])) and \
len(words) < 100:
word = self.get_random_next_link(words[-2], words[-1])
words.append(word)
return ' '.join(words)
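# Minimal usage sketch for MarkovBackend; it assumes MarkovDatabaseBrain accepts a database path
# the way main() uses it below, and the brain file name here is illustrative only.
def _markov_backend_example():
    backend = MarkovBackend('example-brain.db')
    backend.learn('the quick brown fox jumps over the lazy dog')
    backend.learn('the quick brown fox likes the lazy dog')
    backend.save()
    # Replies are seeded from a word of the incoming message when possible,
    # otherwise from three random words already stored in the brain.
    return backend.reply('tell me about the quick brown fox')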
class Config(object):
def __init__(self, cfgparser):
self.backend = cfgparser.get('General', 'backend')
self.display_name = cfgparser.get('General', 'display name')
self.learning = cfgparser.getboolean('General', 'learning')
self.username = cfgparser.get('Login', 'username')
self.password = cfgparser.get('Login', 'password')
self.server = cfgparser.get('Login', 'server')
self.default_response_rate = cfgparser.getfloat(
'General', 'default response rate')
self.response_rates = {}
for room_id, rate in cfgparser.items('Response Rates'):
room_id = room_id.replace('-colon-', ':')
self.response_rates[room_id] = float(rate)
def get_response_rate(self, room_id):
"""Returns our response rate for the room with the given room id."""
if room_id in self.response_rates:
return self.response_rates[room_id]
else:
return self.default_response_rate
def write(self):
"""Writes this config back to the file, with any changes reflected."""
cfgparser = ConfigParser()
cfgparser.add_section('General')
cfgparser.set('General', 'default response rate',
str(self.default_response_rate))
cfgparser.set('General', 'backend', self.backend)
cfgparser.set('General', 'display name', self.display_name)
cfgparser.set('General', 'learning', str(self.learning))
cfgparser.add_section('Login')
cfgparser.set('Login', 'username', self.username)
cfgparser.set('Login', 'password', self.password)
cfgparser.set('Login', 'server', self.server)
cfgparser.add_section('Response Rates')
for room_id, rate in list(self.response_rates.items()):
# censor colons because they are a configparser special
# character
room_id = room_id.replace(':', '-colon-')
cfgparser.set('Response Rates', room_id, str(rate))
with open('config.cfg', 'wt') as configfile:
cfgparser.write(configfile)
def get_default_configparser():
"""Returns a ConfigParser object for the default config file."""
config = ConfigParser(allow_no_value=True)
config.add_section('General')
config.set('General', 'default response rate', "0.10")
config.set('General', 'backend', 'markov')
config.set('General', 'display name', 'Markov')
config.set('General', 'learning', 'on')
config.add_section('Login')
config.set('Login', 'username', 'username')
config.set('Login', 'password', 'password')
config.set('Login', 'server', 'http://matrix.org')
config.add_section('Response Rates')
return config
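# For reference, the defaults above produce a config.cfg that looks roughly like this
# (option case is preserved because optionxform is overridden to str):
#
#   [General]
#   default response rate = 0.10
#   backend = markov
#   display name = Markov
#   learning = on
#
#   [Login]
#   username = username
#   password = password
#   server = http://matrix.org
#
#   [Response Rates]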
class Bot(object):
"""Handles everything that the bot does."""
def __init__(self, config, chat_backend):
self.config = config
self.client = None
self.chat_backend = chat_backend
self.event_queue = queue.Queue()
self.invite_queue = queue.Queue()
def login(self):
"""Logs onto the server."""
client = MatrixClient(self.config.server)
client.login_with_password_no_sync(
self.config.username, self.config.password)
self.client = client
def get_room(self, event):
"""Returns the room the given event took place in."""
return self.client.rooms[event['room_id']]
def handle_command(self, event, command, args):
"""Handles the given command, possibly sending a reply to it."""
command = command.lower()
if command == '!rate':
if args:
num = re.match(r'[0-9]*(\.[0-9]+)?(%|)', args[0]).group()
if not num:
self.reply(event, "Error: Could not parse number.")
return
if num[-1] == '%':
rate = float(num[:-1]) / 100
else:
rate = float(num)
self.config.response_rates[event['room_id']] = rate
self.reply(event, "Response rate set to %f." % rate)
else:
rate = self.config.get_response_rate(event['room_id'])
self.reply(
event, "Response rate set to %f in this room." % rate)
def reply(self, event, message):
"""Replies to the given event with the provided message."""
room = self.get_room(event)
logging.info("Reply: %s" % message)
room.send_notice(message)
def is_name_in_message(self, message):
"""Returns whether the message contains the bot's name.
Considers both display name and username.
"""
regex = "({}|{})".format(
self.config.display_name, self.config.username)
return re.search(regex, message, flags=re.IGNORECASE)
def handle_invite(self, room_id, invite_state):
# join rooms if invited
try:
self.client.join_room(room_id)
logging.info('Joined room: %s' % room_id)
except MatrixRequestError as e:
if e.code == 404:
# room was deleted after invite or something; ignore it
logging.info('invited to nonexistent room {}'.format(room_id))
elif e.code in range(500, 600):
# synapse v0.99.1 500s if it cannot locate a room sometimes
# (when there are federation issues)
logging.warning('got 500 trying to join room we were invited to')
else:
raise(e)
def handle_event(self, event):
"""Handles the given event.
Joins a room if invited, learns from messages, and possibly responds to
messages.
"""
if event['type'] == 'm.room.message':
# only care about text messages by other people
if event['sender'] != self.client.user_id and \
event['content']['msgtype'] == 'm.text':
message = str(event['content']['body'])
                # matching below uses re.IGNORECASE, so the message
                # itself is not lowercased
logging.info("Handling message: %s" % message)
command_found = False
for command in COMMANDS:
match = re.search(command, message, flags=re.IGNORECASE)
if match and (match.start() == 0 or
self.is_name_in_message(message)):
command_found = True
args = message[match.start():].split(' ')
self.handle_command(event, args[0], args[1:])
break
if not command_found:
room = self.get_room(event)
response_rate = self.config.get_response_rate(room.room_id)
if self.is_name_in_message(message) or \
random.random() < response_rate:
# remove name from message and respond to it
message_no_name = re.sub(
' *' + re.escape(self.get_display_name()) + ' *',
' ', message, flags=re.IGNORECASE)
response = self.chat_backend.reply(message_no_name)
self.reply(event, response)
if self.config.learning:
self.chat_backend.learn(message)
self.send_read_receipt(event)
def set_display_name(self, display_name):
"""Sets the bot's display name on the server."""
self.client.api.set_display_name(self.client.user_id, display_name)
def get_display_name(self):
"""Gets the bot's display name from the server."""
return self.client.api.get_display_name(self.client.user_id)
def run(self):
"""Indefinitely listens for messages and handles all that come."""
current_display_name = self.get_display_name()
if current_display_name != self.config.display_name:
self.set_display_name(self.config.display_name)
last_save = time.time()
# listen for invites, including initial sync invites
self.client.add_invite_listener(
lambda room_id, state: self.invite_queue.put((room_id, state)))
# get rid of initial event sync
logging.info("initial event stream")
self.client.listen_for_events()
# listen to events and add them all to the event queue
# for handling in this thread
self.client.add_listener(self.event_queue.put)
def exception_handler(e):
if isinstance(e, Timeout):
logging.warning("listener thread timed out.")
logging.error("exception in listener thread:")
traceback.print_exc()
# start listen thread
logging.info("starting listener thread")
self.client.start_listener_thread(exception_handler=exception_handler)
try:
while True:
time.sleep(1)
# handle any queued events
while not self.event_queue.empty():
event = self.event_queue.get_nowait()
self.handle_event(event)
while not self.invite_queue.empty():
room_id, invite_state = self.invite_queue.get_nowait()
self.handle_invite(room_id, invite_state)
# save every 10 minutes or so
if time.time() - last_save > 60 * 10:
self.chat_backend.save()
last_save = time.time()
finally:
logging.info("stopping listener thread")
self.client.stop_listener_thread()
def send_read_receipt(self, event):
"""Sends a read receipt for the given event."""
if "room_id" in event and "event_id" in event:
room_id = urllib.parse.quote(event['room_id'])
event_id = urllib.parse.quote(event['event_id'])
self.client.api._send("POST", "/rooms/" + room_id +
"/receipt/m.read/" + event_id,
api_path="/_matrix/client/r0")
def train(backend, train_file):
"""Trains the given chat backend on the given train_file & saves it."""
print("Training...")
backend.train_file(train_file)
print("Training complete!")
backend.save()
def main():
argparser = argparse.ArgumentParser(
description="A chatbot for Matrix (matrix.org)")
argparser.add_argument("--debug",
help="Print out way more things.",
action="store_true")
argparser.add_argument("--train", metavar="train.txt", type=str,
help="Train the bot with a file of text.")
argparser.add_argument("--config", metavar="config.cfg", type=str,
help="Bot's config file (must be read-writable)")
argparser.add_argument("--brain", metavar="brain.db", type=str,
help="Bot's brain file (must be read-writable)")
args = vars(argparser.parse_args())
debug = args['debug']
# suppress logs of libraries
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
log_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(level=log_level,
format='%(asctime)s %(name)s '
'%(levelname)s %(message)s')
train_path = args['train']
config_path = args['config'] if args['config'] \
else os.getenv('MATRIX_CHATBOT_CONFIG', 'config.cfg')
brain_path = args['brain'] if args['brain'] \
else os.getenv('MATRIX_CHATBOT_BRAIN', 'brain.db')
cfgparser = ConfigParser()
success = cfgparser.read(config_path)
if not success:
cfgparser = get_default_configparser()
with open(config_path, 'wt') as configfile:
cfgparser.write(configfile)
print("A config has been generated. "
"Please set your bot's username, password, and homeserver "
"in " + config_path + " then run this again.")
return
config = Config(cfgparser)
backends = {'markov': MarkovBackend}
backend = backends[config.backend](brain_path)
logging.info("loading brain")
if train_path:
train(backend, train_path)
else:
signal.signal(signal.SIGTERM, sigterm_handler)
while True:
try:
bot = Bot(config, backend)
bot.login()
bot.run()
except (MatrixRequestError, ConnectionError):
traceback.print_exc()
logging.warning("disconnected. Waiting a minute to see if"
" the problem resolves itself...")
time.sleep(60)
finally:
backend.save()
logging.info('Saving config...')
config.write()
if __name__ == '__main__':
main()
| Spferical/matrix-chatbot | main.py | Python | mit | 17,094 |
import os
import urllib
from glob import glob
import dask.bag as db
import numpy as np
import zarr
from dask.diagnostics import ProgressBar
from netCDF4 import Dataset
def download(url):
opener = urllib.URLopener()
filename = os.path.basename(url)
path = os.path.join('data', filename)
opener.retrieve(url, path)
def download_weather():
# Create data directory
if not os.path.exists('data'):
os.mkdir('data')
template = ('http://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/'
'noaa.oisst.v2.highres/sst.day.mean.{year}.v2.nc')
urls = [template.format(year=year) for year in range(1981, 2016)]
b = db.from_sequence(urls, partition_size=1)
print("Downloading Weather Data")
print("------------------------")
with ProgressBar():
b.map(download).compute(n_workers=8)
def transform_weather():
if os.path.exists('sst.day.mean.v2.zarr'):
return
datasets = [Dataset(path)['sst'] for path in sorted(glob('data/*.nc'))]
n = sum(d.shape[0] for d in datasets)
shape = (n, 720, 1440)
chunks = (72, 360, 360)
f = zarr.open_array('sst.day.mean.v2.zarr', shape=shape, chunks=chunks,
dtype='f4')
i = 0
for d in datasets:
m = d.shape[0]
f[i:i + m] = d[:].filled(np.nan)
i += m
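# Minimal sketch of reading the store written above back as a dask array; the chunking simply
# reuses the on-disk chunks, and the reduction shown is only an example.
def _example_read_back():
    import dask.array as da
    sst = zarr.open_array('sst.day.mean.v2.zarr', mode='r')
    x = da.from_array(sst, chunks=sst.chunks)
    # e.g. the mean sea-surface temperature over time for every grid cell
    return x.mean(axis=0)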
if __name__ == '__main__':
download_weather()
transform_weather()
| osbd/osbd-2016 | slides/crist/get_data.py | Python | mit | 1,419 |
# -*- coding: utf-8 -*-
#
# Optcoretech documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 1 14:23:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Optcoretech'
copyright = u'2014, Sheesh Mohsin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Optcoretechdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Optcoretech.tex', u'Optcoretech Documentation',
u'Sheesh Mohsin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'optcoretech', u'Optcoretech Documentation',
[u'Sheesh Mohsin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Optcoretech', u'Optcoretech Documentation',
u'Sheesh Mohsin', 'Optcoretech', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| nishantsingla/optcoretech | docs/conf.py | Python | mit | 7,786 |
import random
class ImageQueryParser:
def __init__(self):
pass
def parse(self, query_string):
tab = query_string.split(" ")
last = tab[-1].lower()
is_random = False
index = 0
if last.startswith("-"):
if last == "-r":
is_random = True
tab.pop()
else:
try:
index = int(last[1:])
tab.pop()
except ValueError:
pass
query_string = " ".join(tab)
return ImageQuery(query_string, is_random, index)
class ImageQuery:
def __init__(self, query, is_random, index):
self.__query = query
self.__is_random = is_random
self.__index = index
def query(self):
return self.__query
def is_random(self):
return self.__is_random
def next_index(self):
if self.is_random():
return random.randrange(0, 100)
else:
i = self.__index
self.__index += 1
return i
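# Minimal usage sketch: a trailing "-r" requests random indices, "-<n>" starts counting at n,
# and anything else is kept as part of the query text.
def _example_parse():
    parser = ImageQueryParser()
    q1 = parser.parse("cute cats -r")  # q1.query() == "cute cats", q1.is_random() is True
    q2 = parser.parse("cute cats -3")  # q2.query() == "cute cats", next_index() -> 3, 4, ...
    q3 = parser.parse("cute cats")     # plain query, next_index() -> 0, 1, ...
    return q1, q2, q3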
| mamaddeveloper/teleadmin | tools/imageQueryParser.py | Python | mit | 1,079 |
import math
def area(a, b, c):
A1 = ((4*math.pi)*a**2)
A2 = ((4*math.pi)*b**2)
A3 = ((4*math.pi)*c**2)
Avg = (A1+A2+A3)/3
return Avg
def output(a, b ,c , d, e):
return """
Hello there, {}!
    equation: (((4*math.pi)*{}**2) + ((4*math.pi)*{}**2) + ((4*math.pi)*{}**2)) / 3
Calculating average area of three spheres...
the answer is: {}
""".format(a, b, c, d, e)
def main():
Name = raw_input("Name: ")
    Area1 = raw_input("Radius of 1st sphere: ")
    Area2 = raw_input("Radius of 2nd sphere: ")
    Area3 = raw_input("Radius of 3rd sphere: ")
e = area(int(Area1), int(Area2), int(Area3))
print output(Name, Area1, Area2, Area3, e)
main()
| boss2608-cmis/boss2608-cmis-cs2 | simple.py | Python | cc0-1.0 | 624 |
from turtle import *
shape("turtle")
forward(100)
left(120)
forward(100)
left(120)
forward(100)
left(120)
done()
| arve0/example_lessons | src/python/lessons/Turtle Power/Club Leader Resources/DrawingShapes-triangle.py | Python | cc0-1.0 | 128 |
from setuptools import setup, find_packages
setup(name='BIOMD0000000360',
version=20140916,
description='BIOMD0000000360 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000360',
maintainer='Stanley Gu',
maintainer_url='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | biomodels/BIOMD0000000360 | setup.py | Python | cc0-1.0 | 377 |
f = open('test_content.txt', 'r')
print(f.read())
f.close()
# using context manager
with open('test_content.txt', 'r') as f:
print(f.read())
| yrunts/python-for-qa | 3-python-intermediate/examples/file.py | Python | cc0-1.0 | 148 |
# file ui_drop_plate_classes.py
import os
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui
from thlib.side.Qt import QtCore
from thlib.environment import env_mode, env_inst, env_write_config, env_read_config
import thlib.global_functions as gf
import thlib.ui.checkin_out.ui_drop_plate as ui_drop_plate
import thlib.ui.checkin_out.ui_drop_plate_config as ui_drop_plate_config
from thlib.ui_classes.ui_custom_qwidgets import Ui_horizontalCollapsableWidget
#reload(ui_drop_plate)
#reload(ui_drop_plate_config)
class Ui_matchingTemplateConfigWidget(QtGui.QDialog, ui_drop_plate_config.Ui_matchingTemplateConfig):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.current_templates_list = []
self.setupUi(self)
self.create_ui()
def create_ui(self):
self.setWindowTitle('Matching Template Config')
self.fill_templates()
self.templatesTreeWidget.resizeColumnToContents(0)
self.templatesTreeWidget.resizeColumnToContents(1)
self.templatesTreeWidget.resizeColumnToContents(2)
self.templatesTreeWidget.resizeColumnToContents(3)
self.create_drop_plate_config_widget()
self.readSettings()
def create_drop_plate_config_widget(self):
from thlib.ui_classes.ui_conf_classes import Ui_checkinOptionsPageWidget
self.drop_plate_config_widget = Ui_checkinOptionsPageWidget(self)
self.drop_plate_config_widget.snapshotsSavingOptionsGroupBox.setHidden(True)
self.drop_plate_config_widget.checkinMiscOptionsGroupBox.setHidden(True)
self.drop_plate_config_widget.defaultRepoPathsGroupBox.setHidden(True)
self.drop_plate_config_widget.customRepoPathsGroupBox.setHidden(True)
self.configGridLayout.addWidget(self.drop_plate_config_widget)
self.configGridLayout.setContentsMargins(0, 0, 0, 9)
def fill_templates(self):
templates = [
(True, '$FILENAME'),
(True, '$FILENAME.$EXT'),
(True, '$FILENAME.$FRAME.$EXT'),
(True, '$FILENAME_$UDIM.$EXT'),
(True, '$FILENAME_$UV.$EXT'),
(True, '$FILENAME.$FRAME_$UDIM.$EXT'),
(True, '$FILENAME.$FRAME_$UV.$EXT'),
(True, '$FILENAME_$UV.$FRAME.$EXT'),
(False, '$FILENAME_$LAYER.$EXT'),
(False, '$FILENAME.$LAYER.$EXT'),
(False, '$FILENAME_$LAYER.$FRAME.$EXT'),
(False, '$FILENAME.$LAYER.$FRAME.$EXT'),
(False, '$FILENAME.$LAYER_$UV.$EXT'),
(False, '$FILENAME.$LAYER.$FRAME_$UV.$EXT'),
(False, '$FILENAME.$LAYER_$UV.$FRAME.$EXT'),
(False, '$FILENAME.$LAYER_$UDIM.$EXT'),
(False, '$FILENAME.$LAYER.$FRAME_$UDIM.$EXT'),
(False, '$FILENAME.$LAYER_$UDIM.$FRAME.$EXT'),
(False, '$FILENAME_$LAYER.$FRAME_$UDIM.$EXT'),
]
# templates = [
# (True, '$FILENAME'),
# (True, '$FILENAME.$EXT'),
# (True, '$FILENAMEFrame$FRAME.$EXT'),
# ]
for enabled, template in templates:
tree_item = QtGui.QTreeWidgetItem()
if enabled:
tree_item.setCheckState(0, QtCore.Qt.Checked)
self.current_templates_list.append(template)
else:
tree_item.setCheckState(0, QtCore.Qt.Unchecked)
tree_item.setText(1, template)
match_template = gf.MatchTemplate([template], padding=self.get_min_padding())
tree_item.setText(2, match_template.get_preview_string())
tree_item.setText(3, match_template.get_type_string())
if template in ['$FILENAME', '$FILENAME.$EXT']:
tree_item.setDisabled(True)
self.templatesTreeWidget.addTopLevelItem(tree_item)
def get_min_padding(self):
return 3
# return int(self.minFramesPaddingSpinBox.value())
def get_templates_list(self):
return self.current_templates_list
def set_settings_from_dict(self, settings_dict=None):
if settings_dict:
self.move(settings_dict['pos'][0], settings_dict['pos'][1])
self.resize(settings_dict['size'][0], settings_dict['size'][1])
def get_settings_dict(self):
settings_dict = dict()
settings_dict['pos'] = self.pos().toTuple()
settings_dict['size'] = self.size().toTuple()
return settings_dict
def readSettings(self):
self.set_settings_from_dict(env_read_config(filename='ui_drop_plate', unique_id='ui_main', long_abs_path=True))
def writeSettings(self):
env_write_config(self.get_settings_dict(), filename='ui_drop_plate', unique_id='ui_main', long_abs_path=True)
def hideEvent(self, event):
self.writeSettings()
event.accept()
class Ui_dropPlateWidget(QtGui.QWidget, ui_drop_plate.Ui_dropPlate):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.tree_items = []
self.setupUi(self)
self.setAcceptDrops(True)
self.create_ui()
self.create_config_widget()
self.controls_actions()
def threads_fill_items(self, kwargs, exec_after_added=None):
worker = env_inst.local_pool.add_task(self.get_files_objects, kwargs)
worker.result.connect(self.append_items_to_tree)
if exec_after_added:
worker.finished.connect(exec_after_added)
worker.error.connect(gf.error_handle)
worker.start()
def create_ui(self):
self.clearPushButton.setIcon(gf.get_icon('trash'))
self.configPushButton.setIcon(gf.get_icon('settings', icons_set='mdi'))
self.create_progress_bar_widget()
self.create_collapsable_toolbar()
self.setAcceptDrops(True)
if env_mode.get_mode() == 'standalone':
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
self.setSizePolicy(sizePolicy)
self.setMinimumWidth(300)
self.move_controls_to_collapsable_toolbar()
self.customize_ui()
def customize_ui(self):
self.dropTreeWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.dropTreeWidget.setDragDropMode(QtGui.QAbstractItemView.DragOnly)
self.dropTreeWidget.setStyleSheet(gf.get_qtreeview_style())
def create_progress_bar_widget(self):
self.progressBar = QtGui.QProgressBar()
self.progressBar.setMaximum(100)
self.progressBarLayout.addWidget(self.progressBar)
self.progressBar.setTextVisible(True)
self.progressBar.setVisible(False)
def create_config_widget(self):
self.config_widget = Ui_matchingTemplateConfigWidget(self)
def create_collapsable_toolbar(self):
self.collapsable_toolbar = Ui_horizontalCollapsableWidget()
self.collapsable_toolbar.setText('Quick Config')
self.buttons_layout = QtGui.QHBoxLayout()
self.buttons_layout.setSpacing(0)
self.buttons_layout.setContentsMargins(0, 0, 0, 0)
self.collapsable_toolbar.setLayout(self.buttons_layout)
self.collapsable_toolbar.setCollapsed(True)
self.expandingLayout.addWidget(self.collapsable_toolbar)
def add_widget_to_collapsable_toolbar(self, widget):
self.buttons_layout.addWidget(widget)
def move_controls_to_collapsable_toolbar(self):
self.add_widget_to_collapsable_toolbar(self.groupCheckinCheckBox)
self.add_widget_to_collapsable_toolbar(self.keepFileNameCheckBox)
self.add_widget_to_collapsable_toolbar(self.includeSubfoldersCheckBox)
self.collapsable_toolbar.setCollapsed(False)
def controls_actions(self):
self.clearPushButton.clicked.connect(self.clear_tree_widget)
self.configPushButton.clicked.connect(self.config_widget.exec_)
# self.groupCheckinCheckBox.stateChanged.connect(self.enable_group_checkin)
self.create_files_tree_context_menu()
def clear_tree_widget(self):
self.dropTreeWidget.clear()
self.tree_items = []
def create_files_tree_context_menu(self):
self.dropTreeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.dropTreeWidget.customContextMenuRequested.connect(self.open_menu)
def open_menu(self):
item = self.dropTreeWidget.selectedIndexes()
if item:
menu = self.file_context_menu()
if menu:
menu.exec_(Qt4Gui.QCursor.pos())
else:
menu = self.drop_plate_context_menu()
if menu:
menu.exec_(Qt4Gui.QCursor.pos())
@gf.catch_error
def open_file_from_tree(self):
item = self.get_selected_items()
for it in item:
it.open_file()
@gf.catch_error
def open_folder_from_tree(self):
item = self.get_selected_items()
for it in item:
it.open_folder()
@gf.catch_error
def copy_path_from_tree(self):
item = self.get_selected_items()
clipboard = QtGui.QApplication.instance().clipboard()
files_list = set()
for it in item:
files_list.add(it.get_file_path())
clipboard.setText('\n'.join(files_list))
@gf.catch_error
def copy_abs_path_from_tree(self):
item = self.get_selected_items()
clipboard = QtGui.QApplication.instance().clipboard()
files_list = []
for it in item:
files_list.extend(it.get_all_files_list())
clipboard.setText('\n'.join(files_list))
def file_context_menu(self):
open_file = QtGui.QAction('Open File', self.dropTreeWidget)
open_file.setIcon(gf.get_icon('folder'))
open_file.triggered.connect(self.open_file_from_tree)
open_file_folder = QtGui.QAction('Show Folder', self.dropTreeWidget)
open_file_folder.setIcon(gf.get_icon('folder-open'))
open_file_folder.triggered.connect(self.open_folder_from_tree)
copy_path = QtGui.QAction("Copy File Path", self.dropTreeWidget)
copy_path.setIcon(gf.get_icon('copy'))
copy_path.triggered.connect(self.copy_path_from_tree)
copy_abs_path = QtGui.QAction("Copy Absolute File Path", self.dropTreeWidget)
copy_abs_path.setIcon(gf.get_icon('copy'))
copy_abs_path.triggered.connect(self.copy_abs_path_from_tree)
add_file = QtGui.QAction('Add More Files', self.dropTreeWidget)
add_file.setIcon(gf.get_icon('folder-open'))
add_file.triggered.connect(self.add_files_from_menu)
menu = QtGui.QMenu()
menu.addAction(open_file)
menu.addAction(open_file_folder)
menu.addAction(copy_path)
menu.addAction(copy_abs_path)
menu.addAction(add_file)
return menu
def drop_plate_context_menu(self):
add_file = QtGui.QAction('Add Files', self.dropTreeWidget)
add_file.setIcon(gf.get_icon('folder-open'))
add_file.triggered.connect(self.add_files_from_menu)
paste_from_clipboard = QtGui.QAction('Paste From Clipboard', self.dropTreeWidget)
paste_from_clipboard.setIcon(gf.get_icon('folder-open'))
paste_from_clipboard.triggered.connect(self.add_files_from_clipboard)
menu = QtGui.QMenu()
menu.addAction(add_file)
menu.addAction(paste_from_clipboard)
return menu
def add_files_from_menu(self, exec_after_added=None):
options = QtGui.QFileDialog.Options()
options |= QtGui.QFileDialog.DontUseNativeDialog
        files_names, filter = QtGui.QFileDialog.getOpenFileNames(self, 'Adding files to Drop Plate',
'',
'All Files (*.*);;',
'', options)
if files_names:
self.threads_fill_items(files_names, exec_after_added)
# files_objects = self.get_files_objects(files_names)
# self.append_items_to_tree(files_objects)
# if exec_after_added:
# exec_after_added(files_objects)
def add_files_from_clipboard(self, exec_after_added=None):
clipboard = QtGui.QApplication.clipboard()
files_names = clipboard.text()
if files_names:
files_names = set(files_names.split('\n'))
self.threads_fill_items(files_names, exec_after_added)
def get_selected_items(self):
selected_items = []
if self.tree_items:
for item in self.dropTreeWidget.selectedItems():
# index = item.data(0, QtCore.Qt.UserRole)
file_object = item.data(1, QtCore.Qt.UserRole)
# print file_object
# for i, itm in enumerate(self.tree_items):
# print itm, i
# if i == index:
# selected_items.append(itm)
# break
selected_items.append(file_object)
return selected_items
def get_files_objects(self, items):
if self.includeSubfoldersCheckBox.isChecked():
dirs_and_files = gf.split_files_and_dirs(items)
for dirs in dirs_and_files[0]:
for path, subdirs, files in os.walk(dirs):
for name in files:
items.append(os.path.join(path, name))
for s_dir in subdirs:
items.append(os.path.join(path, s_dir))
match_template = gf.MatchTemplate(self.config_widget.get_templates_list(), padding=self.config_widget.get_min_padding())
return match_template.get_files_objects(items)
def remove_selected_items(self):
if self.tree_items:
for item in self.dropTreeWidget.selectedItems():
index = item.data(0, QtCore.Qt.UserRole)
for i, itm in enumerate(self.tree_items):
if i == index:
self.tree_items.pop(index)
item_index = self.dropTreeWidget.indexFromItem(item)
self.dropTreeWidget.takeTopLevelItem(item_index.row())
def append_items_to_tree(self, files_objects_dict):
self.dropTreeWidget.clearSelection()
icon_provider = QtGui.QFileIconProvider()
self.progressBar.setVisible(True)
for item_type, item in files_objects_dict.items():
for i, file_obj in enumerate(item):
tree_item = QtGui.QTreeWidgetItem()
tree_item.setText(0, file_obj.get_pretty_file_name())
sequence_info_string = []
frameranges = file_obj.get_sequence_frameranges_string('[]')
tiles_count = file_obj.get_tiles_count()
layer = file_obj.get_layer()
if frameranges:
sequence_info_string.append(frameranges)
if tiles_count:
sequence_info_string.append('{0} Tile(s)'.format(tiles_count))
if layer:
sequence_info_string.append(layer)
tree_item.setText(1, ' / '.join(sequence_info_string))
tree_item.setText(2, file_obj.get_base_file_type_pretty_name())
tree_item.setText(3, file_obj.get_base_file_type())
tree_item.setText(4, file_obj.get_file_path())
file_icon = icon_provider.icon(file_obj.get_all_files_list(True))
tree_item.setIcon(0, file_icon)
self.dropTreeWidget.addTopLevelItem(tree_item)
# TODO fix this (we need to select all)
# if self.dropTreeWidget.topLevelItemCount() < 50: # for performance reasons
self.dropTreeWidget.setItemSelected(tree_item, True)
# else:
# self.dropTreeWidget.clearSelection()
tree_item.setData(0, QtCore.Qt.UserRole, len(self.tree_items))
tree_item.setData(1, QtCore.Qt.UserRole, file_obj)
self.tree_items.append(file_obj)
# if i+1 % 50 == 0:
# QtGui.QApplication.processEvents()
                self.progressBar.setValue(int((i + 1) * 100 / len(item)))
self.progressBar.setValue(100)
self.dropTreeWidget.resizeColumnToContents(0)
self.dropTreeWidget.resizeColumnToContents(1)
self.dropTreeWidget.resizeColumnToContents(2)
self.dropTreeWidget.resizeColumnToContents(3)
self.dropTreeWidget.resizeColumnToContents(4)
# self.dropTreeWidget.sortByColumn(0, QtCore.Qt.AscendingOrder)
self.progressBar.setVisible(False)
def get_keep_filename(self):
return self.keepFileNameCheckBox.isChecked()
def set_settings_from_dict(self, settings_dict=None):
ref_settings_dict = {
'includeSubfoldersCheckBox': False,
'keepFileNameCheckBox': False,
'groupCheckinCheckBox': False,
}
settings = gf.check_config(ref_settings_dict, settings_dict)
self.includeSubfoldersCheckBox.setChecked(settings['includeSubfoldersCheckBox'])
self.keepFileNameCheckBox.setChecked(settings['keepFileNameCheckBox'])
self.groupCheckinCheckBox.setChecked(settings['groupCheckinCheckBox'])
def get_settings_dict(self):
settings_dict = {
'includeSubfoldersCheckBox': int(self.includeSubfoldersCheckBox.isChecked()),
'keepFileNameCheckBox': int(self.keepFileNameCheckBox.isChecked()),
'groupCheckinCheckBox': int(self.groupCheckinCheckBox.isChecked()),
}
return settings_dict
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
# print event.mimeData()
# print event.mimeData().text()
# print event.mimeData().urls()
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
links = []
for url in event.mimeData().urls():
links.append(url.toLocalFile())
self.threads_fill_items(links)
else:
event.ignore()
| listyque/TACTIC-Handler | thlib/ui_classes/ui_drop_plate_classes.py | Python | epl-1.0 | 18,599 |
# Python script to perform consistency analysis on metabolic models
# Copyright (C) 2015 Miguel Ponce de Leon
# Contact: [email protected]
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import csv, re
import numpy as np
import cobra
from cobra.core import Gene, Model
# EXCHANGE_PREFIX is assumed to be defined alongside REACTION_PREFIX in settings
# (it is referenced by f_ex below)
from settings import REACTION_PREFIX, EXCHANGE_PREFIX
def read_ec_numbers(fname):
rxn2ec = {row[0]:row[1] for row in csv.reader(open(fname))}
ECs_rxns = {}
for rxn,ec in rxn2ec.items():
if not re.search('^[1-6]\.[0-9][0-9]*\.[0-9][0-9]*',ec):
continue
elif ec not in ECs_rxns:
ECs_rxns[ec] = []
ECs_rxns[ec].append(rxn)
return ECs_rxns
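# read_ec_numbers expects a headerless CSV with one "reaction_id,EC number" row per line, e.g.
# (identifiers below are illustrative only):
#   rxn00781,1.2.1.12
#   rxn00786,1.2.1.12
#   rxn05039,Unknown
# Rows whose second column is not a well-formed EC number are skipped, and the result maps each
# EC number to the reactions annotated with it, e.g. {'1.2.1.12': ['rxn00781', 'rxn00786']}.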
def add_expantion_fluxes(model,metabolites={},prefix='EFFLUX_',set_as_objective=False,copy_model=True):
if copy_model:
model = model.copy()
for metab,coef in metabolites.items():
if not hasattr(metab, 'id'):
metab = model.metabolites.get_by_id(metab)
reaction = cobra.Reaction(prefix+metab.id)
reaction.add_metabolites({metab : coef})
if set_as_objective:
reaction.objective_coefficient = 1
model.add_reaction(reaction)
return model
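# Minimal usage sketch for add_expantion_fluxes; the metabolite id is illustrative and must exist
# in the model. Each entry adds a reaction named "<prefix><metabolite id>" whose only metabolite
# is the given one, with the given stoichiometric coefficient (negative values drain it).
def _example_add_expansion_fluxes(model):
    return add_expantion_fluxes(model, metabolites={'cpd00027_c': -1},
                                set_as_objective=True)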
def correct_seed_model(model,metamodel):
if 'rxn02297' in model.reactions and 'rxn02296' not in model.reactions:
new_rxn = metamodel.reactions.new_rxn00021.copy()
old_rxn = model.reactions.rxn02297
model.add_reaction(new_rxn)
new_rxn.gene_reaction_rule = old_rxn.gene_reaction_rule
model.remove_reactions([old_rxn])
    # the following metabolites should be removed from the biomass reaction
    # because they appear as conserved pools
    biomass = [r for r in model.reactions if r.id.startswith('bio')][0]
conflictive_metabolties = ['cpd01997_c','cpd03422_c','cpd11416_c']
#conflictive_metabolties = ['cpd01997_c','cpd03422_c']
for m in conflictive_metabolties:
#print m
if m not in model.metabolites:
continue
metabolite = model.metabolites.get_by_id(m)
if metabolite not in biomass.products:
continue
s_coeff = biomass.get_coefficient(metabolite.id)
biomass.add_metabolites({metabolite:-s_coeff})
if 'EX_cpd11416_c' in model.reactions:
model.remove_reactions(['EX_cpd11416_c'])
if 'rxn05029' in model.reactions:
model.remove_reactions(['rxn05029'])
return model
def prepare_model(model,metamodel,reactions_to_remove=[],correct_seed=False):
model = model.copy()
if len(reactions_to_remove) > 0:
model.remove_reactions(reactions_to_remove)
if correct_seed:
correct_seed_model(model,metamodel)
list_of_reactions = [r for r in model.reactions]
for r in list_of_reactions:
if r not in metamodel.reactions:
print "%s not in metamodel %s" % (r.id,metamodel.id)
continue
reaction_reference = metamodel.reactions.get_by_id(r.id)
result = r - reaction_reference
if len(result.metabolites) > 0:
genes = r.genes
gene_rule = r.gene_reaction_rule
model.remove_reactions([r])
model.add_reaction(reaction_reference.copy())
new_reaction = model.reactions.get_by_id(r.id)
new_reaction._genes = genes
new_reaction.gene_reaction_rule = gene_rule
for g in genes:
g._reaction.add(new_reaction)
model.reactions.get_by_id(r.id).lower_bound = reaction_reference.lower_bound
metabolites_to_remove = [m.id for m in model.metabolites if len(m.reactions) == 0]
for m in metabolites_to_remove:
if m not in model.metabolites:
continue
model.metabolites.get_by_id(m).remove_from_model()
return model
def create_consisten_model(model,metamodel,consistent_reactions):
consistent_model = Model()
consistent_model.id = model.id
consistent_model.description = model.id
auxiliar_gene = Gene('MODULAR_GAPFILLING')
auxiliar_gene._model = consistent_model
consistent_model.genes.append(auxiliar_gene)
for reaction_id in consistent_reactions:
new_reaction = metamodel.reactions.get_by_id(reaction_id).copy()
if reaction_id in model.reactions:
reaction_reference = model.reactions.get_by_id(reaction_id)
gene_list = []
for gene in reaction_reference.genes:
if gene.id in consistent_model.genes:
gene_list.append(consistent_model.genes.get_by_id(gene.id))
else:
new_gene = Gene(gene.id)
new_gene._model = consistent_model
consistent_model.genes.append(new_gene)
gene_list.append(new_gene)
for gene in gene_list:
gene._reaction.add(new_reaction)
new_reaction._genes = gene_list
new_reaction.gene_reaction_rule = reaction_reference.gene_reaction_rule
else:
new_reaction.gene_reaction_rule = auxiliar_gene.name
auxiliar_gene._reaction.add(new_reaction)
consistent_model.add_reaction(new_reaction)
return consistent_model
def get_full_coupled_sets(reaction_names,fctab, exclude_preffix=None):
""" Interpretation for element (i, j):
1 - fully coupled <=>
2 - partially coupled <->
3 - reaction i is directionally coupled to j ( v[i]<>0 -> v[j]<>0 )
4 - reaction j is directionally coupled to i ( v[j]<>0 -> v[i]<>0 )
5 - uncoupled
"""
assert fctab.shape[0] == len(reaction_names)
already_coupled = set()
coupling_sets = []
for i in np.arange(fctab.shape[0]):
if i in already_coupled:
continue
indexes = np.where(fctab[i,:]==1)[0]
if len(indexes) < 2:
continue
coupling_sets.append(indexes)
already_coupled = already_coupled.union(indexes)
#coupling_sets = np.array([np.array(b) for b in set([tuple(a) for a in fctab])])
#coupling_sets = [subset for subset in [np.where(es==1)[0] for es in coupling_sets] if len(subset)>1]
result = {}
counter = 1
for subset in coupling_sets:
rs_id = 'RS_'+str(counter)
if exclude_preffix:
reaction_ids = [reaction_names[i] for i in subset if not re.search(exclude_preffix,reaction_names[i])]
else:
reaction_ids = [reaction_names[i] for i in subset]
if len(reaction_ids) > 1:
result[rs_id] = reaction_ids
counter += 1
return result
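# Minimal sketch of the expected input/output; reaction names and the coupling table below are
# illustrative and follow the encoding documented in the docstring (1 = fully coupled).
def _example_full_coupling():
    names = ['rxnA', 'rxnB', 'rxnC']
    fctab = np.array([[1, 1, 5],
                      [1, 1, 5],
                      [5, 5, 1]])
    # rxnA and rxnB are mutually fully coupled; rxnC couples only with itself, so it is dropped
    return get_full_coupled_sets(names, fctab)  # -> {'RS_1': ['rxnA', 'rxnB']}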
def decorate_graph(G,labels={},colors={}):
for tail,head in G.edges():
G.edge[tail][head]['graphics'] = {}
G.edge[tail][head]['graphics']['targetArrow'] = "standard"
try:
width = float(G[tail][head]['label'])
G.edge[tail][head]['graphics']['width'] = width
except:
G.edge[tail][head]['graphics']['width'] = 1.0
for n in G.nodes():
label = n
if n in labels:
G.node[n]['label'] = labels[n]
label = labels[n]
graphics = {}
if n in colors:
color = colors[n]
else:
color = None
if G.node[n]['node_class'] == 'reaction':
outline = "#000000"
if not color:
color = "#c0c0c0"
height = 16.0
width = max((len(label) * 8.0),85.0)
graphics = {"w":width, "h":height, "type":"roundrectangle", "fill":color, "outline":outline}
elif G.node[n]['node_class'] == 'metabolite':
outline = "#ffffff"
if not color:
color = "#ffffff"
height = 15.0
width = max((len(label) * 8.0),60.0)
if n in colors:
color = colors[n]
outline = "#000000"
graphics = {"w":width, "h":height, "type":"rectangle", "fill":color, "outline":outline}
G.node[n]['graphics'] = graphics
return G
def csv_save(a_list,fname):
if not isinstance(a_list,list):
return
elif len(a_list)<1:
return
elif not isinstance(a_list[0],list):
a_list = [[e] for e in a_list]
f = open(fname,'w')
w = csv.writer(f)
x = w.writerows(a_list)
f.close()
return x
def f_rxn(x):
return re.search(REACTION_PREFIX,x)
def f_ex(x):
return re.search(EXCHANGE_PREFIX,x)
def f_flux(x):
return f_ex(x) or f_rxn(x)
| migp11/consistency-analysis | modules/utils.py | Python | gpl-2.0 | 9,286 |
#
# This file is part of DF.
#
# DF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any
# later version.
#
# Latassan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public
# License along with DF; see the file COPYING. If not
# see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2014-2019 Jimmy Dubuisson <[email protected]>
#
from __future__ import division
import sys
from utils import *
from igraph import Graph
import cPickle as pickle
import logging as log
import scipy.sparse as sps
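# Example invocation (file names are illustrative):
#   python graphs2b.py umat.pkl graph.pkl 0.05
# where the first argument is the pickled input matrix, the second is the output igraph pickle,
# and the third is the edge density handed to GraphUtils.get_graph_from_matrix.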
if __name__ == '__main__':
log.basicConfig(level=log.DEBUG,
format='%(asctime)s:%(levelname)s:%(message)s')
matrix_file_name = sys.argv[1]
graph_file_name = sys.argv[2]
density = float(sys.argv[3])
iw = False
umat = pickle.load(open(matrix_file_name, 'rb'))
g = GraphUtils.get_graph_from_matrix(umat,density,ignore_weights=iw)
log.info('#vertices, #edges: ' + str(len(g.vs)) + ', ' + str(len(g.es)))
pickle.dump(g, open(graph_file_name, 'wb'))
| jimbotonic/df_nlp | step3/graphs2b.py | Python | gpl-2.0 | 1,385 |
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-16 20:09:09
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-27 16:02:34
from aces.tools import mkdir, mv, cd, cp, mkcd, shell_exec,\
exists, write, passthru, toString, pwd, debug, ls, parseyaml
import aces.config as config
from aces.binary import pr
from aces.runners import Runner
from aces.graph import plot, series, pl, fig
from aces.script.vasprun import exe as lammpsvasprun
import aces.script.vasprun as vasprun
import time
import re
import numpy as np
from aces.io.phonopy.bandplot import plotband, plotbanddos
from aces.io.phonopy.meshyaml import meshyaml
from aces.io.phonopy.fc import readfc2
from aces.pbs.jobManager import jobManager, th, pbs
from aces.io.vasp import writePOTCAR, writevasp, parseVasprun
from ase import io
from lxml import etree
from scanf import sscanf
class runner(Runner):
def minimizePOSCAR(self):
m = self.m
if m.engine == "lammps":
m.dump2POSCAR(m.home + '/minimize/range', rotate=True)
elif m.engine == "vasp":
cp(m.home + '/minimize/CONTCAR', 'POSCAR')
def optimize(self):
mkcd('optimize')
cp('../minimize/POSCAR', '.')
atoms = io.read('POSCAR')
for i in range(100):
dir = "%i" % i
mkcd(dir)
writevasp(atoms)
forces, stress, energy = self.energyForce()
pos = atoms.get_scaled_positions()
pos += forces * 0.01
def energyForce(self):
self.getVaspRun_vasp()
forces = parseVasprun('forces')
stress = parseVasprun('stress')
c = shell_exec("grep TOTEN OUTCAR|tail -1")
energy = sscanf(c, "free energy TOTEN = %f eV")[0]
return forces, stress, energy
def cs(self):
from aces.cs import runner
runner(NAH=2).run()
self.check('csfc2')
def check1(self, filename='FORCE_CONSTANTS'):
ref = io.read('SPOSCAR')
fc2 = readfc2(filename)
np.set_printoptions(precision=2, suppress=True)
files = ['dir_POSCAR-001']
vasprunxml = "dir_SPOSCAR/vasprun.xml"
if exists(vasprunxml):
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces0 = parseVasprun(vasprun, 'forces')
print(forces0.max())
else:
forces0 = 0.0
for file in files:
print(file)
POSCAR = 'dirs/%s/POSCAR' % file
vasprunxml = "dirs/%s/vasprun.xml" % file
atoms = io.read(POSCAR)
u = atoms.positions - ref.positions
            f = -np.einsum('ijkl,jl', fc2, u)  # f[i, k] = -sum_{j,l} fc2[i, j, k, l] * u[j, l]
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces = parseVasprun(vasprun, 'forces') - forces0
print(np.abs(f).max(), "\n")
print(np.abs(forces - f).max())
print(np.allclose(f, forces, atol=1e-2))
def check(self, filename='FORCE_CONSTANTS'):
ref = io.read('SPOSCAR')
files = shell_exec("ls dirs").split('\n')
fc2 = readfc2(filename)
np.set_printoptions(precision=2, suppress=True)
vasprunxml = "dir_SPOSCAR/vasprun.xml"
if exists(vasprunxml):
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces0 = parseVasprun(vasprun, 'forces')
print(forces0.max())
else:
forces0 = 0.0
for file in files:
print(file)
POSCAR = 'dirs/%s/POSCAR' % file
vasprunxml = "dirs/%s/vasprun.xml" % file
atoms = io.read(POSCAR)
u = atoms.positions - ref.positions
f = -np.einsum('ijkl,jl', fc2, u)
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces = parseVasprun(vasprun, 'forces') - forces0
print(np.abs(f).max(), "\n")
print(np.abs(forces - f).max())
print(np.allclose(f, forces, atol=1e-2))
def stub(self):
files = shell_exec("ls dirs").split('\n')
files = map(lambda x: x.replace('dir_', ''), files)
fc2 = readfc2('fc2')
for file in files:
ref = io.read('SPOSCAR')
a = 'dirs/dir_' + str(file)
atoms = io.read(a + "/POSCAR")
u = atoms.positions - ref.positions
f = -np.einsum('ijkl,jl', fc2, u)
forces = ""
for force in f:
forces += "<v> %f %f %f </v>\n" % tuple(force)
vasprun = '<root><calculation><varray name="forces" >\n'
vasprun += forces
vasprun += '</varray></calculation></root>\n'
write(vasprun, a + "/vasprun.xml")
def force_constant(self, files):
cmd = config.phonopy + "-f "
if exists("dir_SPOSCAR/vasprun.xml"):
cmd = config.phonopy + "--fz dir_SPOSCAR/vasprun.xml "
for file in files:
dir = "dirs/dir_" + file
cmd += dir + '/vasprun.xml '
# generate FORCE_SETS
passthru(cmd)
m = self.m
# Create FORCE_CONSTANTS
passthru(config.phonopy + "--tolerance=1e-4 --writefc --dim='%s'" %
(m.dim))
def fc2(self):
files = shell_exec("ls dirs").split('\n')
files = map(lambda x: x.replace('dir_', ''), files)
        # With more than a thousand displacements, plain string ordering is wrong
        # (POSCAR-001, POSCAR-1500, POSCAR-159, ...), so sort numerically by the
        # index after the dash.
files.sort(lambda x, y: int(x.split('-')[1]) - int(y.split('-')[1]))
self.force_constant(files)
def generate_meshconf(self):
# generate mesh.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
MP = %s
EIGENVECTORS=.TRUE.
FORCE_CONSTANTS = READ
MESH_SYMMETRY = .FALSE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), ' '.join(map(str, m.kpoints)),
toString(m.premitive.flatten()))
        mesh = re.sub(r'^\s+', '', mesh, flags=re.M)  # strip leading whitespace per line
write(mesh, 'mesh.conf')
def generate_vconf(self):
# generate v.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
MP = %s
FORCE_CONSTANTS = READ
MESH_SYMMETRY = .FALSE.
GROUP_VELOCITY=.TRUE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), ' '.join(map(str, m.kpoints)),
toString(m.premitive.flatten()))
        mesh = re.sub(r'^\s+', '', mesh, flags=re.M)
write(mesh, 'v.conf')
def generate_qconf(self, q):
# generate q.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
FORCE_CONSTANTS = READ
EIGENVECTORS=.TRUE.
QPOINTS=.TRUE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), toString(m.premitive.flatten()))
        mesh = re.sub(r'^\s+', '', mesh, flags=re.M)
write(mesh, 'q.conf')
s = "%s\n" % len(q)
for qq in q:
s += "%s\n" % toString(qq)
write(s, 'QPOINTS')
def generate_vqconf(self, q):
# generate q.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
FORCE_CONSTANTS = READ
GROUP_VELOCITY=.TRUE.
QPOINTS=.TRUE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), toString(m.premitive.flatten()))
        mesh = re.sub(r'^\s+', '', mesh, flags=re.M)
write(mesh, 'q.conf')
s = "%s\n" % len(q)
for qq in q:
s += "%s\n" % toString(qq)
write(s, 'QPOINTS')
def generate_supercells(self):
m = self.m
# generate supercells
passthru(config.phonopy + "--tolerance=1e-4 -d --dim='%s'" % (m.dim))
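    # NPAR below is chosen as the largest divisor of m.cores not exceeding
    # sqrt(m.cores), a common heuristic for VASP parallelisation.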
def writeINCAR(self):
m = self.m
npar = 1
for i in range(1, int(np.sqrt(m.cores)) + 1):
if m.cores % i == 0:
npar = i
if m.ispin:
ispin = "ISPIN=2"
else:
ispin = ""
if m.soc:
soc = "LSORBIT=T"
else:
soc = ""
if m.isym:
sym = "ISYM = 1"
else:
sym = "ISYM = 0"
s = """SYSTEM=calculate energy
PREC = High
IBRION = -1
ENCUT = %f
EDIFF = 1.0e-8
ISMEAR = %d; SIGMA = 0.01
IALGO = 38
LREAL = .FALSE.
ADDGRID = .TRUE.
LWAVE = .FALSE.
LCHARG = .FALSE.
NPAR = %d
%s
%s
%s
""" % (self.m.ecut, m.ismear, npar, sym, ispin, soc)
if m.vdw:
s += """\nIVDW = 1
VDW_RADIUS = 50
VDW_S6 = 0.75
VDW_SR = 1.00
VDW_SCALING = 0.75
VDW_D = 20.0
VDW_C6 = 63.540 31.50
VDW_R0 = 1.898 1.892
"""
        s = re.sub(r'^\s+', '', s, flags=re.M)
write(s, 'INCAR')
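    # Run VASP in the current directory: either register a PBS/TianHe job with
    # the job manager when self.jm is set, or call mpirun directly.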
def getVaspRun_vasp(self):
self.writeINCAR()
m = self.m
writePOTCAR(m, m.elements)
if (m.kpointspath):
cp(m.kpointspath, "KPOINTS")
else:
from aces.io.vasp import writeKPOINTS
writeKPOINTS(m.ekpoints)
if 'jm' in self.__dict__:
if not m.th:
path = pwd()
if m.queue == "q3.4":
pb = pbs(
queue=m.queue,
nodes=12,
procs=1,
disp=m.pbsname,
path=path,
content=config.mpirun + " 12 " + config.vasp +
' >log.out')
else:
pb = pbs(
queue=m.queue,
nodes=1,
procs=12,
disp=m.pbsname,
path=path,
content=config.mpirun + " 12 " + config.vasp +
' >log.out')
else:
path = pwd()
pb = th(disp=m.pbsname, path=path)
self.jm.reg(pb)
else:
shell_exec(config.mpirun + " %s " % m.cores + config.vasp +
' >log.out')
def getVaspRun_lammps(self):
m = self.m
if 'jm' in self.__dict__:
path = pwd()
pb = pbs(
queue=m.queue,
nodes=1,
procs=4,
disp=m.pbsname,
path=path,
content=config.python + vasprun.__file__ + ' >log.out')
self.jm.reg(pb)
else:
shell_exec(config.python + vasprun.__file__ + ' >log.out')
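    # Write a runall.sh that submits every displacement directory with yhbatch
    # (TianHe batch system); used together with m.th.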
def thcode(self, files, put):
s = ""
for file in files:
dir = "dirs/dir_" + file
s += "cd %s\n" % (dir)
s += "yhbatch -N 1 aces.pbs\n"
s += "cd ../../\n"
write(s, put + "/runall.sh")
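    # Dispatch one force calculation per displaced POSCAR: each file is moved
    # into its own dirs/dir_* folder and run with the engine selected by m.engine.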
def getvasprun(self, files):
m = self.m
maindir = pwd()
if m.engine == "vasp":
calculator = self.getVaspRun_vasp
elif m.engine == "lammps":
calculator = self.getVaspRun_lammps
self.jm = jobManager()
for file in files:
print(file)
dir = "dirs/dir_" + file
mkdir(dir)
mv(file, dir + '/POSCAR')
cd(dir)
calculator()
cd(maindir)
self.jm.run()
if m.th:
mkdir(m.pbsname)
self.thcode(files, m.pbsname)
cp("dirs", m.pbsname)
passthru("tar zcf %s.tar.gz %s" % (m.pbsname, m.pbsname))
print('start check')
self.jm.check()
if m.engine == "lammps1":
from multiprocessing.dummy import Pool
pool = Pool()
pool.map_async(lammpsvasprun, files)
pool.close()
pool.join()
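    # Run the undisplaced supercell (SPOSCAR); its residual forces can later be
    # subtracted through phonopy's --fz option in force_constant().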
def runSPOSCAR(self):
m = self.m
maindir = pwd()
file = "SPOSCAR"
dir = "dir_" + file
mkdir(dir)
cp(file, dir + '/POSCAR')
cd(dir)
if m.engine == "vasp":
self.getVaspRun_vasp()
if m.engine == "lammps":
self.getVaspRun_lammps()
cd(maindir)
def checkMinimize(self):
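        """Compare the forces in each dirs/dir_*/dump.force with the displacement
        directions from disp.yaml and write the direction cosines to ccos.txt."""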
import yaml
data = yaml.load(open('disp.yaml').read())
disps = [map(float, a['direction']) for a in data['displacements']]
maindir = pwd()
dirs = ls('dirs/dir_*')
ii = 0
L = np.linalg.norm
# d,p,d1,p1=self.m.rot
out = open('ccos.txt', 'w')
for dir in dirs:
cd(dir)
f = open('dump.force')
for i in range(9):
f.next()
for b in range(ii):
f.next()
line = f.next()
line = line.split()
force = np.array(map(float, line[1:4]))
# force=RotateVector(force,d1,-p1)
# force=RotateVector(force,d,-p)
            d = disps[ii]  # displacement direction belonging to this directory
ccos = force.dot(d) / L(force) / L(d)
ii += 1
print >> out, "%d\t%f" % (ii, ccos)
cd(maindir)
def run(self):
m = self.m
a = time.time()
self.generate_supercells()
debug('generate_supercells:%f s' % (time.time() - a))
files = shell_exec("ls *-*").split('\n')
assert len(files) > 0 and not files[0] == ""
# self.runSPOSCAR()
a = time.time()
self.getvasprun(files)
debug('getvasprun:%f s' % (time.time() - a))
a = time.time()
self.fc2()
debug('force_constant:%f s' % (time.time() - a))
if m.phofc:
return self
self.postp()
def generate(self):
self.minimizePOSCAR()
self.run()
def get_force_sets(self):
files = shell_exec("ls dirs").split('\n')
files = map(lambda x: x.replace('dir_', ''), files)
self.force_constant(files)
def postp(self):
m = self.m
if m.gamma_only:
self.getDos()
return
self.getband()
self.getDos()
self.getbanddos()
self.drawpr()
self.getV()
def getqpoints(self, q):
self.generate_qconf(q)
passthru(config.phonopy + "--tolerance=1e-4 q.conf")
def getvqpoints(self, q):
self.generate_vqconf(q)
passthru(config.phonopy + "--tolerance=1e-4 q.conf")
data = parseyaml('qpoints.yaml')
file = open("v.txt", 'w')
for phonon in data['phonon']:
qp = phonon['q-position']
for band in phonon['band']:
frequency = band['frequency']
v = np.array(band['group_velocity'])
v = np.linalg.norm(v)
print >> file, "%s\t%f\t%f" % ('\t'.join(map(str, qp)),
frequency, v)
file.close()
v = np.loadtxt('v.txt')
plot(
(v[:, 3], 'Frequency (THz)'), (v[:, 4],
'Group Velocity (Angstrom/ps)'),
'v_freq.png',
grid=True,
scatter=True)
def getDos(self):
self.generate_meshconf()
passthru(config.phonopy + "--tolerance=1e-4 --dos mesh.conf")
self.drawDos()
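    # getV() recomputes the mesh with GROUP_VELOCITY=.TRUE. in a separate
    # groupv/ directory and plots |v| against frequency.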
def getV(self):
if not exists('groupv'):
mkdir('groupv')
cd('groupv')
cp('../FORCE_CONSTANTS', '.')
cp('../POSCAR', '.')
cp('../disp.yaml', '.')
self.generate_vconf()
passthru(config.phonopy + "--tolerance=1e-4 v.conf")
self.drawV()
cd('..')
def drawV(self):
data = parseyaml('mesh.yaml')
file = open("v.txt", 'w')
for phonon in data['phonon']:
qp = phonon['q-position']
for band in phonon['band']:
frequency = band['frequency']
v = np.array(band['group_velocity'])
v = np.linalg.norm(v)
print >> file, "%s\t%f\t%f" % ('\t'.join(map(str, qp)),
frequency, v)
file.close()
v = np.loadtxt('v.txt')
plot(
(v[:, 3], 'Frequency (THz)'), (v[:, 4],
'Group Velocity (Angstrom/ps)'),
'v_freq.png',
grid=True,
scatter=True)
def getband(self):
self.generate_bandconf()
passthru(config.phonopy + "--tolerance=1e-4 -s band.conf")
plotband(labels=' '.join(self.m.bandpath))
def getbanddos(self):
freq, pdos = self.getpdos()
plotbanddos(
freq=freq,
dos=np.sum(pdos, axis=1),
labels=' '.join(self.m.bandpath))
def modulation(self):
m = self.m
conf = """
DIM = %s
MODULATION = 1 1 1, 0 0 0 0 1 0
ATOM_NAME = %s
FORCE_CONSTANTS = READ
""" % (m.dim, ' '.join(m.elements))
write(conf, 'modulation.conf')
passthru(config.phonopy + "--tolerance=1e-4 modulation.conf")
def animate(self):
m = self.m
conf = """
DIM = %s
ANIME = 0 5 20
ANIME_TYPE = xyz
ATOM_NAME = %s
FORCE_CONSTANTS = READ
""" % (m.dim, ' '.join(m.elements))
write(conf, 'animate.conf')
passthru(config.phonopy + "--tolerance=1e-4 animate.conf")
def generate_bandconf(self):
# generate mesh.conf
m = self.m
bp = m.bandpoints
bpath = ' '.join([toString(bp[x]) for x in m.bandpath])
band = """DIM = %s
ATOM_NAME = %s
BAND = %s
BAND_POINTS = 101
FORCE_CONSTANTS = READ
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements),
bpath, toString(m.premitive.flatten()))
        band = re.sub(r'^\s+', '', band, flags=re.M)
write(band, 'band.conf')
def getpdos(self):
xx = np.loadtxt('partial_dos.dat', skiprows=1)
freq = xx[:, 0]
pdos = xx[:, 1:]
return freq, pdos
def drawDos(self):
freq, pdos = self.getpdos()
datas = [(freq, p, '') for p in pdos.T]
series(
'Frequency (THz)',
'Partial Density of States',
datas=datas,
filename='partial_dos.png',
legend=False,
grid=True)
plot(
(freq, 'Frequency (THz)'), (np.sum(pdos, axis=1),
'Density of States'),
filename='total_dos.png')
    # calculate participation ratio
    def mesh(self):
        """Parse mesh.yaml and save its contents to mesh.npz."""
data = meshyaml('mesh.yaml')
np.savez('mesh', **data)
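    # pr() is expected to write pr.txt with one "frequency participation_ratio"
    # pair per line; the values are then averaged and plotted.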
def drawpr(self):
pr()
# plot
xs = []
ys = []
for line in open('pr.txt'):
x, y = map(float, line.split())
xs.append(x)
ys.append(y)
write("%s" % (sum(ys) / len(ys)), "ave_pr.txt")
with fig('Paticipation_ratio.png'):
pl.plot(xs, ys, '.', color='red')
pl.ylim([0.0, 1.0])
pl.xlabel('Frequency (THz)')
            pl.ylabel('Participation Ratio')
| vanceeasleaf/aces | aces/runners/phonopy.py | Python | gpl-2.0 | 19,132 |
from sys import argv
|
| mrniranjan/python-scripts | reboot/math27.py | Python | gpl-2.0 | 24 |
# coding=utf-8
__author__ = 'stasstels'
import cv2
import sys
image = sys.argv[1]
targets = sys.argv[2]
# Load an color image in grayscale
img = cv2.imread(image, cv2.IMREAD_COLOR)
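# Each line of the targets file is expected to hold three whitespace-separated
# fields; the last two are the x and y pixel coordinates of a point to mark.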
with open(targets, "r") as f:
for line in f:
print line
(_, x, y) = line.split()
cv2.circle(img, (int(x), int(y)), 20, (255, 0, 255), -1)
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| OsipovStas/ayc-2013 | ayc/show.py | Python | gpl-2.0 | 455 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Developed with Python 2.7.3
import os
import sys
import tools
import json
print("The frame :")
name = raw_input("-> name of the framework ?")
kmin = float(raw_input("-> Minimum boundary ?"))
kmax = float(raw_input("-> Maximum boundary ?"))
precision = float(raw_input("-> Precision (graduation axe) ?"))
nb_agent_per_graduation = int(raw_input("-> Number of agent per graduation ?"))
print("\nThis script generates the population distribution automatically: nb_agent_per_graduation is mapped onto every graduation.")
print("\n(!) Note: this should be improved to follow a statistical law (a Gaussian, for instance); for now it only distributes uniformly.")
i=kmin
distribution = {}
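# Uniform mapping: every graduation step between kmin and kmax (inclusive) gets
# the same number of agents.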
while i < kmax+precision:
distribution[i] = nb_agent_per_graduation
i+= precision
    i = round(i, tools.get_round_precision(precision))  # guard against floating-point drift from repeatedly adding `precision`
#print json.dumps(distribution); exit()
o = open(name+".frmwrk",'w')
o.write("# A framework is described as follows:\n# in order, we define min_boundary max_boundary precision frequencies\n")
o.write(json.dumps([kmin, kmax, precision, distribution]))
o.close() | tchaly-bethmaure/Emotes | script/script_tools/framework_file_generator.py | Python | gpl-2.0 | 1,143 |
class Controller(object):
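    """Abstract controller of the model-view-controller triad: the view invokes
    these handlers, and concrete subclasses are expected to override them."""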
def __init__(self, model):
self._model = model
self._view = None
def register_view(self, view):
self._view = view
def on_quit(self, *args):
raise NotImplementedError
def on_keybinding_activated(self, core, time):
raise NotImplementedError
def on_show_about(self, sender):
raise NotImplementedError
def on_toggle_history(self, sender):
raise NotImplementedError
def on_show_preferences(self, sender):
raise NotImplementedError
def on_query_entry_changed(self, entry):
raise NotImplementedError
def on_query_entry_key_press_event(self, entry, event):
raise NotImplementedError
def on_query_entry_activate(self, entry):
raise NotImplementedError
def on_treeview_cursor_changed(self, treeview):
raise NotImplementedError
def on_match_selected(self, treeview, text, match_obj, event):
raise NotImplementedError
def on_do_default_action(self, treeview, text, match_obj, event):
raise NotImplementedError
def on_action_selected(self, treeview, text, action, event):
raise NotImplementedError
def on_clear_history(self, sender):
raise NotImplementedError
def on_history_match_selected(self, history, text, match):
raise NotImplementedError | benpicco/mate-deskbar-applet | deskbar/interfaces/Controller.py | Python | gpl-2.0 | 1,455 |