Dataset schema (one row per source file): repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments.
Each entry below gives repo_name | path | license on one line, followed by the file content.
spesmilo/electrum | electrum/gui/qt/amountedit.py | mit
# -*- coding: utf-8 -*-
from decimal import Decimal
from typing import Union
from PyQt5.QtCore import pyqtSignal, Qt, QSize
from PyQt5.QtGui import QPalette, QPainter
from PyQt5.QtWidgets import (QLineEdit, QStyle, QStyleOptionFrame, QSizePolicy)
from .util import char_width_in_lineedit, ColorScheme
from electrum.util import (format_satoshis_plain, decimal_point_to_base_unit_name,
FEERATE_PRECISION, quantize_feerate)
class FreezableLineEdit(QLineEdit):
frozen = pyqtSignal()
def setFrozen(self, b):
self.setReadOnly(b)
self.setFrame(not b)
self.frozen.emit()
class SizedFreezableLineEdit(FreezableLineEdit):
def __init__(self, *, width: int, parent=None):
super().__init__(parent)
self._width = width
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
def sizeHint(self) -> QSize:
sh = super().sizeHint()
return QSize(self._width, sh.height())
class AmountEdit(SizedFreezableLineEdit):
shortcut = pyqtSignal()
def __init__(self, base_unit, is_int=False, parent=None):
# This seems sufficient for hundred-BTC amounts with 8 decimals
width = 16 * char_width_in_lineedit()
super().__init__(width=width, parent=parent)
self.base_unit = base_unit
self.textChanged.connect(self.numbify)
self.is_int = is_int
self.is_shortcut = False
self.extra_precision = 0
def decimal_point(self):
return 8
def max_precision(self):
return self.decimal_point() + self.extra_precision
def numbify(self):
text = self.text().strip()
if text == '!':
self.shortcut.emit()
return
pos = self.cursorPosition()
chars = '0123456789'
if not self.is_int: chars +='.'
s = ''.join([i for i in text if i in chars])
if not self.is_int:
if '.' in s:
p = s.find('.')
s = s.replace('.','')
s = s[:p] + '.' + s[p:p+self.max_precision()]
self.setText(s)
# setText sets Modified to False. Instead we want to remember
# if updates were because of user modification.
self.setModified(self.hasFocus())
self.setCursorPosition(pos)
def paintEvent(self, event):
QLineEdit.paintEvent(self, event)
if self.base_unit:
panel = QStyleOptionFrame()
self.initStyleOption(panel)
textRect = self.style().subElementRect(QStyle.SE_LineEditContents, panel, self)
textRect.adjust(2, 0, -10, 0)
painter = QPainter(self)
painter.setPen(ColorScheme.GRAY.as_color())
painter.drawText(textRect, int(Qt.AlignRight | Qt.AlignVCenter), self.base_unit())
def get_amount(self) -> Union[None, Decimal, int]:
try:
return (int if self.is_int else Decimal)(str(self.text()))
except:
return None
def setAmount(self, x):
self.setText("%d"%x)
class BTCAmountEdit(AmountEdit):
def __init__(self, decimal_point, is_int=False, parent=None):
AmountEdit.__init__(self, self._base_unit, is_int, parent)
self.decimal_point = decimal_point
def _base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point())
def get_amount(self):
# returns amt in satoshis
try:
x = Decimal(str(self.text()))
except:
return None
# scale it to max allowed precision, make it an int
power = pow(10, self.max_precision())
max_prec_amount = int(power * x)
# if the max precision is simply what unit conversion allows, just return
if self.max_precision() == self.decimal_point():
return max_prec_amount
# otherwise, scale it back to the expected unit
amount = Decimal(max_prec_amount) / pow(10, self.max_precision()-self.decimal_point())
return Decimal(amount) if not self.is_int else int(amount)
def setAmount(self, amount_sat):
if amount_sat is None:
self.setText(" ") # Space forces repaint in case units changed
else:
self.setText(format_satoshis_plain(amount_sat, decimal_point=self.decimal_point()))
self.repaint() # macOS hack for #6269
class FeerateEdit(BTCAmountEdit):
def __init__(self, decimal_point, is_int=False, parent=None):
super().__init__(decimal_point, is_int, parent)
self.extra_precision = FEERATE_PRECISION
def _base_unit(self):
return 'sat/byte'
def get_amount(self):
sat_per_byte_amount = BTCAmountEdit.get_amount(self)
return quantize_feerate(sat_per_byte_amount)
def setAmount(self, amount):
amount = quantize_feerate(amount)
super().setAmount(amount)
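# Illustrative usage sketch (not part of the original module): constructing these
# widgets requires a running QApplication, and decimal_point must be a callable
# returning the unit's precision (8 for BTC). The amount below is an example value.
def _example_btc_amount_edit():
    edit = BTCAmountEdit(decimal_point=lambda: 8)
    edit.setAmount(150000000)               # 1.5 BTC expressed in satoshis
    assert edit.get_amount() == 150000000   # get_amount() round-trips back to satoshis
    return edit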
gjwajda/Computational-Tools-For-Big-Data | Exercise11/exercise11_2.py | mit
#!C:/Users/Greg/Anaconda/Python
import json
from time import time
from os import listdir
import re
import numpy as np
from random import shuffle
from pprint import pprint
#Folder with json files
path = "./json/"
# Function to load all json files into python
def merge_json(path):
merged_json = []
for filename in listdir(path):
with open(path + filename) as json_file:
json_data = json.load(json_file)
# Filter out any articles that don't contain topics or body
json_data = filter(lambda x: "topics" in x.keys() and "body" in x.keys(), json_data)
merged_json += json_data
return merged_json
#Function for creating 2D matrix of size x*y
def declareMatrix(x,y):
matrix = [[0]*y for i in range(x)]
return matrix;
#Bag of words function with json files and desired element to access
def bagOfWords(merge_list,des_string):
#To Count how many lines we are reading
line_count = 0
#To store the list of words in each line
lines = []
#To store the unique words
word_uniq = []
# Look in first 100 articles
for json in merge_list[:100]:
body = json[des_string]
line_count += 1
#Collect string, make lowercase, strip punctuation,
#and split into words for easier access
text = body.lower()
text = re.sub('[\[\]!~*\-,><}{;)(:#$"&%.?]',' ',text)
text = text.replace("\\n",' ')
text = text.split()
for word in text:
if word in word_uniq: #If word is in list of unique words, do nothing
next
else:
word_uniq.append(word) #Add to unique word list
#Add the line's words to a line list
lines.append(text)
#Declare Bag of Words Matrix
bag_matrix = declareMatrix(line_count,len(word_uniq))
#Fill in Bag of Words Matrix
for l in xrange(len(lines)):
for w in lines[l]:
bag_matrix[l][word_uniq.index(w)] += 1
#Print off dimensions of matrix
print "%d * %d are the dimensions of bag of words matrix" % (len(bag_matrix), len(bag_matrix[0]))
return np.array(bag_matrix)
def minhash(bag, numHashes):
# Transpose the bag of words so columns are different articles
# and rows are different words
bag = zip(*bag)
# Find how many rows there are to help with permutations
permutation_length = len(bag)
# Create output matrix
minhash_output = declareMatrix(numHashes, len(bag[0]))
for hashNum in xrange(numHashes):
# Create row permutation array
permutation = [i for i in range(permutation_length)]
shuffle(permutation)
# Go through each column, finding first non-zero
for column in xrange(len(bag[0])):
# Go through shuffled rows to find first nonzero
for i in xrange(len(bag)):
# Find current row in permutation
curr_row = permutation[i]
curr_item = bag[curr_row][column]
# For first nonzero item, store iteration in which it was found
if curr_item != 0:
minhash_output[hashNum][column] = i+1
break
return minhash_output
######################################
start_time = time()
merged_json = merge_json(path)
data = bagOfWords( merged_json, "body" )
print data
print("------ %s seconds ------" % (time() - start_time))
time2 = time()
minhashed = ( minhash(data, 10) )
s = [[str(e) for e in row] for row in minhashed]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
table = [fmt.format(*row) for row in s]
print '\n'.join(table)
print("------ %s seconds ------" % (time() - time2))
tboyce1/home-assistant | homeassistant/components/climate/econet.py | apache-2.0
"""
Support for Rheem EcoNet water heaters.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.econet/
"""
import datetime
import logging
import voluptuous as vol
from homeassistant.components.climate import (
DOMAIN, PLATFORM_SCHEMA, STATE_ECO, STATE_ELECTRIC, STATE_GAS,
STATE_HEAT_PUMP, STATE_HIGH_DEMAND, STATE_OFF, STATE_PERFORMANCE,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, ClimateDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, CONF_PASSWORD, CONF_USERNAME,
TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyeconet==0.0.4']
_LOGGER = logging.getLogger(__name__)
ATTR_VACATION_START = 'next_vacation_start_date'
ATTR_VACATION_END = 'next_vacation_end_date'
ATTR_ON_VACATION = 'on_vacation'
ATTR_TODAYS_ENERGY_USAGE = 'todays_energy_usage'
ATTR_IN_USE = 'in_use'
ATTR_START_DATE = 'start_date'
ATTR_END_DATE = 'end_date'
SUPPORT_FLAGS_HEATER = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE)
SERVICE_ADD_VACATION = 'econet_add_vacation'
SERVICE_DELETE_VACATION = 'econet_delete_vacation'
ADD_VACATION_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_START_DATE): cv.positive_int,
vol.Required(ATTR_END_DATE): cv.positive_int,
})
DELETE_VACATION_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
ECONET_DATA = 'econet'
HA_STATE_TO_ECONET = {
STATE_ECO: 'Energy Saver',
STATE_ELECTRIC: 'Electric',
STATE_HEAT_PUMP: 'Heat Pump',
STATE_GAS: 'gas',
STATE_HIGH_DEMAND: 'High Demand',
STATE_OFF: 'Off',
STATE_PERFORMANCE: 'Performance'
}
ECONET_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_ECONET.items()}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the EcoNet water heaters."""
from pyeconet.api import PyEcoNet
hass.data[ECONET_DATA] = {}
hass.data[ECONET_DATA]['water_heaters'] = []
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
econet = PyEcoNet(username, password)
water_heaters = econet.get_water_heaters()
hass_water_heaters = [
EcoNetWaterHeater(water_heater) for water_heater in water_heaters]
add_devices(hass_water_heaters)
hass.data[ECONET_DATA]['water_heaters'].extend(hass_water_heaters)
def service_handle(service):
"""Handle the service calls."""
entity_ids = service.data.get('entity_id')
all_heaters = hass.data[ECONET_DATA]['water_heaters']
_heaters = [
x for x in all_heaters
if not entity_ids or x.entity_id in entity_ids]
for _water_heater in _heaters:
if service.service == SERVICE_ADD_VACATION:
start = service.data.get(ATTR_START_DATE)
end = service.data.get(ATTR_END_DATE)
_water_heater.add_vacation(start, end)
if service.service == SERVICE_DELETE_VACATION:
for vacation in _water_heater.water_heater.vacations:
vacation.delete()
_water_heater.schedule_update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_ADD_VACATION, service_handle,
schema=ADD_VACATION_SCHEMA)
hass.services.register(DOMAIN, SERVICE_DELETE_VACATION, service_handle,
schema=DELETE_VACATION_SCHEMA)
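# Illustrative only (not from the original integration): a service call matching
# ADD_VACATION_SCHEMA above. The entity id and Unix timestamps are made-up values.
def _example_add_vacation_call(hass):
    hass.services.call(DOMAIN, SERVICE_ADD_VACATION, {
        ATTR_ENTITY_ID: ['climate.econet_water_heater'],
        ATTR_START_DATE: 1514764800,
        ATTR_END_DATE: 1515369600,
    })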
class EcoNetWaterHeater(ClimateDevice):
"""Representation of an EcoNet water heater."""
def __init__(self, water_heater):
"""Initialize the water heater."""
self.water_heater = water_heater
@property
def name(self):
"""Return the device name."""
return self.water_heater.name
@property
def available(self):
"""Return if the the device is online or not."""
return self.water_heater.is_connected
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the optional device state attributes."""
data = {}
vacations = self.water_heater.get_vacations()
if vacations:
data[ATTR_VACATION_START] = vacations[0].start_date
data[ATTR_VACATION_END] = vacations[0].end_date
data[ATTR_ON_VACATION] = self.water_heater.is_on_vacation
todays_usage = self.water_heater.total_usage_for_today
if todays_usage:
data[ATTR_TODAYS_ENERGY_USAGE] = todays_usage
data[ATTR_IN_USE] = self.water_heater.in_use
return data
@property
def current_operation(self):
"""
Return current operation as one of the following.
["eco", "heat_pump", "high_demand", "electric_only"]
"""
current_op = ECONET_STATE_TO_HA.get(self.water_heater.mode)
return current_op
@property
def operation_list(self):
"""List of available operation modes."""
op_list = []
modes = self.water_heater.supported_modes
for mode in modes:
ha_mode = ECONET_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = "Invalid operation mode mapping. " + mode + \
" doesn't map. Please report this."
_LOGGER.error(error)
return op_list
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATER
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
if target_temp is not None:
self.water_heater.set_target_set_point(target_temp)
else:
_LOGGER.error("A target temperature must be provided")
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
op_mode_to_set = HA_STATE_TO_ECONET.get(operation_mode)
if op_mode_to_set is not None:
self.water_heater.set_mode(op_mode_to_set)
else:
_LOGGER.error("An operation mode must be provided")
def add_vacation(self, start, end):
"""Add a vacation to this water heater."""
if not start:
start = datetime.datetime.now()
else:
start = datetime.datetime.fromtimestamp(start)
end = datetime.datetime.fromtimestamp(end)
self.water_heater.set_vacation_mode(start, end)
def update(self):
"""Get the latest date."""
self.water_heater.update_state()
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.water_heater.set_point
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.water_heater.min_set_point
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.water_heater.max_set_point
KirillShaman/escalate_gspread | app/channels/models.py | mit
# The MIT License (MIT)
# Escalate Copyright (c) [2014] [Chris Smith]
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from app import flask_app, datetime, db
import re
from multiprocessing.pool import ThreadPool as Pool
import requests
import bs4
from peewee import *
from app.gspreadsheet import Gspreadsheet
from app import GUser, werkzeug_cache
# ________________________________________________________________________
class ChannelCounter(Model):
name = CharField()
runnable = CharField()
gspread_link = CharField()
channel = CharField(null=True)
created_at = DateTimeField(null=True)
updated_at = DateTimeField(null=True)
class Meta:
database = db
db_table = 'channel_counters'
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def is_runnable(self):
return self.runnable == 'yes'
def __repr__(self):
return 'id={}, name={}'.format(self.id, self.name)
# ________________________________________________________________________
class Channel(Model):
ROOT_URL_PREFIX = 'http://www.youtube.com/user/'
ROOT_URL_SUFFIX = '/videos'
name = CharField()
channel = CharField()
url = CharField()
title = CharField(null=True)
views = IntegerField(null=True)
likes = IntegerField(null=True)
dislikes = IntegerField(null=True)
timestamp = DateTimeField(null=True)
class Meta:
database = db
db_table = 'channel_counts'
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def __repr__(self):
return 'id={}, name={}, url={}, title={}'.format(self.id, self.name, self.url, self.title)
@classmethod
def get_video_page_urls(cls, channel):
response = requests.get(Channel.ROOT_URL_PREFIX + channel + Channel.ROOT_URL_SUFFIX)
soup = bs4.BeautifulSoup(response.text)
urls = []
for title in soup.findAll('h3', attrs={'class': 'yt-lockup-title'}):
urls.append("https://www.youtube.com%s" % title.find('a')['href'])
return urls
@classmethod
def get_video_data(cls, video_page_url):
video_data = {}
video_data['url'] = video_page_url
video_data['title'] = ""
video_data['views'] = 0
video_data['likes'] = 0
video_data['dislikes'] = 0
try:
response = requests.get(
video_data['url'],
headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36'}
)
soup = bs4.BeautifulSoup(response.text)
video_data['title'] = soup.select('span.watch-title')[0].get_text().strip()
video_data['views'] = int(re.sub('[^0-9]', '', soup.select('.watch-view-count')[0].get_text().split()[0]))
video_data['likes'] = int(re.sub('[^0-9]', '',
soup.select('#watch-like-dislike-buttons span.yt-uix-button-content')[0].get_text().split()[0]))
video_data['dislikes'] = int(re.sub('[^0-9]', '',
soup.select('#watch-like-dislike-buttons span.yt-uix-button-content')[2].get_text().split()[0]))
except Exception as e:
# some or all of the channels could not be scraped
print("Error: Channel:get_video_data: %s"%e)
pass
return video_data
@staticmethod
def scrape(video_counter):
guser = werkzeug_cache.get('guser')
gs = Gspreadsheet(guser.gmail, guser.gpassword, None)
gs.login()
ss = gs.gclient.open_by_url(video_counter.gspread_link)
ws = ss.sheet1
urls = gs.col_one(ws)
results = []
try:
pool = Pool(flask_app.config['MULTIPROCESSING_POOL_SIZE'])
# video_page_urls = Channel.get_video_page_urls(channel)
# results = pool.map(Channel.get_video_data, video_page_urls)
results = pool.map(Channel.get_video_data, urls)
now_timestamp = datetime.utcnow()
nrow = 2
for i in range(len(results)):
# gspread update cells in row:
acells = ws.range("B%s:E%s" % (nrow, nrow))
acells[0].value = results[i]['title']
acells[1].value = results[i]['views']
acells[2].value = results[i]['likes']
acells[3].value = results[i]['dislikes']
ws.update_cells(acells)
c = Channel.create(
name=video_counter.name,
channel='',
url=results[i]['url'],
title=results[i]['title'],
views=results[i]['views'],
likes=results[i]['likes'],
dislikes=results[i]['dislikes'],
timestamp=now_timestamp
)
nrow += 1
except Exception as e:
print("Error: Channel:channel_scrape:\n%s" % e)
return len(results)
jgasteiz/fuzzingtheweb | settings.py | mit
# -*- coding: utf-8 -*-
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Javi Manzano', '[email protected]'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = ['176.58.120.22', 'fuzzingtheweb.com']
TIME_ZONE = 'Europe/London'
USE_TZ = True
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = '/home/ubuntu/media/'
MEDIA_URL = '/static/media/'
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})
SECRET_KEY = '%3maeu=guk3p#67j-2--drhy$*^vx+=l9r9bltk-n-^cw4#nic'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
'/home/ubuntu/django_apps/fuzzopress/blog/templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.markup',
'blogadmin',
'markitup',
'south',
'blog',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
MARKITUP_SET = 'markitup/sets/markdown'
# Settings for main blog app
FUZZOPRESS_SETTINGS = {
'contact': [
{
'name': 'github',
'icon': 'icon-github-alt',
'url': 'https://github.com/jgasteiz',
'show': True,
},
{
'name': 'twitter',
'icon': 'icon-twitter',
'url': 'https://twitter.com/jgasteiz',
'show': True,
},
{
'name': 'googleplus',
'icon': 'icon-google-plus-sign',
'url': 'https://plus.google.com/u/0/104971241169939266087/posts',
'show': True,
},
{
'name': 'email',
'icon': 'icon-envelope-alt',
'url': 'mailto:[email protected]',
'show': True,
}
],
'analytics': {
'show': True,
'code': 'UA-23612418-1'
},
'tags': {
'show': True
},
'archive': {
'show': True
},
'finder': {
'show': True
},
'entries_per_page': 5
}
try:
from local_settings import *
except ImportError:
pass
Paczesiowa/youtube-dl | youtube_dl/extractor/dailymotion.py | unlicense
# coding: utf-8
from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
orderedSet,
str_to_int,
unescapeHTML,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
"""Build a request with the family filter disabled"""
request = compat_urllib_request.Request(url)
request.add_header('Cookie', 'family_filter=off; ff=off')
return request
class DailymotionIE(DailymotionBaseInfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
_FORMATS = [
('stream_h264_ld_url', 'ld'),
('stream_h264_url', 'standard'),
('stream_h264_hq_url', 'hq'),
('stream_h264_hd_url', 'hd'),
('stream_h264_hd1080_url', 'hd180'),
]
_TESTS = [
{
'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
'md5': '2137c41a8e78554bb09225b8eb322406',
'info_dict': {
'id': 'x2iuewm',
'ext': 'mp4',
'uploader': 'IGN',
'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
'upload_date': '20150306',
}
},
# Vevo video
{
'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
'info_dict': {
'title': 'Roar (Official)',
'id': 'USUV71301934',
'ext': 'mp4',
'uploader': 'Katy Perry',
'upload_date': '20130905',
},
'params': {
'skip_download': True,
},
'skip': 'VEVO is only available in some countries',
},
# age-restricted video
{
'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
'md5': '0d667a7b9cebecc3c89ee93099c4159d',
'info_dict': {
'id': 'xyh2zz',
'ext': 'mp4',
'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
'uploader': 'HotWaves1012',
'age_limit': 18,
}
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'https://www.dailymotion.com/video/%s' % video_id
# Retrieve video webpage to extract further information
request = self._build_request(url)
webpage = self._download_webpage(request, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
# It may just embed a vevo video:
m_vevo = re.search(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
webpage)
if m_vevo is not None:
vevo_id = m_vevo.group('id')
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
age_limit = self._rta_search(webpage)
video_upload_date = None
mobj = re.search(r'<meta property="video:release_date" content="([0-9]{4})-([0-9]{2})-([0-9]{2}).+?"/>', webpage)
if mobj is not None:
video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
embed_url = 'https://www.dailymotion.com/embed/video/%s' % video_id
embed_request = self._build_request(embed_url)
embed_page = self._download_webpage(
embed_request, video_id, 'Downloading embed page')
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE)
info = json.loads(info)
if info.get('error') is not None:
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
raise ExtractorError(msg, expected=True)
formats = []
for (key, format_id) in self._FORMATS:
video_url = info.get(key)
if video_url is not None:
m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
if m_size is not None:
width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
else:
width, height = None, None
formats.append({
'url': video_url,
'ext': 'mp4',
'format_id': format_id,
'width': width,
'height': height,
})
if not formats:
raise ExtractorError('Unable to extract video URL')
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
view_count = str_to_int(self._search_regex(
r'video_views_count[^>]+>\s+([\d\.,]+)',
webpage, 'view count', fatal=False))
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
'title')
return {
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
'upload_date': video_upload_date,
'title': title,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
'view_count': view_count,
}
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
return sub_lang_list
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
IE_NAME = 'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
_TESTS = [{
'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
'info_dict': {
'title': 'SPORT',
'id': 'xv4bw_nqtv_sport',
},
'playlist_mincount': 20,
}]
def _extract_entries(self, id):
video_ids = []
for pagenum in itertools.count(1):
request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
webpage = self._download_webpage(request,
id, 'Downloading page %s' % pagenum)
video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
for video_id in orderedSet(video_ids)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist_id),
}
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?:(?:old/)?user/)?(?P<user>[^/]+)$'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
'info_dict': {
'id': 'nqtv',
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
webpage = self._download_webpage(
'https://www.dailymotion.com/user/%s' % user, user)
full_user = unescapeHTML(self._html_search_regex(
r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
webpage, 'user'))
return {
'_type': 'playlist',
'id': user,
'title': full_user,
'entries': self._extract_entries(user),
}
class DailymotionCloudIE(DailymotionBaseInfoExtractor):
_VALID_URL = r'http://api\.dmcloud\.net/embed/[^/]+/(?P<id>[^/?]+)'
_TEST = {
# From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
# Tested at FranceTvInfo_2
'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
'only_matching': True,
}
@classmethod
def _extract_dmcloud_url(self, webpage):
mobj = re.search(r'<iframe[^>]+src=[\'"](http://api\.dmcloud\.net/embed/[^/]+/[^\'"]+)[\'"]', webpage)
if mobj:
return mobj.group(1)
mobj = re.search(r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](http://api\.dmcloud\.net/embed/[^/]+/[^\'"]+)[\'"]', webpage)
if mobj:
return mobj.group(1)
def _real_extract(self, url):
video_id = self._match_id(url)
request = self._build_request(url)
webpage = self._download_webpage(request, video_id)
title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
video_info = self._parse_json(self._search_regex(
r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)
# TODO: parse ios_url, which is in fact a manifest
video_url = video_info['mp4_url']
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': video_info.get('thumbnail_url'),
}
joegomes/deepchem | deepchem/models/autoencoder_models/test_tensorflowEncoders.py | mit
from unittest import TestCase
from nose.tools import assert_equals
from rdkit import Chem
import deepchem as dc
from deepchem.data import DiskDataset
from deepchem.feat.one_hot import zinc_charset
from deepchem.models.autoencoder_models.autoencoder import TensorflowMoleculeEncoder, TensorflowMoleculeDecoder
class TestTensorflowEncoders(TestCase):
def test_fit(self):
tf_enc = TensorflowMoleculeEncoder.zinc_encoder()
smiles = [
"Cn1cnc2c1c(=O)n(C)c(=O)n2C", "O=C(O)[C@@H]1/C(=C/CO)O[C@@H]2CC(=O)N21",
"Cn1c2nncnc2c(=O)n(C)c1=O", "Cn1cnc2c1c(=O)[nH]c(=O)n2C",
"NC(=O)c1ncc[nH]c1=O", "O=C1OCc2c1[nH]c(=O)[nH]c2=O",
"Cn1c(N)c(N)c(=O)n(C)c1=O", "CNc1nc2c([nH]1)c(=O)[nH]c(=O)n2C",
"CC(=O)N1CN(C(C)=O)[C@@H](O)[C@@H]1O",
"CC(=O)N1CN(C(C)=O)[C@H](O)[C@H]1O", "Cc1[nH]c(=O)[nH]c(=O)c1CO",
"O=C1NCCCc2c1no[n+]2[O-]", "Cc1nc(C(N)=O)c(N)n1CCO",
"O=c1[nH]cc(N2CCOCC2)c(=O)[nH]1"
]
featurizer = dc.feat.one_hot.OneHotFeaturizer(zinc_charset, 120)
mols = [Chem.MolFromSmiles(x) for x in smiles]
features = featurizer.featurize(mols)
dataset = DiskDataset.from_numpy(features, features)
prediction = tf_enc.predict_on_batch(dataset.X)
tf_de = TensorflowMoleculeDecoder.zinc_decoder()
one_hot_decoded = tf_de.predict_on_batch(prediction)
decoded_smiles = featurizer.untransform(one_hot_decoded)
assert_equals(len(decoded_smiles), len(smiles))
limodou/uliweb | uliweb/utils/setup.py | bsd-2-clause
from setuptools import setup
from setuptools.command import build_py as b
import os,sys
import glob
#remove build and dist directory
import shutil
#if os.path.exists('build'):
# shutil.rmtree('build')
#if os.path.exists('dist'):
# shutil.rmtree('dist')
def copy_dir(self, package, src, dst):
self.mkpath(dst)
for r in os.listdir(src):
if r in ['.svn', '_svn']:
continue
fpath = os.path.join(src, r)
if os.path.isdir(fpath):
copy_dir(self, package + '.' + r, fpath, os.path.join(dst, r))
else:
ext = os.path.splitext(fpath)[1]
if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
continue
target = os.path.join(dst, r)
self.copy_file(fpath, target)
def find_dir(self, package, src):
for r in os.listdir(src):
if r in ['.svn', '_svn']:
continue
fpath = os.path.join(src, r)
if os.path.isdir(fpath):
for f in find_dir(self, package + '.' + r, fpath):
yield f
else:
ext = os.path.splitext(fpath)[1]
if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
continue
yield fpath
def build_package_data(self):
for package in self.packages or ():
src_dir = self.get_package_dir(package)
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
copy_dir(self, package, src_dir, build_dir)
setattr(b.build_py, 'build_package_data', build_package_data)
def get_source_files(self):
filenames = []
for package in self.packages or ():
src_dir = self.get_package_dir(package)
filenames.extend(list(find_dir(self, package, src_dir)))
return filenames
setattr(b.build_py, 'get_source_files', get_source_files)
from setuptools.command.develop import develop
from distutils import sysconfig
unlink = os.unlink
def rm(obj):
import shutil
if os.path.exists(obj):
try:
if os.path.isdir(obj):
if os.path.islink(obj):
unlink(obj)
else:
shutil.rmtree(obj)
else:
if os.path.islink(obj):
unlink(obj)
else:
os.remove(obj)
except:
import traceback
traceback.print_exc()
raise
__CSL = None
def symlink(source, link_name):
'''symlink(source, link_name)
Creates a symbolic link pointing to source named link_name
copys from http://stackoverflow.com/questions/1447575/symlinks-on-windows/7924557
'''
global __CSL
if __CSL is None:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
__CSL = csl
flags = 0
if source is not None and os.path.isdir(source):
flags = 1
if __CSL(link_name, source, flags) == 0:
raise ctypes.WinError()
def pre_run(func):
def _f(self):
global unlink
if self.distribution.package_dir and sys.platform == 'win32':
try:
import ntfslink
except:
print 'You need to install ntfslink package first in windows platform.'
print 'You can find it at https://github.com/juntalis/ntfslink-python'
sys.exit(1)
if not hasattr(os, 'symlink'):
os.symlink = symlink
os.path.islink = ntfslink.symlink.check
unlink = ntfslink.symlink.unlink
func(self)
return _f
develop.run = pre_run(develop.run)
def post_install_for_development(func):
def _f(self):
func(self)
packages = self.distribution.packages
package_dir = self.distribution.package_dir
libpath = sysconfig.get_python_lib()
if not package_dir: return
for p in sorted(packages):
#if the package is something like 'x.y.z'
#then create site-packages/x/y
#then create symlink to z to src directory
ps = p.split('.')
if len(ps)>1:
path = libpath
for x in ps[:-1]:
path = os.path.join(path, x)
if not os.path.exists(path):
os.makedirs(path)
inifile = os.path.join(path, '__init__.py')
if not os.path.exists(inifile):
with open(inifile, 'w') as f:
f.write('\n')
pkg = os.path.join(libpath, *ps)
d = package_dir.get(p, None)
if d is None:
print "Error: the package %s directory can't be found in package_dir, please config it first" % p
sys.exit(1)
src = os.path.abspath(os.path.join(os.getcwd(), d))
print 'Linking ', src, 'to', pkg
rm(pkg)
os.symlink(src, pkg)
return _f
develop.install_for_development = post_install_for_development(develop.install_for_development)
def post_uninstall_link(func):
def _f(self):
func(self)
packages = self.distribution.packages
package_dir = self.distribution.package_dir
if not package_dir: return
libpath = sysconfig.get_python_lib()
for p in sorted(packages, reverse=True):
print 'Unlink... %s' % p
pkg = os.path.join(libpath, p.replace('.', '/'))
rm(pkg)
return _f
develop.uninstall_link = post_uninstall_link(develop.uninstall_link)
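# Illustrative sketch (not part of the original module): the kind of setup() call
# whose package_dir mapping the develop-mode symlinking above is meant to handle.
# The project name, packages and directories below are hypothetical.
def _example_setup():
    setup(
        name='myapp',
        packages=['myapp', 'myapp.contrib'],
        package_dir={'myapp': 'src/myapp', 'myapp.contrib': 'contrib/myapp_contrib'},
    )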
henryroe/Py2MASS | py2mass/__main__.py | mit
from __future__ import absolute_import
import pickle
import sys
from .py2mass import set_2mass_path, fetch_2mass_xsc_box, fetch_2mass_psc_box, __version__
def main():
show_help = False
if len(sys.argv) == 1 or "help" in sys.argv:
show_help = True
else:
if sys.argv[1] == 'psc':
try:
ra_range = [float(sys.argv[2]), float(sys.argv[3])]
dec_range = [float(sys.argv[4]), float(sys.argv[5])]
except:
raise Error("Expected 4 numbers after radec_range: \n\t" +
"RA_low_deg RA_high_deg DEC_low_deg DEC_high_deg ")
stars = fetch_2mass_psc_box(ra_range, dec_range)
if 'pickle' in sys.argv:
pickle.dump(stars, sys.stdout)
else:
sys.stdout.write(stars.to_string() + '\n')
elif sys.argv[1] == 'xsc':
try:
ra_range = [float(sys.argv[2]), float(sys.argv[3])]
dec_range = [float(sys.argv[4]), float(sys.argv[5])]
except:
raise Error("Expected 4 numbers after radec_range: \n\t" +
"RA_low_deg RA_high_deg DEC_low_deg DEC_high_deg ")
sources = fetch_2mass_xsc_box(ra_range, dec_range)
if 'pickle' in sys.argv:
pickle.dump(sources, sys.stdout)
else:
sys.stdout.write(sources.to_string() + '\n')
else:
show_help = True
if show_help:
print "Usage:"
print "py2mass [psc|xsc] minRA maxRA minDEC maxDEC [pickle]"
print "----"
print " psc - 2MASS Point Source Catalog"
print " xsc - 2MASS Extended Source Catalog"
print " Default output is a nicely formatted text table."
print " Optional keyword (pickle) will dump a pickle of that table, "
print " which can then be read back in from file with, e.g.:"
print " import pickle"
print " stars = pickle.load(open(filename, 'r'))"
if __name__ == '__main__':
main() | mit | -1,982,956,073,703,998,700 | 39.173077 | 90 | 0.527299 | false | 3.47421 | false | false | false |
graphql-python/graphql-core | tests/validation/test_unique_directive_names.py | mit
from functools import partial
from graphql.utilities import build_schema
from graphql.validation.rules.unique_directive_names import UniqueDirectiveNamesRule
from .harness import assert_sdl_validation_errors
assert_errors = partial(assert_sdl_validation_errors, UniqueDirectiveNamesRule)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_unique_directive_names():
def no_directive():
assert_valid(
"""
type Foo
"""
)
def one_directive():
assert_valid(
"""
directive @foo on SCHEMA
"""
)
def many_directives():
assert_valid(
"""
directive @foo on SCHEMA
directive @bar on SCHEMA
directive @baz on SCHEMA
"""
)
def directive_and_non_directive_definitions_named_the_same():
assert_valid(
"""
query foo { __typename }
fragment foo on foo { __typename }
type foo
directive @foo on SCHEMA
"""
)
def directives_named_the_same():
assert_errors(
"""
directive @foo on SCHEMA
directive @foo on SCHEMA
""",
[
{
"message": "There can be only one directive named '@foo'.",
"locations": [(2, 24), (4, 24)],
}
],
)
def adding_new_directive_to_existing_schema():
schema = build_schema("directive @foo on SCHEMA")
assert_valid("directive @bar on SCHEMA", schema=schema)
def adding_new_directive_with_standard_name_to_existing_schema():
schema = build_schema("type foo")
assert_errors(
"directive @skip on SCHEMA",
[
{
"message": "Directive '@skip' already exists in the schema."
" It cannot be redefined.",
"locations": [(1, 12)],
}
],
schema,
)
def adding_new_directive_to_existing_schema_with_same_named_type():
schema = build_schema("type foo")
assert_valid("directive @foo on SCHEMA", schema=schema)
def adding_conflicting_directives_to_existing_schema():
schema = build_schema("directive @foo on SCHEMA")
assert_errors(
"directive @foo on SCHEMA",
[
{
"message": "Directive '@foo' already exists in the schema."
" It cannot be redefined.",
"locations": [(1, 12)],
}
],
schema,
)
valohai/valohai-cli | valohai_cli/git.py | mit
import os
import subprocess
from typing import Sequence
from valohai_cli.exceptions import NoCommit, NoGitRepo
def check_git_output(args: Sequence[str], directory: str) -> bytes:
try:
return subprocess.check_output(
args=args,
cwd=directory,
shell=False,
stderr=subprocess.STDOUT,
env=dict(os.environ, LC_ALL='C'),
)
except subprocess.CalledProcessError as cpe:
if cpe.returncode == 128:
output_text = cpe.output.decode().lower()
if 'not a git repository' in output_text:
raise NoGitRepo(directory)
if 'bad revision' in output_text:
raise NoCommit(directory)
raise
def get_current_commit(directory: str) -> str:
"""
(Try to) get the current commit of the Git working copy in `directory`.
:param directory: Directory path.
:return: Commit SHA
"""
return check_git_output(['git', 'rev-parse', 'HEAD'], directory).strip().decode()
def describe_current_commit(directory: str) -> str:
"""
(Try to) describe the lineage and status of the Git working copy in `directory`.
:param directory: Directory path.
:return: Git description string
"""
return check_git_output(['git', 'describe', '--always', '--long', '--dirty', '--all'], directory).strip().decode()
def get_file_at_commit(directory: str, commit: str, path: str) -> bytes:
"""
Get the contents of repository `path` at commit `commit` given the
Git working directory `directory`.
:param directory: Git working directory.
:param commit: Commit ID
:param path: In-repository path
:return: File contents as bytes
"""
args = ['git', 'show', f'{commit}:{path}']
return check_git_output(args, directory)
def expand_commit_id(directory: str, commit: str) -> str:
"""
Expand the possibly abbreviated (or otherwise referred to, i.e. "HEAD")
commit ID, and verify it exists.
:param directory: Git working directory
:param commit: Commit ID
:return: Expanded commit ID.
"""
return check_git_output(['git', 'rev-parse', '--verify', commit], directory).decode().strip()
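# Illustrative usage sketch (not part of the original module); assumes the given
# directory is inside a Git working copy with at least one commit, and README.md
# is a hypothetical in-repository path.
def _example_git_usage(directory: str = '.') -> None:
    commit = get_current_commit(directory)             # full commit SHA
    description = describe_current_commit(directory)   # e.g. "heads/master-0-g<sha>"
    full_id = expand_commit_id(directory, 'HEAD')      # expands "HEAD" to a full SHA
    readme = get_file_at_commit(directory, commit, 'README.md')  # file contents as bytes
    print(commit, description, full_id, len(readme))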
maartenbreddels/ipyvolume | ipyvolume/utils.py | mit
from __future__ import print_function
import os
import io
import time
import functools
import collections
import collections.abc
import numpy as np
import requests
import IPython
import zmq
# https://stackoverflow.com/questions/14267555/find-the-smallest-power-of-2-greater-than-n-in-python
def next_power_of_2(x):
return 1 if x == 0 else 2 ** (x - 1).bit_length()
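# Worked examples for the bit-length trick above (illustrative, not called anywhere):
def _next_power_of_2_examples():
    assert next_power_of_2(0) == 1
    assert next_power_of_2(1) == 1
    assert next_power_of_2(5) == 8
    assert next_power_of_2(9) == 16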
# original from http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
def dict_deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
r = dict_deep_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def nested_setitem(obj, dotted_name, value):
items = dotted_name.split(".")
for item in items[:-1]:
if item not in obj:
obj[item] = {}
obj = obj[item]
obj[items[-1]] = value
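# Worked example of the two dict helpers above (illustrative, not called anywhere):
def _dict_helpers_example():
    d = {'figure': {'width': 400, 'height': 500}}
    dict_deep_update(d, {'figure': {'height': 600}})  # merges nested keys in place
    assert d == {'figure': {'width': 400, 'height': 600}}
    obj = {}
    nested_setitem(obj, 'scatter.geo.size', 2)        # creates intermediate dicts
    assert obj == {'scatter': {'geo': {'size': 2}}}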
def download_to_bytes(url, chunk_size=1024 * 1024 * 10, loadbar_length=10):
"""Download a url to bytes.
if chunk_size is not None, prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook)
:param url: str or url
:param chunk_size: None or int in bytes
:param loadbar_length: int length of load bar
:return: (bytes, encoding)
"""
stream = False if chunk_size is None else True
print("Downloading {0:s}: ".format(url), end="")
response = requests.get(url, stream=stream)
# raise error if download was unsuccessful
response.raise_for_status()
encoding = response.encoding
total_length = response.headers.get('content-length')
if total_length is not None:
total_length = float(total_length)
if stream:
print("{0:.2f}Mb/{1:} ".format(total_length / (1024 * 1024), loadbar_length), end="")
else:
print("{0:.2f}Mb ".format(total_length / (1024 * 1024)), end="")
if stream:
print("[", end="")
chunks = []
loaded = 0
loaded_size = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
# print our progress bar
if total_length is not None:
while loaded < loadbar_length * loaded_size / total_length:
print("=", end='')
loaded += 1
loaded_size += chunk_size
chunks.append(chunk)
if total_length is None:
print("=" * loadbar_length, end='')
else:
while loaded < loadbar_length:
print("=", end='')
loaded += 1
content = b"".join(chunks)
print("] ", end="")
else:
content = response.content
print("Finished")
response.close()
return content, encoding
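# Illustrative usage (not part of the original module); the URL is a placeholder.
def _download_to_bytes_example():
    content, encoding = download_to_bytes('https://example.com/data.csv',
                                          chunk_size=1024 * 1024)
    return content.decode(encoding or 'utf-8')  # encoding may be None for binary responses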
def download_yield_bytes(url, chunk_size=1024 * 1024 * 10):
"""Yield a downloaded url as byte chunks.
:param url: str or url
:param chunk_size: None or int in bytes
:yield: byte chunks
"""
response = requests.get(url, stream=True)
# raise error if download was unsuccessful
response.raise_for_status()
total_length = response.headers.get('content-length')
if total_length is not None:
total_length = float(total_length)
length_str = "{0:.2f}Mb ".format(total_length / (1024 * 1024))
else:
length_str = ""
print("Yielding {0:s} {1:s}".format(url, length_str))
for chunk in response.iter_content(chunk_size=chunk_size):
yield chunk
response.close()
def download_to_file(url, filepath, resume=False, overwrite=False, chunk_size=1024 * 1024 * 10, loadbar_length=10):
"""Download a url.
prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook)
:type url: str
:type filepath: str
:param filepath: path to download to
:param resume: if True resume download from existing file chunk
:param overwrite: if True remove any existing filepath
:param chunk_size: None or int in bytes
:param loadbar_length: int length of load bar
:return:
"""
resume_header = None
loaded_size = 0
write_mode = 'wb'
if os.path.exists(filepath):
if overwrite:
os.remove(filepath)
elif resume:
# if we want to resume, first try and see if the file is already complete
loaded_size = os.path.getsize(filepath)
clength = requests.head(url).headers.get('content-length')
if clength is not None:
if int(clength) == loaded_size:
return None
# give the point to resume at
resume_header = {'Range': 'bytes=%s-' % loaded_size}
write_mode = 'ab'
else:
return None
stream = False if chunk_size is None else True
# start printing with no return character, so that we can have everything on one line
print("Downloading {0:s}: ".format(url), end="")
response = requests.get(url, stream=stream, headers=resume_header)
# raise error if download was unsuccessful
response.raise_for_status()
# get the size of the file if available
total_length = response.headers.get('content-length')
if total_length is not None:
total_length = float(total_length) + loaded_size
print("{0:.2f}Mb/{1:} ".format(total_length / (1024 * 1024), loadbar_length), end="")
print("[", end="")
parent = os.path.dirname(filepath)
if not os.path.exists(parent) and parent:
os.makedirs(parent)
with io.open(filepath, write_mode) as f:
loaded = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
# print our progress bar
if total_length is not None and chunk_size is not None:
while loaded < loadbar_length * loaded_size / total_length:
print("=", end='')
loaded += 1
loaded_size += chunk_size
f.write(chunk)
if total_length is None:
print("=" * loadbar_length, end='')
else:
while loaded < loadbar_length:
print("=", end='')
loaded += 1
print("] Finished")
def reduce_size(data, max_size, extent):
new_extent = []
for axis in range(3):
shape = data.shape
xmin, xmax = extent[2 - axis]
while shape[axis] > max_size:
slices1 = [slice(None, None, None)] * 3
slices1[axis] = slice(0, -1, 2)
slices2 = [slice(None, None, None)] * 3
slices2[axis] = slice(1, None, 2)
# print(data.shape, data.__getitem__(slices1).shape, data.__getitem__(slices2).shape)
data = (data[slices1] + data[slices2]) / 2
if shape[axis] % 2:
width = xmax - xmin
xmax = xmin + width / shape[axis] * (shape[axis] - 1)
shape = data.shape
new_extent.append((xmin, xmax))
return data, new_extent[::-1]
def grid_slice(amin, amax, shape, bmin, bmax):
"""Give a slice such that [amin, amax] is in [bmin, bmax].
Given a grid with shape, and begin and end coordinates amin, amax, what slice
do we need to take such that it minimally covers bmin, bmax.
amin, amax = 0, 1; shape = 4
0 0.25 0.5 0.75 1
| | | | |
bmin, bmax = 0.5, 1.0 should give 2,4, 0.5, 1.0
bmin, bmax = 0.4, 1.0 should give 1,4, 0.25, 1.0
bmin, bmax = -1, 1.0 should give 0,4, 0, 1.0
what about negative bmin and bmax ?
It will just flip bmin and bmax
bmin, bmax = 1.0, 0.5 should give 2,4, 0.5, 1.5
amin, amax = 1, 0; shape = 4
1 0.75 0.5 0.25 0
| | | | |
bmin, bmax = 0.5, 1.0 should give 0,2, 1.0, 0.5
bmin, bmax = 0.4, 1.0 should give 0,3, 1.0, 0.25
"""
width = amax - amin
bmin, bmax = min(bmin, bmax), max(bmin, bmax)
# normalize the coordinates
nmin = (bmin - amin) / width
nmax = (bmax - amin) / width
# grid indices
if width < 0:
imin = max(0, int(np.floor(nmax * shape)))
imax = min(shape, int(np.ceil(nmin * shape)))
else:
imin = max(0, int(np.floor(nmin * shape)))
imax = min(shape, int(np.ceil(nmax * shape)))
# transform back to the coordinate system of x
nmin = imin / shape
nmax = imax / shape
# if width < 0:
# return imin, imax, amin + nmax * width, amin + nmin * width
# else:
return (imin, imax), (amin + nmin * width, amin + nmax * width)
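# Worked examples matching the docstring above (illustrative, not called anywhere):
def _grid_slice_examples():
    # 4-cell grid spanning [0, 1]; covering [0.5, 1.0] needs cells 2..4 -> coords (0.5, 1.0)
    assert grid_slice(0, 1, 4, 0.5, 1.0) == ((2, 4), (0.5, 1.0))
    # covering [0.4, 1.0] must start one cell earlier: cells 1..4 -> coords (0.25, 1.0)
    assert grid_slice(0, 1, 4, 0.4, 1.0) == ((1, 4), (0.25, 1.0))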
def get_ioloop():
ipython = IPython.get_ipython()
if ipython and hasattr(ipython, 'kernel'):
return zmq.eventloop.ioloop.IOLoop.instance()
def debounced(delay_seconds=0.5, method=False):
def wrapped(f):
counters = collections.defaultdict(int)
@functools.wraps(f)
def execute(*args, **kwargs):
if method: # if it is a method, we want to have a counter per instance
key = args[0]
else:
key = None
counters[key] += 1
def debounced_execute(counter=counters[key]):
if counter == counters[key]: # only execute if the counter wasn't changed in the meantime
f(*args, **kwargs)
ioloop = get_ioloop()
def thread_safe():
ioloop.add_timeout(time.time() + delay_seconds, debounced_execute)
if ioloop is None: # we live outside of IPython (e.g. unittest), so execute directly
debounced_execute()
else:
ioloop.add_callback(thread_safe)
return execute
return wrapped
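# Illustrative usage (not part of the original module): rapid successive calls are
# collapsed into one execution after the delay; outside an IPython kernel the call
# runs immediately. For methods on a class, pass method=True so each instance gets
# its own counter.
@debounced(delay_seconds=0.2)
def _example_refresh(message):
    print("refreshing:", message)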
thom-at-redhat/cfme_tests | scripts/template_upload_rhevm.py
#!/usr/bin/env python2
"""This script takes various parameters specified in
cfme_data['template_upload']['template_upload_rhevm'] and/or by command-line arguments.
Parameters specified by command-line have higher priority, and override data in cfme_data.
This script is designed to run either as a standalone rhevm template uploader, or it can be used
together with template_upload_all script. This is why all the function calls, which would
normally be placed in main function, are located in function run(**kwargs).
"""
import argparse
import fauxfactory
import sys
from ovirtsdk.api import API
from ovirtsdk.xml import params
from utils.conf import cfme_data
from utils.conf import credentials
from utils.ssh import SSHClient
from utils.wait import wait_for
# temporary vm name (this vm will be deleted)
TEMP_VM_NAME = 'auto-vm-%s' % fauxfactory.gen_alphanumeric(8)
# temporary template name (this template will be deleted)
TEMP_TMP_NAME = 'auto-tmp-%s' % fauxfactory.gen_alphanumeric(8)
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument("--image_url", dest="image_url",
help="URL of ova file to upload", default=None)
parser.add_argument("--template_name", dest="template_name",
help="Name of the new template", default=None)
parser.add_argument("--edomain", dest="edomain",
help="Export domain for the remplate", default=None)
parser.add_argument("--sdomain", dest="sdomain",
help="Storage domain for vm and disk", default=None)
parser.add_argument("--cluster", dest="cluster",
help="Set cluster to operate in", default=None)
parser.add_argument("--disk_size", dest="disk_size",
help="Size of the second (database) disk, in B",
default=None, type=int)
parser.add_argument("--disk_format", dest="disk_format",
help="Format of the second (database) disk", default=None)
parser.add_argument("--disk_interface", dest="disk_interface",
help="Interface of second (database) disk", default=None)
parser.add_argument("--provider", dest="provider",
help="Rhevm provider (to look for in cfme_data)", default=None)
args = parser.parse_args()
return args
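# Illustrative sketch (not part of the original script): how command-line arguments
# could take precedence over cfme_data values before calling run(**kwargs), as the
# module docstring describes. Treating the cfme_data entry as a plain mapping is an
# assumption made for this example.
def _example_build_kwargs(args):
    kwargs = dict(cfme_data['template_upload']['template_upload_rhevm'])
    for key, value in vars(args).items():
        if value is not None:  # command-line values override cfme_data entries
            kwargs[key] = value
    return kwargs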
def make_ssh_client(rhevip, sshname, sshpass):
connect_kwargs = {
'username': sshname,
'password': sshpass,
'hostname': rhevip
}
return SSHClient(**connect_kwargs)
def get_ova_name(ovaurl):
"""Returns ova filename."""
return ovaurl.split("/")[-1]
def download_ova(ssh_client, ovaurl):
"""Downloads ova file using ssh_client and url
Args:
ssh_client: :py:class:`utils.ssh.SSHClient` instance
ovaurl: URL of ova file
"""
command = 'curl -O %s' % ovaurl
exit_status, output = ssh_client.run_command(command)
if exit_status != 0:
print "RHEVM: There was an error while downloading ova file:"
print output
sys.exit(127)
def template_from_ova(api, username, password, rhevip, edomain, ovaname, ssh_client):
"""Uses rhevm-image-uploader to make a template from ova file.
Args:
api: API for RHEVM.
username: Username to chosen RHEVM provider.
password: Password to chosen RHEVM provider.
rhevip: IP of chosen RHEVM provider.
edomain: Export domain of selected RHEVM provider.
ovaname: Name of ova file.
ssh_client: :py:class:`utils.ssh.SSHClient` instance
"""
if api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME) is not None:
print "RHEVM: Warning: found another template with this name."
print "RHEVM: Skipping this step. Attempting to continue..."
return
command = ['rhevm-image-uploader']
command.append("-u %s" % username)
command.append("-p %s" % password)
command.append("-r %s:443" % rhevip)
command.append("-N %s" % TEMP_TMP_NAME)
command.append("-e %s" % edomain)
command.append("upload %s" % ovaname)
command.append("-m --insecure")
exit_status, output = ssh_client.run_command(' '.join(command))
if exit_status != 0:
print "RHEVM: There was an error while making template from ova file:"
print output
sys.exit(127)
def import_template(api, edomain, sdomain, cluster):
"""Imports template from export domain to storage domain.
Args:
api: API to RHEVM instance.
edomain: Export domain of selected RHEVM provider.
sdomain: Storage domain of selected RHEVM provider.
cluster: Cluster to save imported template on.
"""
if api.templates.get(TEMP_TMP_NAME) is not None:
print "RHEVM: Warning: found another template with this name."
print "RHEVM: Skipping this step, attempting to continue..."
return
actual_template = api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME)
actual_storage_domain = api.storagedomains.get(sdomain)
actual_cluster = api.clusters.get(cluster)
import_action = params.Action(async=False, cluster=actual_cluster,
storage_domain=actual_storage_domain)
actual_template.import_template(action=import_action)
# Check if the template is really there
if not api.templates.get(TEMP_TMP_NAME):
print "RHEVM: The template failed to import"
sys.exit(127)
def make_vm_from_template(api, cluster):
"""Makes temporary VM from imported template. This template will be later deleted.
It's used to add a new disk and to convert back to template.
Args:
api: API to chosen RHEVM provider.
cluster: Cluster to save the temporary VM on.
"""
if api.vms.get(TEMP_VM_NAME) is not None:
print "RHEVM: Warning: found another VM with this name."
print "RHEVM: Skipping this step, attempting to continue..."
return
actual_template = api.templates.get(TEMP_TMP_NAME)
actual_cluster = api.clusters.get(cluster)
params_vm = params.VM(name=TEMP_VM_NAME, template=actual_template, cluster=actual_cluster)
api.vms.add(params_vm)
# we must wait for the vm do become available
def check_status():
status = api.vms.get(TEMP_VM_NAME).get_status()
if status.state != 'down':
return False
return True
wait_for(check_status, fail_condition=False, delay=5)
    # check if the vm is really there
if not api.vms.get(TEMP_VM_NAME):
print "RHEVM: VM could not be provisioned"
sys.exit(127)
def check_disks(api):
disks = api.vms.get(TEMP_VM_NAME).disks.list()
for disk in disks:
if disk.get_status().state != "ok":
return False
return True
# Sometimes rhevm is just not cooperative. This function is used to wait for the template
# on the export domain to become unlocked.
def check_edomain_template(api, edomain):
template = api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME)
if template.get_status().state != "ok":
return False
return True
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface):
"""Adds second disk to a temporary VM.
Args:
api: API to chosen RHEVM provider.
sdomain: Storage domain to save new disk onto.
disk_size: Size of the new disk (in B).
disk_format: Format of the new disk.
disk_interface: Interface of the new disk.
"""
if len(api.vms.get(TEMP_VM_NAME).disks.list()) > 1:
print "RHEVM: Warning: found more than one disk in existing VM."
print "RHEVM: Skipping this step, attempting to continue..."
return
actual_sdomain = api.storagedomains.get(sdomain)
temp_vm = api.vms.get(TEMP_VM_NAME)
params_disk = params.Disk(storage_domain=actual_sdomain, size=disk_size,
interface=disk_interface, format=disk_format)
temp_vm.disks.add(params_disk)
wait_for(check_disks, [api], fail_condition=False, delay=5, num_sec=900)
    # check if there are two disks
if len(api.vms.get(TEMP_VM_NAME).disks.list()) < 2:
print "RHEVM: Disk failed to add"
sys.exit(127)
def templatize_vm(api, template_name, cluster):
"""Templatizes temporary VM. Result is template with two disks.
Args:
api: API to chosen RHEVM provider.
template_name: Name of the final template.
cluster: Cluster to save the final template onto.
"""
if api.templates.get(template_name) is not None:
print "RHEVM: Warning: found finished template with this name."
print "RHEVM: Skipping this step, attempting to continue..."
return
temporary_vm = api.vms.get(TEMP_VM_NAME)
actual_cluster = api.clusters.get(cluster)
new_template = params.Template(name=template_name, vm=temporary_vm, cluster=actual_cluster)
api.templates.add(new_template)
wait_for(check_disks, [api], fail_condition=False, delay=5, num_sec=900)
    # check if the template is really there
if not api.templates.get(template_name):
print "RHEVM: VM failed to templatize"
sys.exit(127)
def cleanup(api, edomain, ssh_client, ovaname):
"""Cleans up all the mess that the previous functions left behind.
Args:
api: API to chosen RHEVM provider.
edomain: Export domain of chosen RHEVM provider.
"""
command = 'rm %s' % ovaname
exit_status, output = ssh_client.run_command(command)
temporary_vm = api.vms.get(TEMP_VM_NAME)
if temporary_vm is not None:
temporary_vm.delete()
temporary_template = api.templates.get(TEMP_TMP_NAME)
if temporary_template is not None:
temporary_template.delete()
# waiting for template on export domain
wait_for(check_edomain_template, [api, edomain], fail_condition=False, delay=5)
unimported_template = api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME)
if unimported_template is not None:
unimported_template.delete()
def api_params_resolution(item_list, item_name, item_param):
"""Picks and prints info about parameter obtained by api call.
Args:
item_list: List of possible candidates to pick from.
item_name: Name of parameter obtained by api call.
item_param: Name of parameter representing data in the script.
"""
if len(item_list) == 0:
print "RHEVM: Cannot find %s (%s) automatically." % (item_name, item_param)
print "Please specify it by cmd-line parameter '--%s' or in cfme_data." % item_param
return None
elif len(item_list) > 1:
print "RHEVM: Found multiple instances of %s. Picking '%s'." % (item_name, item_list[0])
else:
print "RHEVM: Found %s '%s'." % (item_name, item_list[0])
return item_list[0]
def get_edomain(api):
"""Discovers suitable export domain automatically.
Args:
api: API to RHEVM instance.
"""
edomain_names = []
for domain in api.storagedomains.list(status=None):
if domain.get_type() == 'export':
edomain_names.append(domain.get_name())
return api_params_resolution(edomain_names, 'export domain', 'edomain')
def get_sdomain(api):
"""Discovers suitable storage domain automatically.
Args:
api: API to RHEVM instance.
"""
sdomain_names = []
for domain in api.storagedomains.list(status=None):
if domain.get_type() == 'data':
sdomain_names.append(domain.get_name())
return api_params_resolution(sdomain_names, 'storage domain', 'sdomain')
def get_cluster(api):
"""Discovers suitable cluster automatically.
Args:
api: API to RHEVM instance.
"""
cluster_names = []
for cluster in api.clusters.list():
for host in api.hosts.list():
if host.get_cluster().id == cluster.id:
cluster_names.append(cluster.get_name())
return api_params_resolution(cluster_names, 'cluster', 'cluster')
def check_kwargs(**kwargs):
for key, val in kwargs.iteritems():
if val is None:
print "RHEVM: please supply required parameter '%s'." % key
sys.exit(127)
def update_params_api(api, **kwargs):
"""Updates parameters with ones determined from api call.
Args:
api: API to RHEVM instance.
kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm']
"""
if kwargs.get('edomain') is None:
kwargs['edomain'] = get_edomain(api)
if kwargs.get('sdomain') is None:
kwargs['sdomain'] = get_sdomain(api)
if kwargs.get('cluster') is None:
kwargs['cluster'] = get_cluster(api)
return kwargs
def make_kwargs(args, cfme_data, **kwargs):
"""Assembles all the parameters in case of running as a standalone script.
    Makes sure that the parameters given by command-line arguments have higher priority.
    Makes sure that all the needed parameters have proper values.
Args:
args: Arguments given from command-line.
cfme_data: Data in cfme_data.yaml
kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm']
"""
args_kwargs = dict(args._get_kwargs())
    if len(kwargs) == 0:
return args_kwargs
template_name = kwargs.get('template_name', None)
if template_name is None:
template_name = cfme_data['basic_info']['appliance_template']
kwargs.update({'template_name': template_name})
for kkey, kval in kwargs.iteritems():
for akey, aval in args_kwargs.iteritems():
if aval is not None:
if kkey == akey:
if kval != aval:
kwargs[akey] = aval
for akey, aval in args_kwargs.iteritems():
if akey not in kwargs.iterkeys():
kwargs[akey] = aval
return kwargs
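# Illustrative sketch (not part of the original script): how make_kwargs() merges its
# three sources. The values below are made-up placeholders, not real cfme_data entries.
def _example_make_kwargs_precedence():
    class FakeArgs(object):
        def _get_kwargs(self):
            # argparse.Namespace exposes its attributes through _get_kwargs()
            return [('edomain', 'export-from-cli'), ('sdomain', None)]
    cfme_data_stub = {'basic_info': {'appliance_template': 'cfme-template'}}
    yaml_kwargs = {'edomain': 'export-from-yaml', 'sdomain': 'data-from-yaml'}
    merged = make_kwargs(FakeArgs(), cfme_data_stub, **yaml_kwargs)
    # 'edomain' now holds the command-line value, 'sdomain' keeps the yaml value and
    # 'template_name' was filled in from basic_info.
    return merged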
def run(**kwargs):
"""Calls all the functions needed to upload new template to RHEVM.
This is called either by template_upload_all script, or by main function.
Args:
**kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
"""
ovaname = get_ova_name(kwargs.get('image_url'))
mgmt_sys = cfme_data['management_systems'][kwargs.get('provider')]
rhevurl = mgmt_sys['hostname']
rhevm_credentials = mgmt_sys['credentials']
username = credentials[rhevm_credentials]['username']
password = credentials[rhevm_credentials]['password']
ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials']
sshname = credentials[ssh_rhevm_creds]['username']
sshpass = credentials[ssh_rhevm_creds]['password']
rhevip = mgmt_sys['ipaddress']
apiurl = 'https://%s:443/api' % rhevurl
ssh_client = make_ssh_client(rhevip, sshname, sshpass)
api = API(url=apiurl, username=username, password=password,
insecure=True, persistent_auth=False)
template_name = kwargs.get('template_name', None)
if template_name is None:
template_name = cfme_data['basic_info']['appliance_template']
kwargs = update_params_api(api, **kwargs)
check_kwargs(**kwargs)
if api.templates.get(template_name) is not None:
print "RHEVM: Found finished template with this name."
print "RHEVM: The script will now end."
else:
print "RHEVM: Downloading .ova file..."
download_ova(ssh_client, kwargs.get('image_url'))
try:
print "RHEVM: Templatizing .ova file..."
template_from_ova(api, username, password, rhevip, kwargs.get('edomain'),
ovaname, ssh_client)
print "RHEVM: Importing new template..."
import_template(api, kwargs.get('edomain'), kwargs.get('sdomain'),
kwargs.get('cluster'))
print "RHEVM: Making a temporary VM from new template..."
make_vm_from_template(api, kwargs.get('cluster'))
print "RHEVM: Adding disk to created VM..."
add_disk_to_vm(api, kwargs.get('sdomain'), kwargs.get('disk_size'),
kwargs.get('disk_format'), kwargs.get('disk_interface'))
print "RHEVM: Templatizing VM..."
templatize_vm(api, template_name, kwargs.get('cluster'))
finally:
cleanup(api, kwargs.get('edomain'), ssh_client, ovaname)
ssh_client.close()
api.disconnect()
print "RHEVM: Done."
if __name__ == "__main__":
args = parse_cmd_line()
kwargs = cfme_data['template_upload']['template_upload_rhevm']
final_kwargs = make_kwargs(args, cfme_data, **kwargs)
run(**final_kwargs)
| gpl-2.0 | 5,226,702,199,609,717,000 | 35.242424 | 96 | 0.643992 | false | 3.719236 | false | false | false |
TheCamusean/DLRCev3 | scripts/Computer_vision_files/pose_estimation.py | 1 | 2128 | import numpy as np
import cv2
import glob
import math as m
import time
import timeout_decorator as tm
def draw(img, corners, imgpts):
corner = tuple(corners[0].ravel())
img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
return img
def silent_timeout(t, *args):
try:
return tm.timeout(t)(cv2.findChessboardCorners)(*args)
except tm.timeout_decorator.TimeoutError:
print("Timed out")
return (False, False)
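# Sketch (assumption, not part of the original script): the 'camera_parameters.npz' file
# loaded below is expected to come from a prior chessboard calibration run, e.g. saved
# with a helper like this:
def save_calibration_example(objpoints, imgpoints, image_size):
    # objpoints/imgpoints are the usual 3D-2D chessboard correspondences
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, image_size, None, None)
    np.savez('camera_parameters.npz', cam_matrix=mtx, dist_coeff=dist)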
# LOAD THE PARAMETERS
data = np.load('camera_parameters.npz')
mtx=data["cam_matrix"]
dist=data["dist_coeff"]
# Now you have the camera calibration parameters
# FROM HERE ON WE PLOT THE AXIS ON THE CHESSBOARD
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((9*6,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
## TO DRAW THE AXIS
cap = cv2.VideoCapture(1)
#img = cv2.imread('Chessboard_9.jpg')
while True:
t0=time.time()
while (time.time()-t0<0.1):
ret,img=cap.read()
tinit=time.time()
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    ret, corners = silent_timeout(0.07, gray, (9,6), None)
if ret == True:
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
# Find the rotation and translation vectors.
        retval, rvecs2, tvecs2, inliers2 = cv2.solvePnPRansac(objp, corners2, mtx, dist)
        tvecs2 = 2.5*tvecs2
print("translation x:{},y:{},z:{}".format(tvecs2[0],tvecs2[1],tvecs2[2]))
#print("rotation x:{},y:{},z:{}".format(rvecs2[0],rvecs2[1],rvecs2[2]))
# project 3D points to image plane
imgpts, jac = cv2.projectPoints(axis, rvecs2, tvecs2, mtx, dist)
img = draw(img,corners2,imgpts)
print("retard",time.time()-tinit)
cv2.imshow('img',img)
if cv2.waitKey(1) & 0xFF==32:
break
break
| mit | -4,619,761,552,942,554,000 | 29.4 | 86 | 0.629229 | false | 2.66 | false | false | false |
Xiol/CVEChecker | old/rhsa.py | 1 | 5144 | #!/usr/bin/env python -OO
# This Source Code Form is subject to the terms of the Mozilla
# Public License, v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# CVE -> RHSA Report Generator
#
# Requires Beautiful Soup: http://www.crummy.com/software/BeautifulSoup/
# Currently only tested with Python 2.6, but no reason it shouldn't work
# with older Python versions (minimum 2.3). Not compatible with Python 3.
#
# Use like: ./rhsa.py < cvelist.txt, where cvelist.txt is a whitespace
# separated list of CVE numbers in the format CVE-YYYY-XXXX.
#
# This will find the CVE on the CVE_BASE_URL site and scrape for the
# related RHSA. If it can't find the CVE, chances are it doesn't affect
# Red Hat or Linux. If it can't find an RHSA, then it'll be something
# they don't intend to fix, so output the statement from Red Hat.
# Otherwise, consider resolved and output the link to the RHSA.
# This of course assumes you ARE running the latest CentOS/RHEL release
# versions of the software you're checking the CVEs for.
#
# No guarantees anything this outputs is correct or proper.
import sys
import re
import urllib2
import sqlite3
import os
import snmp
from time import sleep
from BeautifulSoup import BeautifulSoup
CVE_BASE_URL = "https://www.redhat.com/security/data/cve/"
RHEL_VERSION = "5"
rhsa_r = re.compile(".*Red Hat Enterprise Linux version "+RHEL_VERSION+".*")
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
conn = sqlite3.connect(os.path.join(curdir, 'cache.db'), check_same_thread = False)
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS cache (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, platform TEXT NOT NULL, cve TEXT NOT NULL, result TEXT NOT NULL)")
cur.execute("CREATE INDEX IF NOT EXISTS cve_idx ON cache (cve)")
conn.commit()
cur.close()
def get_cve_info(cve, platform='x86_64', host=None):
if platform not in ['x86_64','i386']:
return { 'cve': "Platform must be 'x86_64' or 'i386'.", 'verinfo': None }
if host:
snmpq = snmp.SNMPQueryTool(host)
snmpq.get_installed_packages()
cve = cve.strip()
#cachechk = _retr_cve(cve, platform)
#if cachechk is not None:
# return cachechk
cveurl = CVE_BASE_URL + cve + ".html"
try:
html = urllib2.urlopen(cveurl).read()
except urllib2.HTTPError:
# 404 or general screwup, don't cache in case it turns up later
return { 'cve': cve + " -- !!FIX!! Not found on Red Hat's website. Google it, might be Windows only or bad CVE reference.", 'verinfo': None }
except urllib2.URLError:
return { 'cve': "There was a problem with the URL.", 'verinfo': None }
soup = BeautifulSoup(html)
if soup.find(text=rhsa_r) is not None:
# If we've found the above, we have an RHSA (in theory!)
rhsa = soup.find(text=rhsa_r).findNext('a')['href']
rhsa_soup = BeautifulSoup(urllib2.urlopen(rhsa).read())
ver = rhsa_soup.find('a',attrs={"name": "Red Hat Enterprise Linux (v. "+RHEL_VERSION+" server)"}).findNext(text="SRPMS:").findNext('td').contents[0]
ver = ver.replace(".src.", '.'+platform+'.')
result = "Resolved in version "+ver+": " + rhsa
_add_cve(cve, result, platform)
return { 'cve': cve + " -- " + result, 'verinfo': None }
elif soup.find(text="Statement"):
statement = ' '.join([text for text in soup.find(text="Statement").findNext('p').findAll(text=True)])
result = "Red Hat Statement: \""+ statement + "\" - " + cveurl
_add_cve(cve, result, platform)
return { 'cve': cve + " -- " + result, 'verinfo': None }
elif soup.find(text="CVE not found"):
# They changed their website! This is needed to pick up the lack of a CVE now.
result = "!!FIX!! Not found on Red Hat's website. Google it, might be Windows only or bad CVE reference."
return { 'cve': cve + " -- " + result, 'verinfo': None }
else:
result = "!!FIX!! No RHSA for version "+RHEL_VERSION+", no statement either. See: " + cveurl
#_add_cve(cve, result, platform)
return { 'cve': cve + " -- " + result, 'verinfo': None }
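# Minimal usage sketch (not part of the original module). The CVE id below is only an
# example; get_cve_info() performs a live HTTP request against Red Hat's site.
def _example_lookup():
    info = get_cve_info('CVE-2014-0160', platform='x86_64')
    print info['cve']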
def _add_cve(cve, result, platform):
cur = conn.cursor()
cur.execute("""INSERT INTO cache(cve, result, platform) VALUES (?, ?, ?)""", (cve, result, platform))
conn.commit()
cur.close()
def _retr_cve(cve, platform):
cur = conn.cursor()
cur.execute("""SELECT cve,result FROM cache WHERE cve=? AND platform=? LIMIT 1""", (cve, platform))
result = cur.fetchone()
cur.close()
if result is not None:
result = ' -- '.join([t for t in result if t is not None])
return result
if __name__ == '__main__':
rawdata = ""
if sys.stdin.isatty():
print "No input detected. You need to pipe a whitespace separated list of CVEs in!"
print "e.g. `./rhsa.py < cvelist.txt` or your preferred method."
sys.exit(1)
else:
rawdata = sys.stdin.read()
cves = rawdata.split()
for cve in cves:
print get_cve_info(cve)['cve']
| mpl-2.0 | -8,972,298,139,212,128,000 | 40.483871 | 197 | 0.648523 | false | 3.32945 | false | false | false |
jszymon/pacal | pacal/examples/central_limit_demo.py | 1 | 2068 | #!===============================
#! Demo of central limit theorem
#!===============================
from __future__ import print_function
import sys
from pylab import *
from pacal import *
from pacal import params
import time
params.general.warn_on_dependent = False
if __name__ == "__main__":
colors = "kbgrcmy"
def central_limit_demo(X, N = 5, xmin = None, xmax = None, ymax = None, **args):
tic=time.time()
figure()
title("Limit of averages of " + X.getName())
X.plot(linewidth = 4, color = "c", **args)
Y = X
print("Limit of averages of " + X.getName() + ": ", end=' ')
for i in range(N-1):
print(i+2, end=' ')
sys.stdout.flush()
Y += X
(Y/(i+2)).plot(color = colors[i%len(colors)], **args)
if xmin is not None:
xlim(xmin = xmin)
if xmax is not None:
xlim(xmax = xmax)
ylim(ymin = 0)
if ymax is not None:
ylim(ymax = ymax)
print()
print("time===", time.time()-tic)
#show()
#!----------------------
#! uniform distribution
#!----------------------
X = UniformDistr(0,1)
central_limit_demo(X, xmin=-0.1, xmax=1.1)
#!----------------------
#! Chi^2_1
#!----------------------
X = ChiSquareDistr(1)
central_limit_demo(X, N=5, ymax=1.5, xmax=3)
#!----------------------
#! Student T w. 2df
#!----------------------
X = StudentTDistr(2)
central_limit_demo(X, N = 5, xmin=-5, xmax=5)
#!----------------------
#! a ratio distribution
#!----------------------
X = UniformDistr(1,3) / UniformDistr(-2,1)
central_limit_demo(X, N = 5, xmin=-5, xmax=5)
#!----------------------
#! Cauchy distribution
#!----------------------
X = CauchyDistr()
central_limit_demo(X, xmin = -10, xmax = 10)
#!----------------------
#! Levy distribution
#!----------------------
X = LevyDistr()
central_limit_demo(X, xmax=5, numberOfPoints = 10000)
show()
| gpl-3.0 | -3,555,198,765,811,452,400 | 26.210526 | 84 | 0.436654 | false | 3.418182 | false | false | false |
anacode/anacode-toolkit | anacode/api/writers.py | 1 | 20217 | # -*- coding: utf-8 -*-
import os
import csv
import datetime
import pandas as pd
from itertools import chain
from functools import partial
from anacode import codes
def backup(root, files):
"""Backs up `files` from `root` directory and return list of backed up
file names. Backed up files will have datetime suffix appended to original
file name.
:param root: Absolute path to folder where files to backup are located
:type root: str
    :param files: Names of files that need backing up
    :type files: iterable of str
:return: list -- List of backed up file names
"""
backed_up = []
join = os.path.join
root_contents = os.listdir(root)
dt_str = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
for file_name in files:
if file_name not in root_contents:
continue
new_name = file_name + '_' + dt_str
os.rename(join(root, file_name), join(root, new_name))
backed_up.append(new_name)
return backed_up
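# Usage sketch (not part of the original module): rotating any previously written result
# files out of the way before a new run. The directory and file names are placeholders.
def _example_backup(target_dir='.'):
    renamed = backup(target_dir, ['categories.csv', 'concepts.csv'])
    return renamed  # e.g. ['categories.csv_20170101120000', ...]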
HEADERS = {
'categories': [u'doc_id', u'text_order', u'category', u'probability'],
'concepts': [u'doc_id', u'text_order', u'concept', u'freq',
u'relevance_score', u'concept_type'],
'concepts_surface_strings': [u'doc_id', u'text_order', u'concept',
u'surface_string', u'text_span'],
'sentiments': [u'doc_id', u'text_order', u'sentiment_value'],
'absa_entities': [u'doc_id', u'text_order', u'entity_name', u'entity_type',
u'surface_string', u'text_span'],
'absa_normalized_texts': [u'doc_id', u'text_order', u'normalized_text'],
'absa_relations': [u'doc_id', u'text_order', u'relation_id',
u'opinion_holder', u'restriction', u'sentiment_value',
u'is_external', u'surface_string', u'text_span'],
'absa_relations_entities': [u'doc_id', u'text_order', u'relation_id',
u'entity_type', u'entity_name'],
'absa_evaluations': [u'doc_id', u'text_order', u'evaluation_id',
u'sentiment_value', u'surface_string', u'text_span'],
'absa_evaluations_entities': [u'doc_id', u'text_order', u'evaluation_id',
u'entity_type', u'entity_name'],
}
# `anacode.agg.aggregations.ApiDataset.from_path` depends
# on ordering of files defined in values here
CSV_FILES = {
'categories': ['categories.csv'],
'concepts': ['concepts.csv', 'concepts_surface_strings.csv'],
'sentiments': ['sentiments.csv'],
'absa': [
'absa_entities.csv', 'absa_normalized_texts.csv',
'absa_relations.csv', 'absa_relations_entities.csv',
'absa_evaluations.csv', 'absa_evaluations_entities.csv'
]
}
def categories_to_list(doc_id, analyzed, single_document=False):
"""Converts categories response to flat list with doc_id included.
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for categories call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'categories' pointing to flat list
of categories
"""
cat_list = []
for order, text_analyzed in enumerate(analyzed):
for result_dict in text_analyzed:
row = [doc_id, 0, result_dict.get('label'),
result_dict.get('probability')]
if single_document:
row[1] += order
else:
row[0] += order
cat_list.append(row)
return {'categories': cat_list}
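# Sketch of the expected input/output (an assumption based on the docstring; the response
# fragment below is made up and not a real Anacode API payload):
def _example_categories_to_list():
    analyzed = [[{'label': 'auto', 'probability': 0.71},
                 {'label': 'travel', 'probability': 0.29}]]
    rows = categories_to_list(0, analyzed, single_document=False)
    # rows == {'categories': [[0, 0, 'auto', 0.71], [0, 0, 'travel', 0.29]]}
    return rows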
def concepts_to_list(doc_id, analyzed, single_document=False):
"""Converts concepts response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for concepts call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with two keys: 'concepts' pointing to flat list
of found concepts and their metadata and 'concepts_surface_strings'
pointing to flat list of strings realizing found concepts
"""
con_list, exp_list = [], []
for order, text_analyzed in enumerate(analyzed):
for concept in text_analyzed or []:
row = [doc_id, 0, concept.get('concept'),
concept.get('freq'), concept.get('relevance_score'),
concept.get('type')]
if single_document:
row[1] += order
else:
row[0] += order
con_list.append(row)
for string in concept.get('surface', []):
surface_str, span = string['surface_string'], string['span']
exp_list.append([row[0], row[1], concept.get('concept'),
surface_str, '-'.join(map(str, span))])
return {'concepts': con_list, 'concepts_surface_strings': exp_list}
def sentiments_to_list(doc_id, analyzed, single_document=False):
"""Converts sentiments response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for sentiment call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'sentiments' pointing to flat list
of sentiment probabilities
"""
sen_list = []
for order, sentiment in enumerate(analyzed):
row = [doc_id, 0, sentiment['sentiment_value']]
if single_document:
# this should not happen
row[1] += order
else:
row[0] += order
sen_list.append(row)
return {'sentiments': sen_list}
def _absa_entities_to_list(doc_id, order, entities):
ent_list = []
for entity_dict in entities:
text_span = '-'.join(map(str, entity_dict['surface']['span']))
surface_string = entity_dict['surface']['surface_string']
for semantics in entity_dict['semantics']:
row = [doc_id, order, semantics['value'], semantics['type'],
surface_string, text_span]
ent_list.append(row)
return ent_list
def _absa_normalized_text_to_list(doc_id, order, normalized_text):
return [[doc_id, order, normalized_text]]
def _absa_relations_to_list(doc_id, order, relations):
rel_list, ent_list = [], []
for rel_index, rel in enumerate(relations):
rel_row = [doc_id, order, rel_index,
rel['semantics']['opinion_holder'],
rel['semantics']['restriction'],
rel['semantics']['sentiment_value'],
rel['external_entity'],
rel['surface']['surface_string'],
'-'.join(map(str, rel['surface']['span']))]
rel_list.append(rel_row)
for ent in rel['semantics'].get('entity', []):
ent_row = [doc_id, order, rel_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return rel_list, ent_list
def _absa_evaluations_to_list(doc_id, order, evaluations):
eval_list, ent_list = [], []
for eval_index, evaluation in enumerate(evaluations):
eval_row = [doc_id, order, eval_index,
evaluation['semantics']['sentiment_value'],
evaluation['surface']['surface_string'],
'-'.join(map(str, evaluation['surface']['span']))]
eval_list.append(eval_row)
for ent in evaluation['semantics'].get('entity', []):
ent_row = [doc_id, order, eval_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return eval_list, ent_list
def absa_to_list(doc_id, analyzed, single_document=False):
"""Converts ABSA response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for ABSA call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with six keys: 'absa_entities' pointing to flat
list of found entities with metadata, 'absa_normalized_texts' pointing to
flat list of normalized chinese texts, 'absa_relations' pointing to found
entity relations with metadata, 'absa_relations_entities' pointing to flat
list of entities that belong to absa relations, 'absa_evaluations'
pointing to flat list of entity evaluations with metadata and
'absa_evaluations_entities' specifying entities in absa_evaluations
"""
absa = {
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': []
}
for order, text_analyzed in enumerate(analyzed):
if single_document:
current_id = doc_id
text_order = order
else:
current_id = doc_id + order
text_order = 0
entities = text_analyzed['entities']
ents = _absa_entities_to_list(current_id, text_order, entities)
text = text_analyzed['normalized_text']
texts = _absa_normalized_text_to_list(current_id, text_order, text)
relations = text_analyzed['relations']
rels, rel_ents = _absa_relations_to_list(current_id, text_order,
relations)
evaluations = text_analyzed['evaluations']
evals, eval_ents = _absa_evaluations_to_list(current_id, text_order,
evaluations)
absa['absa_entities'].extend(ents)
absa['absa_normalized_texts'].extend(texts)
absa['absa_relations'].extend(rels)
absa['absa_relations_entities'].extend(rel_ents)
absa['absa_evaluations'].extend(evals)
absa['absa_evaluations_entities'].extend(eval_ents)
return absa
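# Sketch of the expected ABSA input (an assumption based on the converters above; the
# payload below is made up and only exercises the 'entities' part of the response):
def _example_absa_to_list():
    analyzed = [{
        'entities': [{
            'surface': {'surface_string': u'camera', 'span': [0, 6]},
            'semantics': [{'value': u'Camera', 'type': u'feature_subjective'}],
        }],
        'normalized_text': u'camera is great',
        'relations': [],
        'evaluations': [],
    }]
    result = absa_to_list(0, analyzed, single_document=False)
    # result['absa_entities'] == [[0, 0, u'Camera', u'feature_subjective', u'camera', '0-6']]
    return result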
class Writer(object):
"""Base "abstract" class containing common methods that are
needed by all implementations of Writer interface.
The writer interface consists of init, close and write_bulk methods.
"""
def __init__(self):
self.ids = {'scrape': 0, 'analyze': 0}
def write_row(self, call_type, call_result):
"""Decides what kind of data it got and calls appropriate write method.
:param call_type: Library's ID of anacode call
:type call_type: int
:param call_result: JSON response from Anacode API
:type call_result: list
"""
if call_type == codes.SCRAPE:
self.write_scrape(call_result)
if call_type == codes.ANALYZE:
self.write_analysis(call_result)
def _add_new_data_from_dict(self, new_data):
"""Not implemented here!
Used by write methods to submit new Anacode API response data for storage.
:param new_data: dict; keys are data sets names and values are
flat lists of rows
:type new_data: dict
"""
pass
def write_scrape(self, scraped):
self.ids['scrape'] += 1
def write_analysis(self, analyzed):
"""Inspects analysis result for performed analysis and delegates
persisting of results to appropriate write methods.
:param analyzed: JSON object analysis response
:type: dict
"""
single_document = analyzed.get('single_document', False)
analyzed_length = 1
if 'categories' in analyzed:
categories = analyzed['categories']
self.write_categories(categories, single_document=single_document)
if not single_document:
analyzed_length = len(categories)
if 'concepts' in analyzed:
concepts = analyzed['concepts']
self.write_concepts(concepts, single_document=single_document)
if not single_document:
analyzed_length = len(concepts)
if 'sentiment' in analyzed:
sentiment = analyzed['sentiment']
self.write_sentiment(sentiment, single_document=single_document)
if not single_document:
analyzed_length = len(sentiment)
if 'absa' in analyzed:
absa = analyzed['absa']
self.write_absa(analyzed['absa'], single_document=single_document)
if not single_document:
analyzed_length = len(absa)
self.ids['analyze'] += analyzed_length
def write_categories(self, analyzed, single_document=False):
"""Converts categories analysis result to flat lists and stores them.
:param analyzed: JSON categories analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = categories_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_concepts(self, analyzed, single_document=False):
"""Converts concepts analysis result to flat lists and stores them.
:param analyzed: JSON concepts analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = concepts_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_sentiment(self, analyzed, single_document=False):
"""Converts sentiment analysis result to flat lists and stores them.
:param analyzed: JSON sentiment analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = sentiments_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_absa(self, analyzed, single_document=False):
"""Converts absa analysis result to flat lists and stores them.
:param analyzed: JSON absa analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = absa_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_bulk(self, results):
"""Stores multiple anacode api's JSON responses marked with call IDs as
tuples (call_id, call_result). Both scrape and analyze call IDs
are defined in anacode.codes module.
:param results: List of anacode responses with IDs of calls used
:type results: list
"""
for call_type, call_result in results:
self.write_row(call_type, call_result)
def init(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def close(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class DataFrameWriter(Writer):
"""Writes Anacode API output into pandas.DataFrame instances."""
def __init__(self, frames=None):
"""Initializes dictionary of result frames. Alternatively uses given
frames dict for storage.
:param frames: Might be specified to use this instead of new dict
:type frames: dict
"""
super(DataFrameWriter, self).__init__()
self.frames = {} if frames is None else frames
self._row_data = {}
def init(self):
"""Initialized empty lists for each possible data frame."""
self._row_data = {
'categories': [],
'concepts': [],
'concepts_surface_strings': [],
'sentiments': [],
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': [],
}
def close(self):
"""Creates pandas data frames to self.frames dict and clears internal
state.
"""
for name, row in self._row_data.items():
if len(row) > 0:
self.frames[name] = pd.DataFrame(row, columns=HEADERS[name])
self._row_data = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
        :param new_data: Anacode api result converted to flat lists
        :type new_data: dict
"""
for name, row_list in new_data.items():
self._row_data[name].extend(row_list)
class CSVWriter(Writer):
def __init__(self, target_dir='.'):
"""Initializes Writer to store Anacode API analysis results in target_dir in
csv files.
:param target_dir: Path to directory where to store csv files
:type target_dir: str
"""
super(CSVWriter, self).__init__()
self.target_dir = os.path.abspath(os.path.expanduser(target_dir))
self._files = {}
self.csv = {}
def _open_csv(self, csv_name):
path = partial(os.path.join, self.target_dir)
try:
return open(path(csv_name), 'w', newline='')
except TypeError:
return open(path(csv_name), 'wb')
def init(self):
"""Opens all csv files for writing and writes headers to them."""
self.close()
backup(self.target_dir, chain.from_iterable(CSV_FILES.values()))
self._files = {
'categories': self._open_csv('categories.csv'),
'concepts': self._open_csv('concepts.csv'),
'concepts_surface_strings': self._open_csv(
'concepts_surface_strings.csv'
),
'sentiments': self._open_csv('sentiments.csv'),
'absa_entities': self._open_csv('absa_entities.csv'),
'absa_normalized_texts': self._open_csv(
'absa_normalized_texts.csv'
),
'absa_relations': self._open_csv('absa_relations.csv'),
'absa_relations_entities': self._open_csv(
'absa_relations_entities.csv'
),
'absa_evaluations': self._open_csv('absa_evaluations.csv'),
'absa_evaluations_entities': self._open_csv(
'absa_evaluations_entities.csv'
),
}
self.csv = {name: csv.writer(fp) for name, fp in self._files.items()}
for name, writer in self.csv.items():
writer.writerow(HEADERS[name])
def _csv_has_content(self, csv_path):
if not os.path.isfile(csv_path):
return False
with open(csv_path) as fp:
for line_count, line in enumerate(fp):
                if line_count == 1 and line.strip() != '':
return True
return False
def close(self):
"""Closes all csv files and removes empty ones."""
for name, file in self._files.items():
try:
file.close()
except (IOError, AttributeError):
print('Problem closing "{}"'.format(name))
for file_list in CSV_FILES.values():
for file_name in file_list:
path = os.path.join(self.target_dir, file_name)
if os.path.isfile(path) and not self._csv_has_content(path):
os.unlink(path)
self._files = {}
self.csv = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
        :param new_data: Anacode api result converted to flat lists
        :type new_data: dict
"""
for name, row_list in new_data.items():
self.csv[name].writerows(row_list)
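# Usage sketch (not part of the original module): persisting one analyze response to csv
# files. `analysis_json` stands for an already decoded Anacode API analyze response.
def _example_write_bulk(analysis_json, target_dir='/tmp/anacode'):
    with CSVWriter(target_dir) as writer:
        # __enter__ opens the csv files and writes headers, __exit__ closes them
        # and removes any csv files that ended up empty
        writer.write_bulk([(codes.ANALYZE, analysis_json)])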
| bsd-3-clause | -7,003,819,235,267,185,000 | 37.729885 | 84 | 0.598012 | false | 3.896877 | false | false | false |
sony/nnabla | python/src/nnabla/backward_function/dropout.py | 1 | 1383 | # Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
from .utils import no_grad, get_output
def dropout_backward(inputs, p=0.5, seed=-1, output_mask=False):
"""
Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
if not output_mask:
raise ValueError(
"dropout_backward is supported for output_mask=True.")
dy0 = inputs[0]
dy1 = inputs[1]
x0 = inputs[2]
y1 = get_output(x0, "Dropout", nth_output=1)
m0 = y1.get_unlinked_variable() # mask
dx0 = dy0 * m0 / (1 - p)
return dx0
| apache-2.0 | 1,812,822,212,141,513,200 | 32.731707 | 86 | 0.702097 | false | 3.778689 | false | false | false |
mosra/m.css | documentation/test_doxygen/test_doxyfile.py | 1 | 6508 | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import copy
import os
import shutil
import subprocess
import unittest
from doxygen import parse_doxyfile, State, default_config
from . import BaseTestCase
class Doxyfile(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
# Display ALL THE DIFFS
self.maxDiff = None
expected_doxyfile = {
'DOT_FONTNAME': 'Helvetica',
'DOT_FONTSIZE': 10,
'HTML_OUTPUT': 'html',
'OUTPUT_DIRECTORY': '',
'PROJECT_BRIEF': 'is cool',
'PROJECT_LOGO': '',
'PROJECT_NAME': 'My Pet Project',
'SHOW_INCLUDE_FILES': True,
'XML_OUTPUT': 'xml'
}
expected_config = {
'DOXYFILE': 'Doxyfile',
'FAVICON': ('favicon-dark.png', 'image/png'),
'LINKS_NAVBAR1': [(None, 'Pages', 'pages.html', 'pages', []),
(None, 'Modules', 'modules.html', 'modules', [])],
# different order
'LINKS_NAVBAR2': [(None, 'Files', 'files.html', 'files', []),
(None, 'Classes', 'annotated.html', 'annotated', [])],
'FINE_PRINT': 'this is "quotes"',
'THEME_COLOR': '#22272e',
'STYLESHEETS': ['a.css', 'b.css'],
'HTML_HEADER': None,
'EXTRA_FILES': ['css', 'another.png', 'hello'],
'PAGE_HEADER': 'this is "quotes" \'apostrophes\'',
'CLASS_INDEX_EXPAND_LEVELS': 1,
'CLASS_INDEX_EXPAND_INNER': False,
'FILE_INDEX_EXPAND_LEVELS': 1,
'M_CODE_FILTERS_PRE': {},
'M_CODE_FILTERS_POST': {},
'M_MATH_CACHE_FILE': 'm.math.cache',
'SEARCH_DISABLED': False,
'SEARCH_DOWNLOAD_BINARY': False,
'SEARCH_BASE_URL': None,
'SEARCH_EXTERNAL_URL': None,
'SEARCH_HELP':
"""<p class="m-noindent">Search for symbols, directories, files, pages or
modules. You can omit any prefix from the symbol or file path; adding a
<code>:</code> or <code>/</code> suffix lists all members of given symbol or
directory.</p>
<p class="m-noindent">Use <span class="m-label m-dim">↓</span>
/ <span class="m-label m-dim">↑</span> to navigate through the list,
<span class="m-label m-dim">Enter</span> to go.
<span class="m-label m-dim">Tab</span> autocompletes common prefix, you can
copy a link to the result using <span class="m-label m-dim">⌘</span>
<span class="m-label m-dim">L</span> while <span class="m-label m-dim">⌘</span>
<span class="m-label m-dim">M</span> produces a Markdown link.</p>
""",
'SHOW_UNDOCUMENTED': False,
'VERSION_LABELS': False,
}
def test(self):
# Basically mirroring what's in the Doxyfile-legacy. It's silly because
# we don't need to check most of these here anyway but whatever. To
# make this a bit saner, all existing tests are using the
# "legacy Doxyfile" config anyway, so it should be tested more than
# enough... until we port away from that. This should get then further
# extended to cover the cases that are no longer tested by other code.
state = State({**copy.deepcopy(default_config), **{
'EXTRA_FILES': ['css', 'another.png', 'hello'],
'STYLESHEETS': ['a.css', 'b.css'],
'PAGE_HEADER': 'this is "quotes" \'apostrophes\'',
'FINE_PRINT': 'this is "quotes"',
'LINKS_NAVBAR1': [(None, 'pages', []),
(None, 'modules', [])],
'LINKS_NAVBAR2': [(None, 'files', []),
(None, 'annotated', [])]
}})
parse_doxyfile(state, 'test_doxygen/doxyfile/Doxyfile')
self.assertEqual(state.doxyfile, self.expected_doxyfile)
self.assertEqual(state.config, self.expected_config)
def test_legacy(self):
state = State(copy.deepcopy(default_config))
parse_doxyfile(state, 'test_doxygen/doxyfile/Doxyfile-legacy')
self.assertEqual(state.doxyfile, self.expected_doxyfile)
self.assertEqual(state.config, self.expected_config)
def test_subdirs(self):
state = State(copy.deepcopy(default_config))
with self.assertRaises(NotImplementedError):
parse_doxyfile(state, 'test_doxygen/doxyfile/Doxyfile-subdirs')
class UpgradeCustomVariables(BaseTestCase):
def test(self):
# Copy the Doxyfile to a new location because it gets overwritten
shutil.copyfile(os.path.join(self.path, 'Doxyfile'),
os.path.join(self.path, 'Doxyfile-upgrade'))
subprocess.run(['doxygen', '-u', 'Doxyfile-upgrade'], cwd=self.path, check=True)
with open(os.path.join(self.path, 'Doxyfile-upgrade'), 'r') as f:
contents = f.read()
self.assertFalse('UNKNOWN_VARIABLE' in contents)
self.assertFalse('COMMENTED_OUT_VARIABLE' in contents)
self.assertTrue('## HASHED_COMMENTED_VARIABLE = 2' in contents)
self.assertTrue('##! HASHED_BANG_COMMENTED_VARIABLE = 3 \\' in contents)
self.assertTrue('##! HASHED_BANG_COMMENTED_VARIABLE_CONT' in contents)
self.assertTrue('##!HASHED_BANG_COMMENTED_VARIABLE_NOSPACE = 4' in contents)
self.assertTrue('INPUT = 5' in contents)
self.assertTrue('##! HASHED_BANG_COMMENTED_VARIABLE_END = 6' in contents)
| mit | 1,598,037,354,109,257,000 | 42.34 | 88 | 0.627903 | false | 3.571978 | true | false | false |
tBaxter/activity-monitor | activity_monitor/models.py | 1 | 4354 | from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.functional import cached_property
from .managers import ActivityItemManager
class Activity(models.Model):
"""
Stores an action that occurred that is being tracked
according to ACTIVITY_MONITOR settings.
"""
    actor = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="subject",
        on_delete=models.CASCADE
    )
timestamp = models.DateTimeField()
verb = models.CharField(blank=True, null=True, max_length=255, editable=False)
override_string = models.CharField(blank=True, null=True, max_length=255, editable=False)
target = models.CharField(blank=True, null=True, max_length=255, editable=False)
actor_name = models.CharField(blank=True, null=True, max_length=255, editable=False)
content_object = GenericForeignKey()
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
objects = ActivityItemManager()
class Meta:
ordering = ['-timestamp']
unique_together = [('content_type', 'object_id')]
get_latest_by = 'timestamp'
verbose_name_plural = 'actions'
def __unicode__(self):
return "{0}: {1}".format(self.content_type.model_class().__name__, self.content_object)
def save(self, *args, **kwargs):
"""
Store a string representation of content_object as target
and actor name for fast retrieval and sorting.
"""
if not self.target:
self.target = str(self.content_object)
if not self.actor_name:
self.actor_name = str(self.actor)
        super(Activity, self).save(*args, **kwargs)
def get_absolute_url(self):
"""
Use original content object's
get_absolute_url method.
"""
return self.content_object.get_absolute_url()
@cached_property
def short_action_string(self):
"""
Returns string with actor and verb, allowing target/object
to be filled in manually.
Example:
[actor] [verb] or
"Joe cool posted a comment"
"""
output = "{0} ".format(self.actor)
if self.override_string:
output += self.override_string
else:
output += self.verb
return output
@cached_property
def full_action_string(self):
"""
Returns full string with actor, verb and target content object.
Example:
[actor] [verb] [content object/target] or
Joe cool posted a new topic: "my new topic"
"""
output = "{} {}".format(self.short_action_string, self.content_object)
return output
@cached_property
def image(self):
"""
Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
        If there is another content_object, as in the case of comments and other GFKs,
        then it will follow that content_object and get the image from there.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped.
"""
obj = self.content_object
# First, try to get from a get_image() helper method
try:
image = obj.get_image()
except AttributeError:
try:
image = obj.content_object.get_image()
except:
image = None
# if we didn't find one, try to get it from foo.image
# This allows get_image to take precedence for greater control.
if not image:
try:
image = obj.image
except AttributeError:
try:
image = obj.content_object.image
except:
return None
# Finally, ensure we're getting an image, not an image object
# with caption and byline and other things.
try:
return image.image
except AttributeError:
return image
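# Sketch (assumption, not part of the original app): for Activity.image to find a
# picture, the tracked model only needs to expose a get_image() helper, e.g.:
#
#     class Photo(models.Model):
#         image = models.ImageField(upload_to='photos')
#
#         def get_image(self):
#             # return only the image, stripped of captions or other wrappers
#             return self.image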
| mit | 5,448,646,263,097,085,000 | 32.236641 | 98 | 0.616904 | false | 4.451943 | false | false | false |
DistributedSystemsGroup/zoe | zoe_master/scheduler/elastic_scheduler.py | 1 | 14941 | # Copyright (c) 2017, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Elastic scheduler is the implementation of the scheduling algorithm presented in this paper:
https://arxiv.org/abs/1611.09528
"""
import logging
import threading
import time
from zoe_lib.state import Execution, SQLManager, Service # pylint: disable=unused-import
from zoe_master.exceptions import ZoeException
from zoe_master.backends.interface import terminate_execution, terminate_service, start_elastic, start_essential, update_service_resource_limits
from zoe_master.scheduler.simulated_platform import SimulatedPlatform
from zoe_master.exceptions import UnsupportedSchedulerPolicyError
from zoe_master.stats import NodeStats # pylint: disable=unused-import
from zoe_master.metrics.base import StatsManager # pylint: disable=unused-import
log = logging.getLogger(__name__)
SELF_TRIGGER_TIMEOUT = 60 # the scheduler will trigger itself periodically in case platform resources have changed outside its control
def catch_exceptions_and_retry(func):
"""Decorator to catch exceptions in threaded functions."""
def wrapper(self):
"""The wrapper."""
while True:
try:
func(self)
except BaseException: # pylint: disable=broad-except
log.exception('Unmanaged exception in thread loop')
else:
log.debug('Thread terminated')
break
return wrapper
class ExecutionProgress:
"""Additional data for tracking execution sizes while in the queue."""
def __init__(self):
self.last_time_scheduled = 0
self.progress_sequence = []
class ZoeElasticScheduler:
"""The Scheduler class for size-based scheduling. Policy can be "FIFO" or "SIZE"."""
def __init__(self, state: SQLManager, policy, metrics: StatsManager):
if policy not in ('FIFO', 'SIZE', 'DYNSIZE'):
raise UnsupportedSchedulerPolicyError
self.metrics = metrics
self.trigger_semaphore = threading.Semaphore(0)
self.policy = policy
self.queue = []
self.queue_running = []
self.queue_termination = []
self.additional_exec_state = {}
self.loop_quit = False
self.loop_th = threading.Thread(target=self.loop_start_th, name='scheduler')
self.core_limit_recalc_trigger = threading.Event()
self.core_limit_th = threading.Thread(target=self._adjust_core_limits, name='adjust_core_limits')
self.state = state
for execution in self.state.executions.select(status='running'):
if execution.all_services_running:
self.queue_running.append(execution)
else:
self.queue.append(execution)
self.additional_exec_state[execution.id] = ExecutionProgress()
self.loop_th.start()
self.core_limit_th.start()
def trigger(self):
"""Trigger a scheduler run."""
self.trigger_semaphore.release()
def incoming(self, execution: Execution):
"""
This method adds the execution to the end of the queue and triggers the scheduler.
:param execution: The execution
:return:
"""
exec_data = ExecutionProgress()
self.additional_exec_state[execution.id] = exec_data
self.queue.append(execution)
self.trigger()
def terminate(self, execution: Execution) -> None:
"""
Inform the master that an execution has been terminated. This can be done asynchronously.
:param execution: the terminated execution
:return: None
"""
execution.set_cleaning_up()
self.queue_termination.append(execution)
def _terminate_executions(self):
while len(self.queue_termination) > 0:
execution = self.queue_termination.pop(0)
try:
self.queue.remove(execution)
except ValueError:
try:
self.queue_running.remove(execution)
except ValueError:
log.warning('Execution {} is not in any queue, attempting termination anyway'.format(execution.id))
try:
del self.additional_exec_state[execution.id]
except KeyError:
pass
terminate_execution(execution)
log.info('Execution {} terminated successfully'.format(execution.id))
def _refresh_execution_sizes(self):
if self.policy == "FIFO":
return
elif self.policy == "SIZE":
return
elif self.policy == "DYNSIZE":
for execution in self.queue: # type: Execution
try:
exec_data = self.additional_exec_state[execution.id]
except KeyError:
continue
if exec_data.last_time_scheduled == 0:
continue
elif execution.size <= 0:
execution.set_size(execution.total_reservations.cores.min * execution.total_reservations.memory.min)
continue
new_size = execution.size - (time.time() - exec_data.last_time_scheduled) * (256 * 1024 ** 2) # to be tuned
execution.set_size(new_size)
def _pop_all(self):
out_list = []
for execution in self.queue: # type: Execution
            if execution.status != Execution.TERMINATED_STATUS and execution.status != Execution.CLEANING_UP_STATUS:
out_list.append(execution)
else:
log.debug('While popping, throwing away execution {} that is in status {}'.format(execution.id, execution.status))
return out_list
def _requeue(self, execution: Execution):
self.additional_exec_state[execution.id].last_time_scheduled = time.time()
if execution not in self.queue: # sanity check: the execution should be in the queue
log.warning("Execution {} wants to be re-queued, but it is not in the queue".format(execution.id))
@catch_exceptions_and_retry
def loop_start_th(self): # pylint: disable=too-many-locals
"""The Scheduler thread loop."""
auto_trigger = SELF_TRIGGER_TIMEOUT
while True:
ret = self.trigger_semaphore.acquire(timeout=1)
if not ret: # Semaphore timeout, do some cleanup
auto_trigger -= 1
if auto_trigger == 0:
auto_trigger = SELF_TRIGGER_TIMEOUT
self.trigger()
continue
if self.loop_quit:
break
self._check_dead_services()
self._terminate_executions()
if len(self.queue) == 0:
log.debug("Scheduler loop has been triggered, but the queue is empty")
self.core_limit_recalc_trigger.set()
continue
log.debug("Scheduler loop has been triggered")
while True: # Inner loop will run until no new executions can be started or the queue is empty
self._refresh_execution_sizes()
if self.policy == "SIZE" or self.policy == "DYNSIZE":
self.queue.sort(key=lambda execution: execution.size)
jobs_to_attempt_scheduling = self._pop_all()
log.debug('Scheduler inner loop, jobs to attempt scheduling:')
for job in jobs_to_attempt_scheduling:
log.debug("-> {} ({})".format(job, job.size))
try:
platform_state = self.metrics.current_stats
except ZoeException:
log.error('Cannot retrieve platform state, cannot schedule')
for job in jobs_to_attempt_scheduling:
self._requeue(job)
break
cluster_status_snapshot = SimulatedPlatform(platform_state)
jobs_to_launch = []
free_resources = cluster_status_snapshot.aggregated_free_memory()
# Try to find a placement solution using a snapshot of the platform status
for job in jobs_to_attempt_scheduling: # type: Execution
jobs_to_launch_copy = jobs_to_launch.copy()
# remove all elastic services from the previous simulation loop
for job_aux in jobs_to_launch: # type: Execution
cluster_status_snapshot.deallocate_elastic(job_aux)
job_can_start = False
if not job.is_running:
job_can_start = cluster_status_snapshot.allocate_essential(job)
if job_can_start or job.is_running:
jobs_to_launch.append(job)
# Try to put back the elastic services
for job_aux in jobs_to_launch:
cluster_status_snapshot.allocate_elastic(job_aux)
current_free_resources = cluster_status_snapshot.aggregated_free_memory()
if current_free_resources >= free_resources:
jobs_to_launch = jobs_to_launch_copy
break
free_resources = current_free_resources
placements = cluster_status_snapshot.get_service_allocation()
log.info('Allocation after simulation: {}'.format(placements))
# We port the results of the simulation into the real cluster
for job in jobs_to_launch: # type: Execution
if not job.essential_services_running:
ret = start_essential(job, placements)
if ret == "fatal":
jobs_to_attempt_scheduling.remove(job)
self.queue.remove(job)
continue # trow away the execution
elif ret == "requeue":
self._requeue(job)
continue
elif ret == "ok":
job.set_running()
assert ret == "ok"
start_elastic(job, placements)
if job.all_services_active:
log.info('execution {}: all services are active'.format(job.id))
jobs_to_attempt_scheduling.remove(job)
self.queue.remove(job)
self.queue_running.append(job)
self.core_limit_recalc_trigger.set()
for job in jobs_to_attempt_scheduling:
self._requeue(job)
if len(self.queue) == 0:
log.debug('empty queue, exiting inner loop')
break
if len(jobs_to_launch) == 0:
log.debug('No executions could be started, exiting inner loop')
break
def quit(self):
"""Stop the scheduler thread."""
self.loop_quit = True
self.trigger()
self.core_limit_recalc_trigger.set()
self.loop_th.join()
self.core_limit_th.join()
def stats(self):
"""Scheduler statistics."""
if self.policy == "SIZE":
queue = sorted(self.queue, key=lambda execution: execution.size)
else:
queue = self.queue
return {
'queue_length': len(self.queue),
'running_length': len(self.queue_running),
'termination_queue_length': len(self.queue_termination),
'queue': [s.id for s in queue],
'running_queue': [s.id for s in self.queue_running],
'termination_queue': [s.id for s in self.queue_termination]
}
@catch_exceptions_and_retry
def _adjust_core_limits(self):
self.core_limit_recalc_trigger.clear()
while not self.loop_quit:
self.core_limit_recalc_trigger.wait()
if self.loop_quit:
break
stats = self.metrics.current_stats
for node in stats.nodes: # type: NodeStats
new_core_allocations = {}
node_services = self.state.services.select(backend_host=node.name, backend_status=Service.BACKEND_START_STATUS)
if len(node_services) == 0:
continue
for service in node_services:
new_core_allocations[service.id] = service.resource_reservation.cores.min
if node.cores_reserved < node.cores_total:
cores_free = node.cores_total - node.cores_reserved
cores_to_add = cores_free / len(node_services)
else:
cores_to_add = 0
for service in node_services:
update_service_resource_limits(service, cores=new_core_allocations[service.id] + cores_to_add)
self.core_limit_recalc_trigger.clear()
def _check_dead_services(self):
# Check for executions that are no longer viable since an essential service died
for execution in self.queue_running:
for service in execution.services:
if service.essential and service.backend_status == service.BACKEND_DIE_STATUS:
log.info("Essential service {} ({}) of execution {} died, terminating execution".format(service.id, service.name, execution.id))
service.restarted()
execution.set_error_message("Essential service {} died".format(service.name))
self.terminate(execution)
break
# Check for executions that need to be re-queued because one of the elastic components died
# Do it in two loops to prevent rescheduling executions that need to be terminated
for execution in self.queue_running:
for service in execution.services:
if not service.essential and service.backend_status == service.BACKEND_DIE_STATUS:
log.info("Elastic service {} ({}) of execution {} died, rescheduling".format(service.id, service.name, execution.id))
terminate_service(service)
service.restarted()
self.queue_running.remove(execution)
self.queue.append(execution)
break
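# Usage sketch (not part of the original module): how the master is expected to drive the
# scheduler. `state`, `metrics` and `execution` stand for already initialised SQLManager,
# StatsManager and Execution instances.
def _example_scheduler_lifecycle(state, metrics, execution):
    scheduler = ZoeElasticScheduler(state, 'DYNSIZE', metrics)
    scheduler.incoming(execution)   # queue a new execution and trigger a scheduling pass
    scheduler.terminate(execution)  # ask for asynchronous termination
    stats = scheduler.stats()       # queue lengths and ids, useful for monitoring
    scheduler.quit()                # stop the scheduler and core-limit threads
    return stats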
| apache-2.0 | -5,541,096,387,478,391,000 | 41.810888 | 148 | 0.581554 | false | 4.55102 | false | false | false |
praba230890/PYPOWER | pypower/t/t_scale_load.py | 2 | 21996 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Tests for code in C{scale_load}.
"""
from os.path import dirname, join
from numpy import array, zeros, in1d, vstack, flatnonzero as find
from pypower.loadcase import loadcase
from pypower.isload import isload
from pypower.scale_load import scale_load, ScalingError
from pypower.idx_bus import PD, QD, BUS_AREA
from pypower.idx_gen import GEN_BUS, QG, PMIN, QMIN, QMAX
from pypower.t.t_begin import t_begin
from pypower.t.t_is import t_is
from pypower.t.t_ok import t_ok
from pypower.t.t_end import t_end
def t_scale_load(quiet=False):
"""Tests for code in C{scale_load}.
@author: Ray Zimmerman (PSERC Cornell)
"""
n_tests = 275
t_begin(n_tests, quiet)
ppc = loadcase(join(dirname(__file__), 't_auction_case'))
ppc['gen'][7, GEN_BUS] = 2 ## multiple dispatchable loads per area, same bus as gen
ppc['gen'][7, [QG, QMIN, QMAX]] = array([3, 0, 3])
## put the load before the gen in the matrix
ppc['gen'] = vstack([ppc['gen'][7, :], ppc['gen'][:7, :], ppc['gen'][8, :]])
ld = find(isload(ppc['gen']))
a = [None] * 3
lda = [None] * 3
for k in range(3):
a[k] = find(ppc['bus'][:, BUS_AREA] == k + 1) ## buses in area k
tmp = find( in1d(ppc['gen'][ld, GEN_BUS] - 1, a[k]) )
lda[k] = ld[tmp] ## disp loads in area k
area = [None] * 3
for k in range(3):
area[k] = {'fixed': {}, 'disp': {}, 'both': {}}
area[k]['fixed']['p'] = sum(ppc['bus'][a[k], PD])
area[k]['fixed']['q'] = sum(ppc['bus'][a[k], QD])
area[k]['disp']['p'] = -sum(ppc['gen'][lda[k], PMIN])
area[k]['disp']['qmin'] = -sum(ppc['gen'][lda[k], QMIN])
area[k]['disp']['qmax'] = -sum(ppc['gen'][lda[k], QMAX])
area[k]['disp']['q'] = area[k]['disp']['qmin'] + area[k]['disp']['qmax']
area[k]['both']['p'] = area[k]['fixed']['p'] + area[k]['disp']['p']
area[k]['both']['q'] = area[k]['fixed']['q'] + area[k]['disp']['q']
total = {'fixed': {}, 'disp': {}, 'both': {}}
total['fixed']['p'] = sum(ppc['bus'][:, PD])
total['fixed']['q'] = sum(ppc['bus'][:, QD])
total['disp']['p'] = -sum(ppc['gen'][ld, PMIN])
total['disp']['qmin'] = -sum(ppc['gen'][ld, QMIN])
total['disp']['qmax'] = -sum(ppc['gen'][ld, QMAX])
total['disp']['q'] = total['disp']['qmin'] + total['disp']['qmax']
total['both']['p'] = total['fixed']['p'] + total['disp']['p']
total['both']['q'] = total['fixed']['q'] + total['disp']['q']
##----- single load zone, one scale factor -----
load = array([2])
t = 'all fixed loads (PQ) * 2 : '
bus, _ = scale_load(load, ppc['bus'])
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all fixed loads (P) * 2 : '
opt = {'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (PQ) * 2 : '
bus, gen = scale_load(load, ppc['bus'], ppc['gen'])
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), load * total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), load * total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (P) * 2 : '
opt = {'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (PQ) * 2 : '
opt = {'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), load * total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), load * total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (P) * 2 : '
opt = {'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
##----- single load zone, one scale quantity -----
load = array([200.0])
t = 'all fixed loads (PQ) => total = 200 : '
opt = {'scale': 'QUANTITY'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
t_is(sum(bus[:, PD]), load, 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load / total['fixed']['p'] * total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'scale': 'QUANTITY', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load - total['disp']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), (load - total['disp']['p'])/total['fixed']['p']*total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all fixed loads (P) => total = 200 : '
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
t_is(sum(bus[:, PD]), load, 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load - total['disp']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (PQ) => total = 200 : '
opt = {'scale': 'QUANTITY'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load / total['both']['p']*total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load / total['both']['p']*total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load / total['both']['p']*total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), load / total['both']['p']*total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), load / total['both']['p']*total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (P) => total = 200 : '
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load / total['both']['p']*total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load / total['both']['p']*total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (PQ) => total = 200 : '
opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load - total['fixed']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), (load - total['fixed']['p'])/total['disp']['p']*total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), (load - total['fixed']['p'])/total['disp']['p']*total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (P) => total = 200 : '
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load - total['fixed']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
##----- 3 zones, area scale factors -----
t = 'area fixed loads (PQ) * [3 2 1] : '
load = array([3, 2, 1])
bus, _ = scale_load(load, ppc['bus'])
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area fixed loads (P) * [3 2 1] : '
load = array([3, 2, 1])
opt = {'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (PQ) * [3 2 1] : '
bus, gen = scale_load(load, ppc['bus'], ppc['gen'])
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), load[k] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), load[k] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (P) * [3 2 1] : '
opt = {'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (PQ) * [3 2 1] : '
opt = {'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), load[k] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), load[k] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (P) * [3 2 1] : '
opt = {'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
##----- 3 zones, area scale quantities -----
t = 'area fixed loads (PQ) => total = [100 80 60] : '
load = array([100, 80, 60], float)
opt = {'scale': 'QUANTITY'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] / area[k]['fixed']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'scale': 'QUANTITY', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] - area[k]['disp']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), (load[k] - area[k]['disp']['p']) / area[k]['fixed']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area fixed loads (P) => total = [100 80 60] : '
load = array([100, 80, 60], float)
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k]-area[k]['disp']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (PQ) => total = [100 80 60] : '
opt = {'scale': 'QUANTITY'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), load[k] / area[k]['both']['p'] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (P) => total = [100 80 60] : '
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (PQ) => total = [100 80 60] : throws expected exception'
load = array([100, 80, 60], float)
opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
err = 0
try:
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
except ScalingError as e:
expected = 'scale_load: impossible to make zone 2 load equal 80 by scaling non-existent dispatchable load'
err = expected in str(e)
t_ok(err, t)
t = 'area disp loads (PQ) => total = [100 74.3941 60] : '
load = array([100, area[1]['fixed']['p'], 60], float)
opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k]-area[k]['fixed']['p'], 8, '%s area %d disp P' % (t, k))
if k == 1:
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
else:
t_is(-sum(gen[lda[k], QMIN]), (load[k] - area[k]['fixed']['p']) / area[k]['disp']['p'] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), (load[k] - area[k]['fixed']['p']) / area[k]['disp']['p'] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (P) => total = [100 74.3941 60] : '
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k]-area[k]['fixed']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
##----- explicit single load zone -----
t = 'explicit single load zone'
load_zone = zeros(ppc['bus'].shape[0])
load_zone[[2, 3]] = 1
load = array([2.0])
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], load_zone)
Pd = ppc['bus'][:, PD]
Pd[[2, 3]] = load * Pd[[2, 3]]
t_is( bus[:, PD], Pd, 8, t)
##----- explicit multiple load zone -----
t = 'explicit multiple load zone'
load_zone = zeros(ppc['bus'].shape[0])
load_zone[[2, 3]] = 1
load_zone[[6, 7]] = 2
load = array([2, 0.5])
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], load_zone)
Pd = ppc['bus'][:, PD]
Pd[[2, 3]] = load[0] * Pd[[2, 3]]
Pd[[6, 7]] = load[1] * Pd[[6, 7]]
t_is( bus[:, PD], Pd, 8, t)
t_end()
if __name__ == '__main__':
t_scale_load(quiet=False)
| bsd-3-clause | 6,540,660,341,247,444,000 | 55.544987 | 161 | 0.496636 | false | 2.550261 | false | false | false |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/tools/rosgraph/test/test_rosenv.py | 1 | 2781 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
def test_vars():
import rosgraph.rosenv
assert 'ROS_MASTER_URI' == rosgraph.rosenv.ROS_MASTER_URI
assert rosgraph.rosenv.ROS_IP == 'ROS_IP'
assert rosgraph.rosenv.ROS_HOSTNAME == 'ROS_HOSTNAME'
assert rosgraph.rosenv.ROS_NAMESPACE == 'ROS_NAMESPACE'
def test_get_master_uri():
from rosgraph.rosenv import get_master_uri
val = get_master_uri()
if 'ROS_MASTER_URI' in os.environ:
assert val == os.environ['ROS_MASTER_URI']
# environment override
val = get_master_uri(env=dict(ROS_MASTER_URI='foo'))
assert val == 'foo'
# argv override precedence, first arg wins
val = get_master_uri(env=dict(ROS_MASTER_URI='foo'), argv=['__master:=bar', '__master:=bar2'])
assert val == 'bar'
# empty env
assert None == get_master_uri(env={})
# invalid argv
try:
val = get_master_uri(argv=['__master:='])
assert False, "should have failed"
except ValueError:
pass
# invalid argv
try:
val = get_master_uri(argv=['__master:=foo:=bar'])
assert False, "should have failed"
except ValueError:
pass
| bsd-3-clause | -1,259,702,739,431,144,000 | 35.116883 | 98 | 0.705142 | false | 3.884078 | false | false | false |
possnfiffer/py-emde | py-emde-wind-direction.py | 1 | 5361 | ndata = []
with open('data/winddir_20160202.dat') as f:
for line in f:
ndata.append(line.split(" "))
flist = []
for l in ndata:
if l[4].endswith('00') or l[4].endswith('15') or l[4].endswith('30') or l[4].endswith('45'):
if float(l[-1].strip()) == 0 or float(l[-1].strip()) == 360:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'N'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
# NNE = 22.5º
elif float(l[-1].strip()) > 0 and float(l[-1].strip()) <= 22.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NNE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#NE = 45º
elif float(l[-1].strip()) > 22.5 and float(l[-1].strip()) <= 45:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#ENE = 67.5º
elif float(l[-1].strip()) > 45 and float(l[-1].strip()) <= 67.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'ENE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#E = 90º
elif float(l[-1].strip()) > 67.5 and float(l[-1].strip()) <= 90:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'E'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#ESE = 112.5º
elif float(l[-1].strip()) > 90 and float(l[-1].strip()) <= 112.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'ESE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SE = 135º
elif float(l[-1].strip()) > 112.5 and float(l[-1].strip()) <= 135:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SSE = 157.5º
elif float(l[-1].strip()) > 135 and float(l[-1].strip()) <= 157.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SSE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#S = 180º
elif float(l[-1].strip()) > 157.5 and float(l[-1].strip()) <= 180:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'S'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SSW = 202.5º
elif float(l[-1].strip()) > 180 and float(l[-1].strip()) <= 202.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SSW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SW = 225º
elif float(l[-1].strip()) > 202.5 and float(l[-1].strip()) <= 225:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#WSW = 247.5º
elif float(l[-1].strip()) > 225 and float(l[-1].strip()) <= 247.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'WSW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#W = 270º
elif float(l[-1].strip()) > 247.5 and float(l[-1].strip()) <= 270:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'W'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#WNW = 292.5º
elif float(l[-1].strip()) > 270 and float(l[-1].strip()) <= 292.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'WNW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#NW = 315º
elif float(l[-1].strip()) > 292.5 and float(l[-1].strip()) <= 315:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#NNW = 337.5º
elif float(l[-1].strip()) > 315 and float(l[-1].strip()) <= 337.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NNW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
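# The if/elif ladder above assigns one of 16 compass labels to each 22.5-degree
# sector. A compact, equivalent helper (a sketch only, not used by this script)
# is shown below; unlike the ladder, it also folds readings in (337.5, 360) into
# 'N' instead of silently skipping them.
import math
def degrees_to_compass(deg):
    """Map a wind direction in degrees to a 16-point compass label."""
    labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
              'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
    deg = float(deg)
    if deg == 0 or deg == 360:
        return 'N'
    return labels[int(math.ceil(deg / 22.5)) % 16]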
fstring = ''
for l in flist:
fstring += l + ' '
data = '//AA ' + fstring + '//ZZ'
from sparkpost import SparkPost
# Send email using the SparkPost api
sp = SparkPost() # uses environment variable named SPARKPOST_API_KEY
response = sp.transmission.send(
recipients=['[email protected]'],
bcc=['[email protected]'],
text=data,
from_email='[email protected]',
subject='DATA'
)
print(response)
| bsd-2-clause | 7,669,188,134,167,574,000 | 68.428571 | 205 | 0.378601 | false | 2.17671 | false | false | false |
eLvErDe/methlab | pymethlab/updatehelper.py | 1 | 1970 | # methlab - A music library application
# Copyright (C) 2007 Ingmar K. Steen ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__all__ = ['UpdateHelper']
import threading
class UpdateHelper:
def __init__(self, db, scanner_class):
self.db = db
self.scanner_class = scanner_class
self.scanner = None
self.lock = threading.Lock()
self.stop_flag = threading.Event()
self.stopped_flag = threading.Event()
self.stopped_flag.set()
def set_scanner_class(self, scanner_class):
self.lock.acquire()
if self.scanner:
self.lock.release()
self.stop()
self.lock.acquire()
self.scanner_class = scanner_class
self.lock.release()
def stop(self):
self.stop_flag.set()
self.stopped_flag.wait()
def update(self, callback):
def run_scanner():
self.scanner.update()
self.lock.acquire()
self.scanner = None
self.lock.release()
self.stopped_flag.set()
callback()
if not self.stopped_flag.isSet():
return False
self.lock.acquire()
self.stopped_flag.clear()
self.stop_flag.clear()
self.scanner = self.scanner_class(self.db, lambda: not self.stop_flag.isSet())
threading.Thread(target = run_scanner).start()
self.lock.release()
return True
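# Usage sketch (DirScanner is a placeholder for one of the real scanner classes
# shipped with pymethlab; db is whatever database object those scanners expect):
#
#   helper = UpdateHelper(db, DirScanner)
#   started = helper.update(callback=on_scan_finished)  # False if a scan is already running
#   ...
#   helper.stop()  # blocks until the background scan has finished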
| gpl-2.0 | -6,294,964,913,357,571,000 | 30.774194 | 82 | 0.682741 | false | 3.682243 | false | false | false |
sugarsweetrobotics/wasanbon | wasanbon/core/plugins/admin/wsconverter_plugin/host/outport_converter.py | 1 | 2057 | import os, sys
from common_converter import *
_template = """
import yaml, traceback
import RTC
import OpenRTM_aist
_data = $CONSTRUCTOR
_port = OpenRTM_aist.OutPort("$NAME", _data)
def convert(data, d_list):
it = iter(d_list)
$CODE
print 'converted:', data
return data
def _sendData(d_list):
convert(_data, d_list)
_port.write()
def execute(comp, webSocketSender):
comp.addOutPort("$NAME", _port)
webSocketSender.outports[u"$NAME"] = _sendData
"""
def create_outport_converter_module(parser, name, typename, verbose=False):
module_dir = 'modules'
if not os.path.isdir(module_dir):
os.mkdir(module_dir)
global_module = parser.global_module
typs = global_module.find_types(typename)
if len(typs) == 0:
print 'Invalid Type Name (%s)' % typename
raise InvalidDataTypeException()
module_name = typs[0].parent.name
copy_idl_and_compile(parser, typs[0].filepath)
filename = '%s_OutPort_%s.py' % (name, typename.replace('::', '_').strip())
f = open(os.path.join(module_dir, filename), 'w')
import value_dic as vd
value_dic = vd.generate_value_dic(global_module, typename, root_name='data', verbose=verbose)
#if verbose:
# print '-------value-------'
# import yaml
# print yaml.dump(value_dic, default_flow_style=False)
#import inport_converter as ip
global _template
output = "%s" % _template
code = create_fromlist_converter(value_dic, list_name='d_list', indent = ' ')
if verbose:
print '------data to list-----'
print code
output = output.replace('$NAME', name)
typs = global_module.find_types(typename)
output = output.replace('$CONSTRUCTOR', parser.generate_constructor_python(typs[0]))
output = output.replace('$CODE', code)
#import outport_converter as op
#code = op.create_converter(value_dic)
#print '------list to data-----'
#print code
output = 'import %s\n' % module_name + output
f.write(output)
f.close()
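# Hypothetical invocation sketch ('RTC::TimedLong' and the port name are only
# examples; the parser object comes from wasanbon's IDL tooling):
#
#   create_outport_converter_module(parser, 'range_out', 'RTC::TimedLong', verbose=True)
#
# This would write modules/range_out_OutPort_RTC_TimedLong.py, whose
# execute(comp, webSocketSender) registers the OutPort and its sender callback.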
| gpl-3.0 | -3,378,357,359,583,994,400 | 26.065789 | 97 | 0.628099 | false | 3.366612 | false | false | false |
spmjc/plugin.video.freplay | resources/lib/channels/rmcd.py | 1 | 1233 | #-*- coding: utf-8 -*-
import urllib2
import re
import CommonFunctions
common = CommonFunctions
from resources.lib import utils
from resources.lib import globalvar
import json
title=['RMC Decouverte']
img=['rmcd']
readyForUse=True
url_video_json='https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s'
def list_shows(channel,page):
shows = []
shows.append([channel,'empty', 'Toutes les videos','','shows'])
return shows
def list_videos(channel,page):
videos=[]
filePath=utils.downloadCatalog('http://rmcdecouverte.bfmtv.com/mediaplayer-replay/' ,'rmcd.html',False,{})
html=open(filePath).read().replace('\n', ' ').replace('\r', '')
match = re.compile(r'<figure class="figure modulx1-5-inside-bloc">(.*?)<a href="(.*?)" title="(.*?)">(.*?)data-original="(.*?)" alt=',re.DOTALL).findall(html)
for a,url,title,b,img in match:
title=utils.formatName(title)
infoLabels = {"Title": title.encode('utf-8')}
videos.append( [channel, url.replace('\t','').encode('utf-8') , title.encode('utf-8') , img.encode('utf-8'),infoLabels,'play'] )
return videos
def getVideoURL(channel,url):
url='http://rmcdecouverte.bfmtv.com' + url
return utils.getExtURL(url)
| gpl-2.0 | 8,747,088,300,629,931,000 | 32.324324 | 161 | 0.664234 | false | 3.05198 | false | false | false |
jplana/python-etcd | setup.py | 1 | 1239 | from setuptools import setup, find_packages
import sys, os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
NEWS = open(os.path.join(here, 'NEWS.txt')).read()
version = '0.4.5'
install_requires = [
'urllib3>=1.7.1',
'dnspython>=1.13.0'
]
test_requires = [
'mock',
'nose',
'pyOpenSSL>=0.14'
]
setup(
name='python-etcd',
version=version,
description="A python client for etcd",
long_description=README + '\n\n' + NEWS,
classifiers=[
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Database :: Front-Ends",
],
keywords='etcd raft distributed log api client',
author='Jose Plana',
author_email='[email protected]',
url='http://github.com/jplana/python-etcd',
license='MIT',
packages=find_packages('src'),
package_dir = {'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=test_requires,
test_suite='nose.collector',
)
| mit | 6,686,755,014,281,602,000 | 25.361702 | 54 | 0.619048 | false | 3.385246 | false | false | false |
GreenGear5/planet-wars | bots/ml-rfc/ml-rfc.py | 1 | 4276 | #!/usr/bin/env python
"""
Uses the Random Forest classifier
"""
from api import State, util
import random, os
from sklearn.externals import joblib
DEFAULT_MODEL = os.path.dirname(os.path.realpath(__file__)) + '/model.pkl'
class Bot:
__max_depth = -1
__randomize = True
__model = None
def __init__(self, randomize=True, depth=12, model_file=DEFAULT_MODEL):
print(model_file)
self.__randomize = randomize
self.__max_depth = depth
# Load the model
self.__model = joblib.load(model_file)
def get_move(self, state):
val, move = self.value(state)
return move
def value(self, state, alpha=float('-inf'), beta=float('inf'), depth=0):
"""
Return the value of this state and the associated move
:param state:
:param alpha: The highest score that the maximizing player can guarantee given current knowledge
:param beta: The lowest score that the minimizing player can guarantee given current knowledge
:param depth: How deep we are in the tree
:return: val, move: the value of the state, and the best move.
"""
if state.finished():
return (1.0, None) if state.winner() == 1 else (-1.0, None)
if depth == self.__max_depth:
return self.heuristic(state), None
best_value = float('-inf') if maximizing(state) else float('inf')
best_move = None
moves = state.moves()
if self.__randomize:
random.shuffle(moves)
for move in moves:
next_state = state.next(move)
value, m = self.value(next_state, alpha, beta, depth + 1)
if maximizing(state):
if value > best_value:
best_value = value
best_move = move
alpha = best_value
else:
if value < best_value:
best_value = value
best_move = move
beta = best_value
# Prune the search tree
# We know this state will never be chosen, so we stop evaluating its children
if alpha >= beta:
break
return best_value, best_move
def heuristic(self, state):
# Convert the state to a feature vector
feature_vector = [features(state)]
# These are the classes: ('won', 'lost')
classes = list(self.__model.classes_)
# Ask the model for a prediction
# This returns a probability for each class
prob = self.__model.predict_proba(feature_vector)[0]
# print prob
# print('{} {} {}'.format(classes, prob, util.ratio_ships(state, 1)))
# Weigh the win/loss outcomes (-1 and 1) by their probabilities
res = -1.0 * prob[classes.index('lost')] + 1.0 * prob[classes.index('won')]
return res
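# Sketch of how a compatible model.pkl could be produced. This is not part of the
# bot itself: the recorded states and the 'won'/'lost' labels would have to be
# collected separately (e.g. by simulating matches), and the hyperparameters are
# placeholders.
def _train_model_example(recorded_states, outcomes, path='model.pkl'):
    from sklearn.ensemble import RandomForestClassifier
    X = [features(s) for s in recorded_states]  # 6-float vectors, see features() below
    clf = RandomForestClassifier(n_estimators=100)
    clf.fit(X, outcomes)  # outcomes are the strings 'won' / 'lost'
    joblib.dump(clf, path)
    return clf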
def maximizing(state):
"""
Whether we're the maximizing player (1) or the minimizing player (2).
:param state:
:return:
"""
return state.whose_turn() == 1
def features(state):
# type: (State) -> tuple[float, ...]
"""
Extract features from this state. Remember that every feature vector returned should have the same length.
:param state: A state to be converted to a feature vector
:return: A tuple of floats: a feature vector representing this state.
"""
my_id = state.whose_turn()
opponent_id = 2 if my_id == 1 else 1
# How many ships does p1 have in garrisons?
p1_garrisons = 0.0
# How many ships does p2 have in garrisons?
p2_garrisons = 0.0
p1_planets = 0
p2_planets = 0
for planet in state.planets(my_id):
p1_garrisons += state.garrison(planet)
p1_planets += 1
for planet in state.planets(opponent_id):
p2_garrisons += state.garrison(planet)
p2_planets += 1
# How many ships does p1 have in fleets?
p1_fleets = 0.0
# How many ships does p2 have in fleets?
p2_fleets = 0.0
for fleet in state.fleets():
if fleet.owner() == my_id:
p1_fleets += fleet.size()
else:
p2_fleets += fleet.size()
return p1_garrisons, p2_garrisons, p1_fleets, p2_fleets, p1_planets, p2_planets
| mit | -1,888,951,242,847,442,400 | 27.697987 | 110 | 0.58232 | false | 3.731239 | false | false | false |
Flavsditz/projects | eyeTracking/pupil/pupil_src/capture/recorder.py | 1 | 4963 | import os, sys
import cv2
import atb
import numpy as np
from plugin import Plugin
from time import strftime,localtime,time,gmtime
from ctypes import create_string_buffer
from git_version import get_tag_commit
class Recorder(Plugin):
"""Capture Recorder"""
def __init__(self, session_str, fps, img_shape, shared_record, eye_tx):
Plugin.__init__(self)
self.session_str = session_str
self.base_path = os.path.join(os.path.abspath(__file__).rsplit('pupil_src', 1)[0], "recordings")
self.shared_record = shared_record
self.frame_count = 0
self.timestamps = []
self.eye_tx = eye_tx
self.start_time = time()
# set up base folder called "recordings"
try:
os.mkdir(self.base_path)
except:
print "recordings folder already exists, using existing."
session = os.path.join(self.base_path, self.session_str)
try:
os.mkdir(session)
except:
print "recordings session folder already exists, using existing."
# set up self incrementing folder within session folder
counter = 0
while True:
self.path = os.path.join(self.base_path, session, "%03d/" % counter)
try:
os.mkdir(self.path)
break
except:
print "We don't want to overwrite data, incrementing counter & trying to make new data folder"
counter += 1
self.meta_info_path = os.path.join(self.path, "info.csv")
with open(self.meta_info_path, 'w') as f:
f.write("Pupil Recording Name:\t"+self.session_str+ "\n")
f.write("Start Date: \t"+ strftime("%d.%m.%Y", localtime(self.start_time))+ "\n")
f.write("Start Time: \t"+ strftime("%H:%M:%S", localtime(self.start_time))+ "\n")
video_path = os.path.join(self.path, "world.avi")
self.writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'), fps, (img_shape[1], img_shape[0]))
self.height = img_shape[0]
self.width = img_shape[1]
# send the recording path to the eye process so it can store pupil positions there
self.shared_record.value = True
self.eye_tx.send(self.path)
atb_pos = (10, 540)
self._bar = atb.Bar(name = self.__class__.__name__, label='REC: '+session_str,
help="capture recording control", color=(220, 0, 0), alpha=150,
text='light', position=atb_pos,refresh=.3, size=(300, 80))
self._bar.rec_name = create_string_buffer(512)
self._bar.add_var("rec time",self._bar.rec_name, getter=lambda: create_string_buffer(self.get_rec_time_str(),512), readonly=True)
self._bar.add_button("stop", self.stop_and_destruct, key="s", help="stop recording")
self._bar.define("contained=true")
def get_rec_time_str(self):
rec_time = gmtime(time()-self.start_time)
return strftime("%H:%M:%S", rec_time)
def update(self, frame):
self.frame_count += 1
self.timestamps.append(frame.timestamp)
self.writer.write(frame.img)
def stop_and_destruct(self):
try:
camera_matrix = np.load("camera_matrix.npy")
dist_coefs = np.load("dist_coefs.npy")
cam_path = os.path.join(self.path, "camera_matrix.npy")
dist_path = os.path.join(self.path, "dist_coefs.npy")
np.save(cam_path, camera_matrix)
np.save(dist_path, dist_coefs)
except:
print "no camera intrinsics found, will not copy them into recordings folder"
timestamps_path = os.path.join(self.path, "timestamps.npy")
np.save(timestamps_path,np.array(self.timestamps))
try:
with open(self.meta_info_path, 'a') as f:
f.write("Duration Time: \t"+ self.get_rec_time_str()+ "\n")
f.write("World Camera Frames: \t"+ str(self.frame_count)+ "\n")
f.write("World Camera Resolution: \t"+ str(self.width)+"x"+str(self.height)+"\n")
f.write("Capture Software Version: \t"+ get_tag_commit()+ "\n")
f.write("user:\t"+os.getlogin()+"\n")
try:
sysname, nodename, release, version, machine = os.uname()
except:
sysname, nodename, release, version, machine = sys.platform,None,None,None,None
f.write("Platform:\t"+sysname+"\n")
f.write("Machine:\t"+nodename+"\n")
f.write("Release:\t"+release+"\n")
f.write("Version:\t"+version+"\n")
except:
print "Could not save metadata. Please report this bug!"
print "Stopping recording"
self.shared_record.value = False
self.alive = False
def __del__(self):
"""in case the plugin gets deleted while recording
"""
self.stop_and_destruct()
def get_auto_name():
return strftime("%Y_%m_%d", localtime())
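# Usage sketch (hypothetical wiring; in Pupil Capture the shared flag and the
# pipe to the eye process are created by the world process, so the objects below
# are stand-ins rather than the real capture pipeline):
#
#   from multiprocessing import Value, Pipe
#   shared_record = Value('i', 0)
#   _eye_rx, eye_tx = Pipe(False)
#   rec = Recorder(get_auto_name(), fps=30, img_shape=(720, 1280, 3),
#                  shared_record=shared_record, eye_tx=eye_tx)
#   # for every captured world frame: rec.update(frame)
#   # to finish the recording:        rec.stop_and_destruct()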
| gpl-2.0 | -8,185,987,220,556,389,000 | 39.024194 | 137 | 0.576667 | false | 3.598985 | false | false | false |
li282886931/apistore | server/controllers/novelhandler.py | 1 | 8379 | # -*- coding: utf-8 -*-
import json
import datetime
import urllib
import hashlib
import tornado.gen
import tornado.web
import tornado.httpclient
from controllers.basehandler import BaseHandler
from models.novelmodel import NovelModel
import utils
from utils import json_success, json_failed, cache_error
import const
from settings import BASEURL, appsecret, NOVELSEARCH
class Novel(BaseHandler):
def initialize(self):
super(Novel, self).initialize()
self.novel = NovelModel()
def get(self):
return self.post()
class GetTagList(Novel):
"""Get the list of novel categories (tags)."""
@cache_error
@utils.checkSign
def post(self):
first = self.get_argument("first", None)
second = self.get_argument("second", None)
if first == None and second == None:
tag_list = self.novel.loadAllTag()
elif first != None and second == None:
tag_list = self.novel.loadAllSecondTag(first)
elif first == None and second != None:
tag_list = self.novel.loadAllFirstTag(second)
else:
tag_list = self.novel.loadFirstSecondTag(first, second)
result = [{'first': v['first'], 'second': v['id'], 'name': v['second']} for v in tag_list]
self.write(json_success(result))
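# Illustrative response shape (an assumption: json_success is taken to wrap its
# payload as {"code": 200, "result": ...}, mirroring how the search-service
# replies are read further below; the tag ids and name are made up):
#
#   {"code": 200, "result": [{"first": 1, "second": 12, "name": "fantasy"}, ...]}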
class GetNovelList(Novel):
"""Get the list of novels under a given category."""
@cache_error
@utils.checkSign
def post(self):
first = self.get_argument("first", None)
second = self.get_argument("second", None)
page = self.get_argument("page", None)
page = 1 if not page else int(page)
limit = self.get_argument("limit", None)
limit = const.NOVEL_LIMIT if not limit else int(limit)
if not first or not second:
raise ValueError(1)
else:
novel_list = self.novel.loadNovelList(first, second, page, limit)
result = [{'novelid': v['id'],
'title': v['title'],
'novelpv': v['novelpv'],
'author': v['author'],
'introduction': "".join(v['introduction'].split()),
'picture': "/static/spider/" + v['picture']} for v in novel_list]
self.write(json_success(result))
class GetNovelIntroduction(Novel):
"""Get a novel's introduction."""
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
if not novelid:
raise ValueError(1)
else:
intro = self.novel.loadNovelIntroduction(int(novelid))
if intro.__len__() != 1:
raise ValueError(500)
else:
result = {
'title': intro[0]['title'],
'novelid': intro[0]['id'],
'author': intro[0]['author'],
'picture': "/static/spider/"+intro[0]['picture'],
'introduction': "".join(intro[0]['introduction'].split()),
}
self.write(json_success(result))
class GetNovelChapter(Novel):
"""Get a novel's chapter list."""
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
if not novelid:
raise ValueError(401)
else:
chapter_list = self.novel.loadNovelChapter(int(novelid))
result = [{'subtitle': v['subtitle'], 'chapter': i, 'chapterid': v['id']} for i, v in enumerate(chapter_list, 1)]
self.write(json_success(result))
class GetNovelContent(Novel):
"""Get a chapter's content."""
@cache_error
@utils.checkSign
def post(self):
chapterid = self.get_argument("chapterid", None)
if not chapterid:
raise ValueError(401)
else:
c = self.novel.loadNovelContent(int(chapterid))
if len(c) != 1:
raise ValueError(500)
else:
result = {'title': c[0]['title'], 'subtitle': c[0]['subtitle'], 'novelid': c[0]['novelid'],
'content': c[0]['text'].encode("utf-8"), 'chapterid': c[0]['id'],
'prev': self.novel.loadPrevNext(int(c[0]['chapter']), int(c[0]['novelid']))[0],
'next': self.novel.loadPrevNext(int(c[0]['chapter']), int(c[0]['novelid']))[1]}
#get the previous and next chapters
self.write(json_success(result))
class NovelClick(Novel):
"""Count novel clicks (increment page views)."""
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
novelid = int(novelid) if novelid else None
if not novelid:
raise ValueError(401)
else:
if self.novel.loadNovelIntroduction(novelid).__len__() != 1:
raise ValueError(406)
n = self.novel.addNovelPv(novelid)[0]
result = {'novelid': n['id'], 'novelpv': n['novelpv']}
self.write(json_success(result))
class GetNovelRank(Novel):
"""Get the novel ranking."""
@cache_error
@utils.checkSign
def post(self):
page = self.get_argument("page", None)
page = 1 if not page else int(page)
limit = self.get_argument("limit", None)
limit = const.NOVEL_LIMIT if not limit else int(limit)
novel_list = self.novel.loadNovelRank(page, limit)
result = [{
'novelid': v['novelid'],
'title': v['title'],
'introduction': "".join(v['introduction'].split()),
'novelpv': v['novelpv'],
'author': v['author'],
'first': v['first'],
'second': v['second'],
'picture': "/static/spider/" + v['picture'],
'rank': (page-1)*limit + i} for i, v in enumerate(novel_list, 1)]
self.write(json_success(result))
class NovelSearch(Novel):
"""Get novel search results."""
def initialize(self):
super(NovelSearch, self).initialize()
self.uri = NOVELSEARCH + "/search/"
self.method = "POST"
self.headers = self.request.headers
self.body = None
@cache_error
@utils.checkSign
@tornado.gen.coroutine
def post(self):
wd = self.get_argument("wd", None)
sign_method = self.get_argument("sign_method", None)
if not wd:
raise ValueError(401)
#assemble the request body
data = {
"appid": self.appid,
"sign_method": sign_method,
"text": wd,
"docids": "0-" + str(self.novel.getNovelDocMaxId(self.appid)),
}
sign = utils.md5sign(appsecret, data)
data["sign"] = sign
self.body = urllib.urlencode(data)
resp = yield self.client()
result = []
try:
jsonret = json.loads(resp.body)
if jsonret["code"] == 200:
if len(jsonret["result"]["docs"]) != 0:
novellist = self.novel.getNovelListById(jsonret["result"]["docs"])
for v in novellist:
result.append({
'id': v['id'],
'picture': "/static/spider/" + v['picture'],
'introduction': "".join(v['introduction'].split()),
'title': v['title'],
'first': v['first'],
'second': v['second'],
'novelpv': v['novelpv'],
'author': v['author'],
})
self.write(json_success(result))
else:
self.write(json_success([]))
else:
self.write(json_failed(int(jsonret["code"])))
except Exception as e:
self.write(json_failed(500))
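# Sketch of how the signed search body above is assembled, factored into a helper
# for clarity (the appid/text/docids values are placeholders; the real request
# reuses the client-supplied appid and sign_method):
def _example_signed_search_body(appid, text, docids="0-1000", sign_method="md5"):
    data = {"appid": appid, "sign_method": sign_method, "text": text, "docids": docids}
    data["sign"] = utils.md5sign(appsecret, data)
    return urllib.urlencode(data)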
class NovelDownload(Novel):
"""Novel download URL."""
def initialize(self):
super(NovelDownload, self).initialize()
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
print novelid
if not novelid:
raise ValueError(401)
md5novelid = hashlib.md5(novelid).hexdigest()
self.write(json_success({"novelid": int(novelid), "novelsrc": BASEURL + "/static/novel/" + md5novelid + ".txt"}))
| gpl-2.0 | -1,168,748,543,770,488,600 | 33.103734 | 125 | 0.528045 | false | 3.595363 | false | false | false |
goblinhack/MundusMeus | python/things/rock.py | 1 | 1729 | import tp
import mm
def thing_init(t):
return
def rock_init(name, short_name, tiles=[]):
x = tp.Tp(name)
x.set_is_shadow_caster_soft(True)
x.set_is_shadow_caster(True)
x.set_short_name(short_name)
x.set_is_movement_blocking(True)
x.set_is_rock(True)
x.set_z_depth(mm.Z_DEPTH_ROCK)
x.set_is_solid_ground(True)
x.set_blit_top_off(1)
x.set_blit_bot_off(1)
if tiles is not None:
for t in tiles:
x.set_tile(t, delay_ms=150)
else:
x.set_tile(tile=name, delay_ms=150)
x.thing_init = thing_init
def init():
rock_init(name="rock",
short_name="A boring rock",
tiles=[
"rock.1",
"rock.2",
"rock.3",
"rock.4",
"rock.5",
"rock.6",
"rock.7",
"rock.8",
"rock.9",
"rock.10",
"rock.11",
"rock.12",
"rock.13",
"rock.14",
"rock.15",
"rock.16",
"rock.17",
"rock.18",
"rock.19",
"rock.20",
"rock.21",
"rock.22",
"rock.23",
"rock.24",
"rock.25",
"rock.26",
"rock.27",
"rock.28",
"rock.29",
"rock.30",
"rock.31",
"rock.32",
])
init()
| lgpl-3.0 | -4,111,445,784,893,475,000 | 24.057971 | 43 | 0.338924 | false | 3.816777 | false | false | false |
jeremi/couchdbkit | couchdbkit/ext/django/loading.py | 1 | 5298 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009 Benoit Chesneau <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Maintain registry of documents used in your django project
and manage db sessions
"""
import sys
import os
import urllib
import urlparse
from couchdbkit import Server, contain, ResourceConflict
from couchdbkit.loaders import FileSystemDocLoader
from couchdbkit.resource import PreconditionFailed
from django.conf import settings
from django.db.models import signals, get_app
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from restkit.httpc import HttpClient, BasicAuth
COUCHDB_DATABASES = getattr(settings, "COUCHDB_DATABASES", [])
class CouchdbkitHandler(object):
""" The couchdbkit handler for django """
# share state between instances
__shared_state__ = dict(
_databases = {},
app_schema = SortedDict()
)
def __init__(self, databases, transport=None):
""" initialize couchdbkit handler with COUCHDB_DATABASES
settings """
self.__dict__ = self.__shared_state__
if transport is None:
self.transport = HttpClient()
# create databases sessions
for app_name, uri in databases:
if isinstance(uri, tuple):
# case when you want to specify server uri
# and database name specifically. usefull
# when you proxy couchdb on some path
server_part, dbname = uri
parts = urlparse.urlsplit(urllib.unquote(server_part))
else:
parts = urlparse.urlsplit(urllib.unquote(uri))
dbname = parts[2].split("/")[1]
if parts[0] != 'http' and parts[0] != 'https':
raise ValueError('Invalid dbstring')
if "@" in parts[1]:
server_parts = parts[1].split('@')
if ":" in server_parts[0]:
username, password = server_parts[0].split(":")
else:
username = server_parts[0]
password = ''
server_uri = "%s://%s" % (parts[0], server_parts[1])
else:
server_uri = '%s://%s' % (parts[0], parts[1])
username = password = ""
if username:
self.transport.add_authorization(BasicAuth(username, password))
server = Server(server_uri, self.transport)
app_label = app_name.split('.')[-1]
self._databases[app_label] = server[dbname]
def sync(self, app, verbosity=2):
""" used to sync views of all applications and eventually create
database """
app_name = app.__name__.rsplit('.', 1)[0]
app_label = app_name.split('.')[-1]
if app_label in self._databases:
if verbosity >=1:
print "sync `%s` in CouchDB" % app_name
db = self._databases[app_label]
try:
db.server.create_db(db.dbname)
except:
pass
app_path = os.path.abspath(os.path.join(sys.modules[app.__name__].__file__, ".."))
loader = FileSystemDocLoader(app_path, "_design", design_name=app_label)
loader.sync(db)
def get_db(self, app_label):
""" retrieve db session for a django application """
return self._databases[app_label]
def register_schema(self, app_label, *schema):
""" register a Document object"""
for s in schema:
schema_name = schema[0].__name__.lower()
schema_dict = self.app_schema.setdefault(app_label, SortedDict())
if schema_name in schema_dict:
fname1 = os.path.abspath(sys.modules[s.__module__].__file__)
fname2 = os.path.abspath(sys.modules[schema_dict[schema_name].__module__].__file__)
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
schema_dict[schema_name] = s
s._db = self.get_db(app_label)
def get_schema(self, app_label, schema_name):
""" retriev Document object from its name and app name """
return self.app_schema.get(app_label, SortedDict()).get(schema_name.lower())
couchdbkit_handler = CouchdbkitHandler(COUCHDB_DATABASES)
register_schema = couchdbkit_handler.register_schema
get_schema = couchdbkit_handler.get_schema
get_db = couchdbkit_handler.get_db
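# Example COUCHDB_DATABASES setting understood by the handler above (hosts and
# database names are placeholders). Each entry is (app_name, uri); the uri may
# also be a (server_uri, dbname) tuple, handy when CouchDB is proxied on a path:
#
#   COUCHDB_DATABASES = [
#       ('blog', 'http://127.0.0.1:5984/blog_db'),
#       ('wiki', ('http://user:pass@couch.example.com:5984', 'wiki_db')),
#   ]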
| isc | 7,504,999,183,328,596,000 | 39.442748 | 99 | 0.601737 | false | 4.218153 | false | false | false |
insilichem/ommprotocol | ommprotocol/md.py | 1 | 30554 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ommprotocol: A command line application to launch
# MD protocols with OpenMM
# By Jaime RGP <@jaimergp>
"""
ommprotocol.md
--------------
All the logic that governs the simulation of a MD protocol,
stage by stage.
A protocol is a chained list of stages, which are Stage instances.
"""
# Stdlib
from __future__ import print_function, division
import os
import sys
from contextlib import contextmanager
import logging
# 3rd party
import numpy as np
from simtk import unit as u
from simtk import openmm as mm
from simtk.openmm import app
from mdtraj import Topology as MDTrajTopology
# Own
from .io import REPORTERS, ProgressBarReporter, SerializedReporter, prepare_system_options
from .utils import (random_string, assert_not_exists, timed_input,
available_platforms, warned_getattr)
logger = logging.getLogger(__name__)
OPENMM_VERSION = tuple(map(int, mm.__version__.split('.')))
if sys.version_info.major == 3:
basestring = str
###########################
# Defaults
###########################
FORCEFIELDS = ['amber99sbildn.xml', 'tip3p.xml']
SELECTORS = {
None: 'none',
'protein_no_H': 'protein and element != H',
'calpha': 'name == CA'
}
PRECISION = {
'CPU': None,
'CUDA': 'CudaPrecision',
'OpenCL': 'OpenCLPrecision'
}
SYSTEM_OPTIONS = {
'nonbondedMethod': app.NoCutoff,
}
DEFAULT_OPTIONS = {
'system_options': SYSTEM_OPTIONS,
}
def protocol(handler, cfg):
"""
Run all the stages in protocol
Parameters
----------
handler : SystemHandler
Container of initial conditions of simulation
cfg : dict
Imported YAML file.
"""
# Stages
if 'stages' not in cfg:
raise ValueError('Protocol must include stages of simulation')
pos, vel, box = handler.positions, handler.velocities, handler.box
stages = cfg.pop('stages')
for stage_options in stages:
options = DEFAULT_OPTIONS.copy()
options.update(cfg)
stage_system_options = prepare_system_options(stage_options)
options.update(stage_options)
options['system_options'].update(stage_system_options)
stage = Stage(handler, positions=pos, velocities=vel, box=box,
total_stages=len(stages), **options)
pos, vel, box = stage.run()
del stage
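# Sketch of the cfg dictionary protocol() expects (normally parsed from the YAML
# input file elsewhere; stage option names follow the Stage keyword arguments
# documented below and the values are only illustrative):
#
#   cfg = {
#       'temperature': 300,
#       'stages': [
#           {'name': 'minimization', 'minimization': True, 'steps': 0},
#           {'name': 'nvt_equilibration', 'steps': 50000, 'barostat': False},
#           {'name': 'npt_production', 'steps': 500000, 'barostat': True},
#       ],
#   }
#   protocol(handler, cfg)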
class Stage(object):
"""
Controls a simulation stage from a SystemHandler instance. It will handle
the actual OpenMM system and then the Simulation object. Integrators,
barostat and restraints are all easily handled too.
Using it is easy: instantiate with a SystemHandler object and then call
`run()`. However, you can also use it as an OpenMM high-level controller.
Parameters
----------
handler : simtk.openmm.Topology
The topology input file (PRMTOP, PDB)
positions : simtk.Quantity, optional
The starting coordinates of this stage. Only needed if
handler is a PRMTOP file.
steps : int, optional
Number of MD steps to simulate. If 0, no MD will take place
timestep : float, optional
Integration timestep, in fs. Defaults to 1.0.
forcefields : list of str or file-like, optional
Forcefields to apply in PDB inputs.
velocities : simtk.unit.Quantity, optional
The initial velocities of this stage. If None, they will be set
to the requested temperature
box_vectors : simtk.unit.Quantity, optional
Replacement periodic box vectors, instead of handler's.
barostat : bool, optional
True for NPT @ 1 atmosphere. False for NVT
restrained_atoms, constrained_atoms : str or None, optional
Parts of the system that should remain restrained or constrained
during the stage. Available values in SELECTORS dict.
If None, no atoms will be fixed.
distance_restrained_atoms : list of lists
Pairs of atom indices that will be distance restrained
distance_restraint_length : float or list of floats
Distances at which ``distance_restrained_atoms`` should be. It can be
a single value (all pairs will be restrained at this distance), or n
values, n being the number of pairs to be assigned to. If the value is
'current', the distance found in the starting positions is used.
distance_restraint_strength : float or list of floats
Force constants for ``distance_restrained_atoms``. It can be
a single value (all pairs will be restrained with this strength), or n
values, n being the number of pairs to be assigned to.
minimization : bool, optional
If True, minimize before MD
minimization_tolerance : float, optional, default=10 kJ/mol
Threshold value minimization should converge to
minimization_max_iterations : int, optional, default=10000
Limit minimization iterations up to this value. If zero, don't limit.
temperature : float, optional
Target temperature of system in Kelvin, defaults to 300K
trajectory : 'PDB' or 'DCD', optional
Output format of trajectory file, if desired.
trajectory_every : int, optional
Frequency of trajectory write, in number of simulation steps
trajectory_new_every : int, optional
Create a new file for trajectory (only DCD) every n steps.
trajectory_atom_subset : int, optional
Save information for just these atoms (only DCD).
restart_every : int, optional
Frequency of restart file creation. Defaults to 1E6 steps (1ns)
report_every : int, optional
Frequency of stdout print, in number of simulation steps
verbose : bool, optional
Whether to report information to stdout or not
project_name : str, optional
Name of the overall run, shared by all its stages. If not set,
five random characters will be used.
name : str, optional
Name of the stage, used as a suffix for the output files generated
by this stage. If not supplied, a random string will be used.
outputpath : str, optional
Location of output files. Working directory by default.
platform : str, optional
Which platform to use ('CPU', 'CUDA', 'OpenCL'). If not set,
OpenMM will choose the fastest available.
platform_properties : dict, optional
Additional options to be passed to the platform constructor.
system_options : dict, optional
Set of options to configure the system. See SYSTEM_OPTIONS dict
for defaults.
restraint_strength : float, optional
If restraints are in use, the strength of the applied force in
kJ/mol. Defaults to 5.0.
pressure : float, optional
Barostat pressure, in bar. Defaults to 1.01325.
integrator : simtk.openmm.Integrator, optional
Which integrator to use. Defaults to LangevinIntegrator.
friction : float, optional
Friction coefficient for LangevinIntegrator, in 1/ps. Defaults to 1.0.
barostat_interval : float, optional
Interval of steps at which barostat updates. Defaults to 25 steps.
save_state_at_end : bool, optional
Whether to create a state.xml file at the end of the stage or not.
attempt_rescue : bool, optional
Whether to try to generate an emergency state file if an exception
is raised.
total_stages : int, optional
Total number of stages in the protocol. Used for progress reporting
and to zero-pad the stage index in output file names.
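Examples
--------
A minimal sketch, assuming ``handler`` is a SystemHandler whose positions
are already set::

    stage = Stage(handler, steps=1000, minimization=True, name='equilibration')
    positions, velocities, box = stage.run()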
"""
_PROJECTNAME = random_string(length=5)
_stage_index = [0]
def __init__(self, handler, positions=None, velocities=None, box=None,
steps=0, minimization=False, barostat=False, temperature=300,
timestep=1.0, pressure=1.01325, integrator='LangevinIntegrator',
barostat_interval=25, system_options=None, platform=None,
platform_properties=None, trajectory=None, trajectory_every=2000,
outputpath='.', trajectory_atom_subset=None, trajectory_new_every=0,
restart=None, restart_every=1000000, report=True, report_every=1000,
project_name=None, name=None, restrained_atoms=None,
restraint_strength=5, constrained_atoms=None, friction=1.0,
distance_restrained_atoms=None, distance_restraint_length=2,
distance_restraint_strength=5, total_stages=None, verbose=True,
minimization_tolerance=10, minimization_max_iterations=10000,
save_state_at_end=True, attempt_rescue=True,
**kwargs):
for k in kwargs:
if not k.startswith('_'):
logger.warning('Option %s not recognized!', k)
# System properties
self.handler = handler
self.positions = positions
self.velocities = velocities
self.box = box
self.system_options = system_options if system_options else {}
self.restrained_atoms = restrained_atoms
self.restraint_strength = restraint_strength
self.constrained_atoms = constrained_atoms
self.distance_restrained_atoms = np.reshape(distance_restrained_atoms, (-1, 2)) \
if distance_restrained_atoms else None
self.distance_restraint_length = distance_restraint_length
self.distance_restraint_strength = distance_restraint_strength
# Simulation conditions
self.steps = int(steps)
self.minimization = minimization
self.minimization_tolerance = minimization_tolerance
self.minimization_max_iterations = minimization_max_iterations
self.barostat = barostat
self.temperature = temperature
self.timestep = timestep
self.pressure = pressure
self._integrator_name = integrator
self.friction = friction
self.barostat_interval = int(barostat_interval)
# Hardware
self._platform = platform
self.platform_properties = {} if platform_properties is None else platform_properties
# Output parameters
self.project_name = project_name if project_name is not None else self._PROJECTNAME
self.name = name if name is not None else random_string(length=5)
self.outputpath = outputpath
self.verbose = verbose
self.trajectory = trajectory
self.trajectory_every = int(trajectory_every)
self.trajectory_new_every = int(trajectory_new_every)
self.trajectory_atom_subset = self.subset(trajectory_atom_subset) if trajectory_atom_subset else None
self.restart = restart
self.restart_every = int(restart_every)
self.report = report
self.report_every = int(report_every)
self.save_state_at_end = save_state_at_end
self.attempt_rescue = attempt_rescue
self.total_stages = total_stages
# Private attributes
self._system = None
self._simulation = None
self._integrator = None
self._progress_reporter = None
self._log_reporter = None
self._trajectory_reporter = None
self._restart_reporter = None
self._mass_options = {}
self._stage_index[0] += 1
def run(self):
"""
Launch MD simulation, which may consist of:
1. Optional minimization
2. Actual MD simulation, with n steps.
This method also handles reporters.
Returns
-------
positions, velocities : unit.Quantity([natoms, 3])
Position, velocity of each atom in the system
box : unit.Quantity([3, 3])
Periodic box vectors
"""
if self.verbose:
status = '#{}'.format(self.stage_index)
if self.total_stages is not None:
status += '/{}'.format(self.total_stages)
status += ': {}'.format(self.name)
pieces = []
if self.restrained_atoms is not None:
pieces.append('restrained {}'.format(self.restrained_atoms))
if self.constrained_atoms is not None:
pieces.append('constrained {}'.format(self.constrained_atoms))
if self.distance_restrained_atoms is not None:
pieces.append('distance restrained for {} atom pairs'.format(len(self.distance_restrained_atoms)))
if pieces:
status += ' [{}]'.format(', '.join(pieces))
logger.info(status)
# Add forces
self.apply_restraints()
self.apply_constraints()
if self.barostat:
self.apply_barostat()
if self.minimization:
if self.verbose:
logger.info(' Minimizing...')
self.minimize()
uses_pbc = self.system.usesPeriodicBoundaryConditions()
if self.steps:
# Stdout progress
if self.report and self.progress_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.progress_reporter)
# Log report
if self.report and self.log_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.log_reporter)
# Trajectory / movie files
if self.trajectory and self.trajectory_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.trajectory_reporter)
# Checkpoint or restart files
if self.restart and self.restart_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.restart_reporter)
# MD simulation
if self.verbose:
pbc = 'PBC ' if uses_pbc else ''
conditions = 'NPT' if self.barostat else 'NVT'
logger.info(' Running {}MD for {} steps @ {}K, {}'.format(pbc, self.steps,
self.temperature,
conditions))
with self.handle_exceptions():
self.simulate()
if self.save_state_at_end:
path = self.new_filename(suffix='.state')
self.simulation.saveState(path)
# Save and return state
state = self.simulation.context.getState(getPositions=True, getVelocities=True,
enforcePeriodicBox=uses_pbc)
return state.getPositions(), state.getVelocities(), state.getPeriodicBoxVectors()
def minimize(self, tolerance=None, max_iterations=None):
"""
Minimize energy of the system until meeting `tolerance` or
performing `max_iterations`.
"""
if tolerance is None:
tolerance = self.minimization_tolerance
if max_iterations is None:
max_iterations = self.minimization_max_iterations
self.simulation.minimizeEnergy(tolerance * u.kilojoules_per_mole, max_iterations)
def simulate(self, steps=None):
"""
Advance simulation n steps
"""
if steps is None:
steps = self.steps
self.simulation.step(steps)
@property
def system(self):
if self._system is None:
if self.constrained_atoms and self.system_options.pop('constraints', None):
logger.warning(' Warning: `constraints` and `constrained_atoms` are incompatible. '
'Removing `constraints` option for this stage.')
self._system = self.handler.create_system(**self.system_options)
return self._system
@system.deleter
def system(self):
del self._system
self._system = None
@property
def simulation(self):
if self._simulation is None:
platform = self.platform
try:
sim = self._simulation = app.Simulation(self.handler.topology, self.system,
self.integrator, *platform)
except Exception as e:
template = '{}. Try with: {}.'
if 'Illegal property name' in str(e):
msg = template.format(e, ', '.join(platform[0].getPropertyNames()))
raise ValueError(msg)
elif 'There is no registered Platform' in str(e):
msg = template.format(e, ', '.join(available_platforms()))
raise ValueError(msg)
raise e
# Box vectors
box = self.box if self.box is not None else self.handler.box
if box is not None:
sim.context.setPeriodicBoxVectors(*box)
# Positions
pos = self.positions if self.positions is not None else self.handler.positions
if pos is None:
raise ValueError('Positions must be set to start a simulation.')
sim.context.setPositions(pos)
# Velocities
vel = self.velocities if self.velocities is not None else self.handler.velocities
if vel is not None:
sim.context.setVelocities(vel)
else:
sim.context.setVelocitiesToTemperature(self.temperature*u.kelvin)
return self._simulation
@simulation.deleter
def simulation(self):
del self._simulation
self._simulation = None
@property
def integrator(self):
if self._integrator is None:
try:
i = getattr(mm, self._integrator_name)
except (TypeError, AttributeError):
raise NotImplementedError('Integrator {} not found'
.format(self._integrator_name))
self._integrator = i(self.temperature * u.kelvin,
self.friction / u.picoseconds,
self.timestep * u.femtoseconds)
return self._integrator
@integrator.deleter
def integrator(self):
del self._integrator
self._integrator = None
@property
def platform(self):
if self._platform is None:
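# No platform explicitly requested: return a 1-tuple (None,) so that
# app.Simulation(..., *platform) receives platform=None and OpenMM picks
# the fastest available platform itself.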
return None,
platform = mm.Platform.getPlatformByName(self._platform)
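# On CUDA with OpenMM older than 7.2.3, disable the separate PME stream
# by default; this can still be overridden through platform_properties.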
if self._platform.upper() == 'CUDA' and OPENMM_VERSION < (7, 2, 3) \
and 'DisablePmeStream' not in self.platform_properties:
self.platform_properties['DisablePmeStream'] = 'true'
# Patch to allow env-defined GPUs
device = self.platform_properties.get('DeviceIndex', '')
if str(device).startswith('ENV_'):
envvar = os.environ.get(device[4:], None)
if envvar is not None:
logger.warning('Setting DeviceIndex from env var %s to %s', device[4:], envvar)
self.platform_properties['DeviceIndex'] = envvar
return platform, self.platform_properties
def reporter(self, name):
try:
return REPORTERS[name.upper()]
except KeyError:
raise NotImplementedError('Reporter {} not found'.format(name))
def apply_barostat(self):
if not self.system.usesPeriodicBoundaryConditions():
raise ValueError('Barostat can only be used with PBC conditions.')
self.system.addForce(mm.MonteCarloBarostat(self.pressure*u.bar,
self.temperature*u.kelvin,
self.barostat_interval))
def apply_constraints(self):
if self.constrained_atoms is not None:
indices = self.subset(self.constrained_atoms)
system = self.system
for i in indices:
system.setParticleMass(int(i), 0.0)
def apply_restraints(self):
if self.restrained_atoms:
indices = self.subset(self.restrained_atoms)
r_force = self.restraint_force(indices, self.restraint_strength)
self.system.addForce(r_force)
if self.distance_restrained_atoms is not None:
atoms = self.distance_restrained_atoms
if isinstance(self.distance_restraint_length, (int, float, basestring)):
distances = [self.distance_restraint_length] * atoms.shape[0]
elif len(self.distance_restraint_length) == atoms.shape[0]:
distances = self.distance_restraint_length
else:
raise ValueError('Restraint distances do not match '
'number of distance restrained pairs.')
if isinstance(self.distance_restraint_strength, (int, float)):
strengths = [self.distance_restraint_strength] * atoms.shape[0]
elif len(self.distance_restraint_strength) == atoms.shape[0]:
strengths = self.distance_restraint_strength
else:
raise ValueError('Restraint distance strengths do not '
'match number of distance restrained pairs.')
d_force = self.distance_restraint_force(atoms, distances, strengths)
self.system.addForce(d_force)
def restraint_force(self, indices=None, strength=5.0):
"""
Force that restrains atoms to fix their positions, while allowing
tiny movement to resolve severe clashes and so on.
Returns
-------
force : simtk.openmm.CustomExternalForce
A custom force to restrain the selected atoms
"""
if self.system.usesPeriodicBoundaryConditions():
expression = 'k*periodicdistance(x, y, z, x0, y0, z0)^2'
else:
expression = 'k*((x-x0)^2 + (y-y0)^2 + (z-z0)^2)'
force = mm.CustomExternalForce(expression)
force.addGlobalParameter('k', strength*u.kilocalories_per_mole/u.angstroms**2)
force.addPerParticleParameter('x0')
force.addPerParticleParameter('y0')
force.addPerParticleParameter('z0')
positions = self.positions if self.positions is not None else self.handler.positions
if indices is None:
indices = range(self.handler.topology.getNumAtoms())
for index in indices:
force.addParticle(int(index), positions[index].value_in_unit(u.nanometers))
return force
def distance_restraint_force(self, atoms, distances, strengths):
"""
Parameters
----------
atoms : tuple of tuple of int or str
Pairs of atom indices to be restrained, with shape (n, 2),
like ((a1, a2), (a3, a4)). Items can be str compatible with MDTraj DSL.
distances : tuple of float
Equilibrium distances for each pair
strengths : tuple of float
Force constant for each pair
"""
system = self.system
force = mm.HarmonicBondForce()
force.setUsesPeriodicBoundaryConditions(self.system.usesPeriodicBoundaryConditions())
for pair, distance, strength in zip(atoms, distances, strengths):
indices = []
for atom in pair:
if isinstance(atom, str):
index = self.subset(atom)
if len(index) != 1:
raise ValueError('Distance restraint for selection `{}` returns != 1 atom!: {}'
.format(atom, index))
indices.append(int(index[0]))
elif isinstance(atom, (int, float)):
indices.append(int(atom))
else:
raise ValueError('Distance restraint atoms must be int or str DSL selections')
if distance == 'current':
pos = self.positions if self.positions is not None else self.handler.positions
distance = np.linalg.norm(pos[indices[0]] - pos[indices[1]])
force.addBond(indices[0], indices[1], distance*u.nanometers,
strength*u.kilocalories_per_mole/u.angstroms**2)
return force
def subset(self, selector):
"""
Returns a list of atom indices corresponding to a MDTraj DSL
query. Also will accept list of numbers, which will be coerced
to int and returned.
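For example, both ``subset('protein and element != H')`` and
``subset([0, 1, 2])`` are valid calls.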
"""
if isinstance(selector, (list, tuple)):
return list(map(int, selector))
selector = SELECTORS.get(selector, selector)
mdtop = MDTrajTopology.from_openmm(self.handler.topology)
return mdtop.select(selector)
@property
def system_mass(self):
system_mass = sum(a.element.mass._value for a in self.handler.topology.atoms())
return system_mass * u.dalton
@property
def progress_reporter(self):
if self._progress_reporter is None:
if os.environ.get('OMMPROTOCOL_SLAVE'):
rep = SerializedReporter(sys.stdout, self.report_every)
else:
rep = ProgressBarReporter(sys.stdout, self.report_every, total_steps=self.steps)
self._progress_reporter = rep
return self._progress_reporter
@progress_reporter.deleter
def progress_reporter(self):
try:
self.simulation.reporters.remove(self._progress_reporter)
except ValueError:
pass
self._progress_reporter = None
@property
def log_reporter(self):
if self._log_reporter is None:
mass = {'systemMass': self.system_mass} if self.constrained_atoms else {}
path = self.new_filename(suffix='.log')
rep = app.StateDataReporter(path, self.report_every, step=True,
potentialEnergy=True, kineticEnergy=True,
temperature=True, volume=True, progress=True,
remainingTime=True, speed=True,
totalSteps=self.steps, separator='\t', **mass)
self._log_reporter = rep
return self._log_reporter
@log_reporter.deleter
def log_reporter(self):
try:
self.simulation.reporters.remove(self._log_reporter)
except ValueError:
pass
self._log_reporter = None
@property
def trajectory_reporter(self):
if self._trajectory_reporter is None:
suffix = '.{}'.format(self.trajectory.lower())
path = self.new_filename(suffix=suffix)
options = {}
if self.trajectory == 'DCD':
options.update({'new_every': self.trajectory_new_every,
'atomSubset': self.trajectory_atom_subset})
rep = self.reporter(self.trajectory)(path, self.trajectory_every, **options)
self._trajectory_reporter = rep
return self._trajectory_reporter
@trajectory_reporter.deleter
def trajectory_reporter(self):
try:
self.simulation.reporters.remove(self._trajectory_reporter)
except ValueError:
pass
self._trajectory_reporter = None
@property
def restart_reporter(self):
if self._restart_reporter is None:
suffix = '.{}.{}'.format(self.restart.lower(), self.restart_every)
path, ext_or_int, n = self.new_filename(suffix=suffix).rsplit('.', 2)
try:
ext_or_int = int(ext_or_int) # Is ext an integer?
except ValueError: # Ext is the actual file extension
path = '{}.{}'.format(path, ext_or_int)
else: # Ext is an int! Reformat
name, ext = os.path.splitext(path)
path = '{}.{}{}'.format(name, ext_or_int, ext)
rep = self.reporter(self.restart)(path, self.restart_every)
self._restart_reporter = rep
return self._restart_reporter
@restart_reporter.deleter
def restart_reporter(self):
try:
self.simulation.reporters.remove(self._restart_reporter)
except ValueError:
pass
self._restart_reporter = None
@property
def stage_index(self):
return self._stage_index[0]
@stage_index.setter
def stage_index(self, value):
self._stage_index[0] = value
def new_filename(self, suffix='', prefix='', avoid_overwrite=True):
fn_template = '{prefix}{project}_{index:0{index_len}d}_{stage}{suffix}'
filename = fn_template.format(prefix=prefix, project=self.project_name,
index=self.stage_index, stage=self.name,
suffix=suffix,
index_len=len(str(self.total_stages)))
path = os.path.join(self.outputpath, filename)
if avoid_overwrite:
path = assert_not_exists(path)
return path
@contextmanager
def handle_exceptions(self, verbose=True):
"""
Handle Ctrl+C and accidental exceptions and attempt to save
the current state of the simulation
"""
try:
yield
except (KeyboardInterrupt, Exception) as ex:
if not self.attempt_rescue:
raise ex
if isinstance(ex, KeyboardInterrupt):
reraise = False
answer = timed_input('\n\nDo you want to save current state? (y/N): ')
if answer and answer.lower() not in ('y', 'yes'):
if verbose:
sys.exit('Ok, bye!')
else:
reraise = True
logger.error('\n\nAn error occurred: %s', ex)
if verbose:
logger.info('Saving state...')
try:
self.backup_simulation()
except Exception:
if verbose:
logger.error('FAILED :(')
else:
if verbose:
logger.info('SUCCESS!')
finally:
if reraise:
raise ex
sys.exit()
def backup_simulation(self):
"""
Creates an emergency backup: saves a .state file and forces a final report from every reporter
"""
path = self.new_filename(suffix='_emergency.state')
self.simulation.saveState(path)
uses_pbc = self.system.usesPeriodicBoundaryConditions()
state_kw = dict(getPositions=True, getVelocities=True,
getForces=True, enforcePeriodicBox=uses_pbc,
getParameters=True, getEnergy=True)
state = self.simulation.context.getState(**state_kw)
for reporter in self.simulation.reporters:
if not isinstance(reporter, app.StateDataReporter):
reporter.report(self.simulation, state)
| lgpl-3.0 | -3,036,808,844,007,292,000 | 40.233468 | 114 | 0.600445 | false | 4.377364 | false | false | false |
tgibson37/tiny-c | cairopy.py | 1 | 2552 | import sys
import os
import cairo
import shlex
def comment(cmt):
cmt = cmt[:-1]
#print "in comment: ", cmt
def draw(x):
global ctx
'''
hard way: very long list of if-elif's
if x[0] == "moveto":
ctx.move_to( int(x[1]), int(x[2]) )
elif x[0] == "lineto":
ctx.line_to( int(x[1]), int(x[2]) )
ctx.stroke()
easy way: do ALL cmds with int or float only args:
given x = [name,1,2] produce string = "ctx.<name>(1, 2)"
then eval(string)
'''
cmd=x[0]
argslist=x[1:]
args= ','.join(argslist)
string = "ctx." + cmd + "(" + args + ")"
#print "in draw: ", string
eval(string)
def window(x):
global surface, ctx, width, height
width = int(x[1])
height= int(x[2])
#print "in window: ", x
surface = cairo.ImageSurface(cairo.FORMAT_RGB24, width, height)
ctx = cairo.Context(surface)
ctx.rectangle(0,0,width,height)
ctx.set_source_rgb(1,1,1)
ctx.fill()
ctx.set_source_rgb(0,0,0)
def show():
global surface
#print "in show: ", pngname
ctx.stroke()
surface.write_to_png(pngname)
from subprocess import call
if len(show_app)>0:
call([show_app, pngname])
def usage():
print("Usage: ", sys.argv[0], "drawfile")
# It all starts here
if len(sys.argv)>1:
drawname = sys.argv[1]
drawfile = sys.argv[1] + ".draw"
pngname = sys.argv[1] + ".png"
else:
usage()
sys.exit()
if os.name == "nt":
show_app = "mspaint.exe"
else:
show_app = "display"
M_PI = 3.14159265358979323846
with open(drawfile) as f:
for line in f:
x = shlex.split(line)
if line.startswith('#'):  # comment line
comment(line)
elif x[0]=="arc":
r1 = float(x[1])
r2 = float(x[2])
r3 = float(x[3])
d4 = int(x[4])
r4 = float(d4*(M_PI/180))
d5 = int(x[5])
r5 = float(d5*(M_PI/180))
ctx.arc(r1,r2,r3,r4,r5)
elif x[0]=="arcneg":
r1 = float(x[1])
r2 = float(x[2])
r3 = float(x[3])
d4 = int(x[4])
r4 = float(d4*(M_PI/180))
d5 = int(x[5])
r5 = float(d5*(M_PI/180))
ctx.arc_negative(r1,r2,r3,r4,r5)
elif x[0]=="setdash":
#print ""
#print x
d = x[1:-1]
id = [float(x) for x in d]
st = float(x[-1])
#print "id:", id
#print "st:", st
ctx.set_dash(id,st)
elif x[0]=="setrgb":
r1 = float(x[1])/256
r2 = float(x[2])/256
r3 = float(x[3])/256
ctx.set_source_rgb(r1,r2,r3)
elif x[0]=="show":
if len(x)>1:
image_display=x[1]
show()
elif x[0]=="showapp":
if len(x)>1:
show_app = x[1]
else:
show_app = ""
elif x[0]=="showtext":
ctx.show_text(x[1])
elif x[0]=="window":
window(x)
else:
draw(x)
'''
elif x[0]=="":
ctx.
'''
| gpl-3.0 | 6,053,136,622,952,715,000 | 20.266667 | 64 | 0.574843 | false | 2.19244 | false | false | false |
MarkusHackspacher/unknown-horizons | horizons/gui/tabs/residentialtabs.py | 1 | 5161 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.command.uioptions import SetTaxSetting
from horizons.component.namedcomponent import NamedComponent
from horizons.constants import SETTLER
from horizons.gui.util import create_resource_icon, get_happiness_icon_and_helptext
from horizons.i18n import gettext_lazy as LazyT
from horizons.messaging import SettlerUpdate
from horizons.util.loaders.actionsetloader import ActionSetLoader
from horizons.util.python.callback import Callback
from .overviewtab import OverviewTab
class SettlerOverviewTab(OverviewTab):
widget = 'overview_settler.xml'
helptext = LazyT("Settler overview")
def init_widget(self):
super().init_widget()
name = self.instance.settlement.get_component(NamedComponent).name
self.widget.findChild(name="headline").text = name
setup_tax_slider(self.widget.child_finder('tax_slider'),
self.widget.child_finder('tax_val_label'),
self.instance.settlement,
self.instance.level)
taxes = self.instance.settlement.tax_settings[self.instance.level]
self.widget.child_finder('tax_val_label').text = str(taxes)
action_set = ActionSetLoader.get_set(self.instance._action_set_id)
action_gfx = list(action_set.items())[0][1]
image = list(action_gfx[45].keys())[0]
self.widget.findChild(name="building_image").image = image
def on_settler_level_change(self, message):
assert isinstance(message, SettlerUpdate)
setup_tax_slider(self.widget.child_finder('tax_slider'),
self.widget.child_finder('tax_val_label'),
self.instance.settlement,
message.level)
taxes = self.instance.settlement.tax_settings[self.instance.level]
self.widget.child_finder('tax_val_label').text = str(taxes)
imgs = list(ActionSetLoader.get_set(self.instance._action_set_id).items())[0][1]
self.widget.findChild(name="building_image").image = list(imgs[45].keys())[0]
def show(self):
super().show()
SettlerUpdate.subscribe(self.on_settler_level_change, sender=self.instance)
def hide(self):
SettlerUpdate.discard(self.on_settler_level_change, sender=self.instance)
super().hide()
def refresh(self):
image, helptext = get_happiness_icon_and_helptext(self.instance.happiness, self.instance.session)
self.widget.child_finder('happiness_label').image = image
self.widget.child_finder('happiness_label').helptext = helptext
self.widget.child_finder('happiness').progress = self.instance.happiness
self.widget.child_finder('inhabitants').text = "{}/{}".format(
self.instance.inhabitants,
self.instance.inhabitants_max)
self.widget.child_finder('taxes').text = str(self.instance.last_tax_payed)
self.update_consumed_res()
name = self.instance.settlement.get_component(NamedComponent).name
self.widget.findChild(name="headline").text = name
events = {
'headline': Callback(self.instance.session.ingame_gui.show_change_name_dialog,
self.instance.settlement)
}
self.widget.mapEvents(events)
super().refresh()
def update_consumed_res(self):
"""Updates the container that displays the needed resources of the settler"""
container = self.widget.findChild(name="needed_res")
# remove icons from the container
container.removeAllChildren()
# create new ones
resources = self.instance.get_currently_not_consumed_resources()
for res in resources:
icon = create_resource_icon(res, self.instance.session.db)
icon.max_size = icon.min_size = icon.size = (32, 32)
container.addChild(icon)
container.adaptLayout()
def setup_tax_slider(slider, val_label, settlement, level):
"""Set up a slider to work as tax slider"""
step_count = int((SETTLER.TAX_SETTINGS_MAX - SETTLER.TAX_SETTINGS_MIN) / SETTLER.TAX_SETTINGS_STEP)
slider.steps = [SETTLER.TAX_SETTINGS_MIN + SETTLER.TAX_SETTINGS_STEP * i for i in
range(step_count)]
slider.value = settlement.tax_settings[level]
def on_slider_change():
val_label.text = str(slider.value)
if settlement.tax_settings[level] != slider.value:
SetTaxSetting(settlement, level, slider.value).execute(settlement.session)
slider.capture(on_slider_change)
| gpl-2.0 | -4,440,255,941,613,164,500 | 42.737288 | 100 | 0.708584 | false | 3.422414 | false | false | false |
davidsminor/gaffer | python/GafferRenderManTest/RenderManShaderTest.py | 1 | 61179 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferRenderMan
import GafferRenderManTest
class RenderManShaderTest( GafferRenderManTest.RenderManTestCase ) :
def setUp( self ) :
GafferRenderManTest.RenderManTestCase.setUp( self )
GafferRenderMan.RenderManShader.shaderLoader().clear()
def test( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
self.failUnless( isinstance( n["parameters"]["Ks"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["Kd"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["Ka"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["roughness"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["specularcolor"], Gaffer.Color3fPlug ) )
self.assertEqual( n["parameters"]["Ks"].getValue(), 0.5 )
self.assertEqual( n["parameters"]["Kd"].getValue(), 0.5 )
self.assertEqual( n["parameters"]["Ka"].getValue(), 1 )
self.assertAlmostEqual( n["parameters"]["roughness"].getValue(), 0.1 )
self.assertEqual( n["parameters"]["specularcolor"].getValue(), IECore.Color3f( 1 ) )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( "plastic" )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
st = s["n"].state()
self.assertEqual( len( st ), 1 )
self.assertEqual( st[0].type, "ri:surface" )
self.assertEqual( st[0].name, "plastic" )
self.failUnless( isinstance( s["n"]["parameters"]["Ks"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["Kd"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["Ka"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["roughness"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["specularcolor"], Gaffer.Color3fPlug ) )
self.assertTrue( "parameters1" not in s["n"] )
def testShader( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
s = n.state()
self.assertEqual( len( s ), 1 )
self.assertEqual( s[0].type, "ri:surface" )
self.assertEqual( s[0].name, "plastic" )
self.assertEqual( s[0].parameters["Ks"], IECore.FloatData( .5 ) )
self.assertEqual( s[0].parameters["Kd"], IECore.FloatData( .5 ) )
self.assertEqual( s[0].parameters["Ka"], IECore.FloatData( 1 ) )
self.assertEqual( s[0].parameters["roughness"], IECore.FloatData( .1 ) )
self.assertEqual( s[0].parameters["specularcolor"], IECore.Color3fData( IECore.Color3f( 1 ) ) )
def testShaderHash( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "checker" )
h1 = n.stateHash()
n["parameters"]["Kd"].setValue( 0.25 )
self.assertNotEqual( n.stateHash(), h1 )
def testCoshaderHash( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( "coshaderParameter" in shaderNode["parameters"] )
self.assertEqual( shaderNode["parameters"]["coshaderParameter"].typeId(), Gaffer.Plug.staticTypeId() )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
h1 = shaderNode.stateHash()
coshaderNode["parameters"]["floatParameter"].setValue( 0.25 )
self.assertNotEqual( shaderNode.stateHash(), h1 )
def testParameterOrdering( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
self.assertEqual( n["parameters"][0].getName(), "Ks" )
self.assertEqual( n["parameters"][1].getName(), "Kd" )
self.assertEqual( n["parameters"][2].getName(), "Ka" )
self.assertEqual( n["parameters"][3].getName(), "roughness" )
self.assertEqual( n["parameters"][4].getName(), "specularcolor" )
n = GafferRenderMan.RenderManShader()
n.loadShader( "matte" )
self.assertEqual( n["parameters"][0].getName(), "Ka" )
self.assertEqual( n["parameters"][1].getName(), "Kd" )
def testCoshader( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( "coshaderParameter" in shaderNode["parameters"] )
self.assertEqual( shaderNode["parameters"]["coshaderParameter"].typeId(), Gaffer.Plug.staticTypeId() )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
s = shaderNode.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, shader )
self.assertEqual( s[0].parameters["__handle"], s[1].parameters["coshaderParameter"] )
def testInputAcceptance( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
random = Gaffer.Random()
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderNode["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameter"].acceptsInput( random["outFloat"] ) )
self.assertTrue( shaderNode["parameters"]["floatParameter"].acceptsInput( random["outFloat"] ) )
self.assertFalse( shaderNode["parameters"]["floatParameter"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( coshaderNode["parameters"]["colorParameter"].acceptsInput( random["outColor"] ) )
self.assertFalse( coshaderNode["parameters"]["colorParameter"].acceptsInput( coshaderNode["out"] ) )
def testParameterDefaultValue( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertEqual( shaderNode["parameters"]["floatParameter"].defaultValue(), 1 )
def testParameterMinMax( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertEqual( shaderNode["parameters"]["floatParameter"].minValue(), -1 )
self.assertEqual( shaderNode["parameters"]["floatParameter"].maxValue(), 10 )
def testReload( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader1 )
shaderNode["parameters"]["float1"].setValue( 0.1 )
shaderNode["parameters"]["string1"].setValue( "test" )
shaderNode["parameters"]["color1"].setValue( IECore.Color3f( 1, 2, 3 ) )
self.assertAlmostEqual( shaderNode["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "test" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl" )
shaderNode.loadShader( shader2, keepExistingValues=True )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1", "float2", "string2", "color2" ] )
self.assertAlmostEqual( shaderNode["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "test" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
shaderNode.loadShader( shader1, keepExistingValues=True )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1" ] )
self.assertAlmostEqual( shaderNode["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "test" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
shaderNode.loadShader( shader1, keepExistingValues=False )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1" ] )
self.assertEqual( shaderNode["parameters"]["float1"].getValue(), 1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 1, 1 ) )
def testReloadRemovesOldParameters( self ) :
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader2 )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1", "float2", "string2", "color2" ] )
shader3 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version3.sl" )
shaderNode.loadShader( shader3 )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1", "float2" ] )
def testAutomaticReloadOnScriptLoad( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
s = Gaffer.ScriptNode()
s["shader"] = GafferRenderMan.RenderManShader()
s["shader"].loadShader( shader1 )
s["shader"]["parameters"]["float1"].setValue( 0.1 )
s["shader"]["parameters"]["string1"].setValue( "test" )
s["shader"]["parameters"]["color1"].setValue( IECore.Color3f( 1, 2, 3 ) )
ss = s.serialise()
self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["shader"]["parameters"].keys(), [ "float1", "string1", "color1", "float2", "string2", "color2" ] )
self.assertAlmostEqual( s["shader"]["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( s["shader"]["parameters"]["string1"].getValue(), "test" )
self.assertEqual( s["shader"]["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
def testReloadPreservesConnections( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
random = Gaffer.Random()
n["parameters"]["Ks"].setInput( random["outFloat"] )
n["parameters"]["specularcolor"].setInput( random["outColor"] )
n.loadShader( "plastic", keepExistingValues = True )
self.assertTrue( n["parameters"]["Ks"].getInput().isSame( random["outFloat"] ) )
self.assertTrue( n["parameters"]["specularcolor"].getInput().isSame( random["outColor"] ) )
def testReloadPreservesConnectionsWhenMinMaxOrDefaultChanges( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader1 )
self.assertFalse( n["parameters"]["float1"].hasMinValue() )
self.assertFalse( n["parameters"]["float1"].hasMaxValue() )
self.assertEqual( n["parameters"]["string1"].defaultValue(), "" )
nn = Gaffer.Node()
nn["outFloat"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out )
nn["outString"] = Gaffer.StringPlug( direction = Gaffer.Plug.Direction.Out )
n["parameters"]["float1"].setInput( nn["outFloat"] )
n["parameters"]["string1"].setInput( nn["outString"] )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
n.loadShader( shader1, keepExistingValues=True )
self.assertTrue( n["parameters"]["float1"].hasMinValue() )
self.assertTrue( n["parameters"]["float1"].hasMaxValue() )
self.assertEqual( n["parameters"]["float1"].minValue(), -1 )
self.assertEqual( n["parameters"]["float1"].maxValue(), 2 )
self.assertEqual( n["parameters"]["string1"].defaultValue(), "newDefaultValue" )
self.assertTrue( n["parameters"]["float1"].getInput().isSame( nn["outFloat"] ) )
self.assertTrue( n["parameters"]["string1"].getInput().isSame( nn["outString"] ) )
def testReloadPreservesPartialConnectionsWhenMinMaxOrDefaultChanges( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader1 )
nn = Gaffer.Node()
nn["outFloat"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out )
n["parameters"]["color1"][0].setInput( nn["outFloat"] )
n["parameters"]["color1"][1].setInput( nn["outFloat"] )
n["parameters"]["color1"][2].setValue( 0.75 )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
n.loadShader( shader1, keepExistingValues=True )
self.assertTrue( n["parameters"]["color1"][0].getInput().isSame( nn["outFloat"] ) )
self.assertTrue( n["parameters"]["color1"][1].getInput().isSame( nn["outFloat"] ) )
self.assertEqual( n["parameters"]["color1"][2].getValue(), 0.75 )
def testReloadPreservesValuesWhenMinMaxOrDefaultChanges( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader1 )
n["parameters"]["float1"].setValue( 0.25 )
n["parameters"]["string1"].setValue( "dog" )
n["parameters"]["color1"].setValue( IECore.Color3f( 0.1, 0.25, 0.5 ) )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
n.loadShader( shader1, keepExistingValues=True )
self.assertEqual( n["parameters"]["float1"].getValue(), 0.25 )
self.assertEqual( n["parameters"]["string1"].getValue(), "dog" )
self.assertEqual( n["parameters"]["color1"].getValue(), IECore.Color3f( 0.1, 0.25, 0.5 ) )
def testOutputParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version3.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.failIf( "outputFloat" in n["parameters"].keys() )
def testAssignmentDirtyPropagation( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
plane = GafferScene.Plane()
assignment = GafferScene.ShaderAssignment()
assignment["in"].setInput( plane["out"] )
assignment["shader"].setInput( shaderNode["out"] )
cs = GafferTest.CapturingSlot( assignment.plugDirtiedSignal() )
coshaderNode["parameters"]["floatParameter"].setValue( 12 )
dirtiedNames = [ x[0].fullName() for x in cs ]
self.assertEqual( len( dirtiedNames ), 3 )
self.assertEqual( dirtiedNames[0], "ShaderAssignment.shader" )
self.assertEqual( dirtiedNames[1], "ShaderAssignment.out.attributes" )
self.assertEqual( dirtiedNames[2], "ShaderAssignment.out" )
def testArrayParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
expected = {
"dynamicFloatArray" : IECore.FloatVectorData( [] ),
"fixedFloatArray" : IECore.FloatVectorData( [ 1, 2, 3, 4 ] ),
"dynamicStringArray" : IECore.StringVectorData( [ "dynamic", "arrays", "can", "still", "have", "defaults" ] ),
"fixedStringArray" : IECore.StringVectorData( [ "hello", "goodbye" ] ),
"dynamicColorArray" : IECore.Color3fVectorData( [ IECore.Color3f( 1 ), IECore.Color3f( 2 ) ] ),
"fixedColorArray" : IECore.Color3fVectorData( [ IECore.Color3f( 1 ), IECore.Color3f( 2 ) ] ),
"dynamicVectorArray" : IECore.V3fVectorData( [] ),
"fixedVectorArray" : IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 6 ) ] ),
"dynamicPointArray" : IECore.V3fVectorData( [] ),
"fixedPointArray" : IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 6 ) ] ),
"dynamicNormalArray" : IECore.V3fVectorData( [] ),
"fixedNormalArray" : IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 6 ) ] ),
}
self.assertEqual( set( n["parameters"].keys() ), set( expected.keys() ) )
for name, value in expected.items() :
self.assertEqual( n["parameters"][name].defaultValue(), value )
self.assertEqual( n["parameters"][name].getValue(), value )
s = n.state()[0]
for name, value in expected.items() :
self.assertEqual( s.parameters[name], value )
def testFixedCoshaderArrayParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.assertEqual( n["parameters"].keys(), [ "dynamicShaderArray", "fixedShaderArray" ] )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"], Gaffer.CompoundPlug ) )
self.assertEqual( len( n["parameters"]["fixedShaderArray"] ), 4 )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray0"], Gaffer.Plug ) )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray1"], Gaffer.Plug ) )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray2"], Gaffer.Plug ) )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray3"], Gaffer.Plug ) )
state = n.state()
self.assertEqual( state[0].parameters["fixedShaderArray"], IECore.StringVectorData( [ "" ] * 4 ) )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
n["parameters"]["fixedShaderArray"]["fixedShaderArray0"].setInput( coshaderNode["out"] )
state = n.state()
self.assertEqual( state[1].parameters["fixedShaderArray"], IECore.StringVectorData( [ state[0].parameters["__handle"].value, "", "", "" ] ) )
def testCoshaderType( self ) :
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
self.assertEqual( coshaderNode.state()[0].type, "ri:shader" )
def testCantConnectSurfaceShaderIntoCoshaderInput( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
n1 = GafferRenderMan.RenderManShader()
n1.loadShader( shader )
n2 = GafferRenderMan.RenderManShader()
n2.loadShader( "plastic" )
self.assertFalse( n1["parameters"]["coshaderParameter"].acceptsInput( n2["out"] ) )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
n3 = GafferRenderMan.RenderManShader()
n3.loadShader( coshader )
self.assertTrue( n1["parameters"]["coshaderParameter"].acceptsInput( n3["out"] ) )
arrayShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n4 = GafferRenderMan.RenderManShader()
n4.loadShader( arrayShader )
self.assertFalse( n4["parameters"]["fixedShaderArray"]["fixedShaderArray0"].acceptsInput( n2["out"] ) )
self.assertTrue( n4["parameters"]["fixedShaderArray"]["fixedShaderArray0"].acceptsInput( n3["out"] ) )
def testConnectionsBetweenParameters( self ) :
s = GafferRenderMan.RenderManShader()
s.loadShader( "plastic" )
s["parameters"]["Kd"].setValue( 0.25 )
s["parameters"]["Ks"].setInput( s["parameters"]["Kd"] )
shader = s.state()[0]
self.assertEqual( shader.parameters["Kd"].value, 0.25 )
self.assertEqual( shader.parameters["Ks"].value, 0.25 )
def testFixedCoshaderArrayParameterHash( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
h1 = n.stateHash()
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
n["parameters"]["fixedShaderArray"]["fixedShaderArray0"].setInput( coshaderNode["out"] )
h2 = n.stateHash()
self.assertNotEqual( h2, h1 )
n["parameters"]["fixedShaderArray"]["fixedShaderArray1"].setInput( coshaderNode["out"] )
h3 = n.stateHash()
self.assertNotEqual( h3, h2 )
self.assertNotEqual( h3, h1 )
n["parameters"]["fixedShaderArray"]["fixedShaderArray1"].setInput( None )
n["parameters"]["fixedShaderArray"]["fixedShaderArray2"].setInput( coshaderNode["out"] )
h4 = n.stateHash()
self.assertNotEqual( h4, h3 )
self.assertNotEqual( h4, h2 )
self.assertNotEqual( h4, h1 )
def testDisabling( self ) :
s = GafferRenderMan.RenderManShader()
s.loadShader( "plastic" )
stateHash = s.stateHash()
state = s.state()
self.assertEqual( len( state ), 1 )
self.assertEqual( state[0].name, "plastic" )
self.assertTrue( s["enabled"].isSame( s.enabledPlug() ) )
s["enabled"].setValue( False )
stateHash2 = s.stateHash()
self.assertNotEqual( stateHash2, stateHash )
state2 = s.state()
self.assertEqual( len( state2 ), 0 )
def testDisablingCoshaders( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
s = shaderNode.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, shader )
h = shaderNode.stateHash()
coshaderNode["enabled"].setValue( False )
s2 = shaderNode.state()
self.assertEqual( len( s2 ), 1 )
self.assertEqual( s2[0].name, shader )
self.assertTrue( "coshaderParameter" not in s2[0].parameters )
self.assertNotEqual( shaderNode.stateHash(), h )
def testDisablingCoshaderArrayInputs( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode1 = GafferRenderMan.RenderManShader()
coshaderNode1.loadShader( coshader )
coshaderNode2 = GafferRenderMan.RenderManShader()
coshaderNode2.loadShader( coshader )
n["parameters"]["fixedShaderArray"][0].setInput( coshaderNode1["out"] )
n["parameters"]["fixedShaderArray"][2].setInput( coshaderNode2["out"] )
state = n.state()
h1 = n.stateHash()
self.assertEqual(
state[2].parameters["fixedShaderArray"],
IECore.StringVectorData( [
state[0].parameters["__handle"].value,
"",
state[1].parameters["__handle"].value,
""
] )
)
coshaderNode1["enabled"].setValue( False )
state = n.state()
self.assertEqual(
state[1].parameters["fixedShaderArray"],
IECore.StringVectorData( [
"",
"",
state[0].parameters["__handle"].value,
""
] )
)
h2 = n.stateHash()
self.assertNotEqual( h2, h1 )
coshaderNode2["enabled"].setValue( False )
state = n.state()
self.assertEqual(
state[0].parameters["fixedShaderArray"],
IECore.StringVectorData( [
"",
"",
"",
""
] )
)
self.assertNotEqual( n.stateHash(), h1 )
self.assertNotEqual( n.stateHash(), h2 )
def testCorrespondingInput( self ) :
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
self.assertEqual( coshaderNode.correspondingInput( coshaderNode["out"] ), None )
coshader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
coshaderNode2 = GafferRenderMan.RenderManShader()
coshaderNode2.loadShader( coshader2 )
self.assertTrue( coshaderNode2.correspondingInput( coshaderNode2["out"] ).isSame( coshaderNode2["parameters"]["aColorIWillTint"] ) )
def testCoshaderPassThrough( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
passThroughCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
passThroughCoshaderNode = GafferRenderMan.RenderManShader()
passThroughCoshaderNode.loadShader( passThroughCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( passThroughCoshaderNode["out"] )
passThroughCoshaderNode["parameters"]["aColorIWillTint"].setInput( coshaderNode["out"] )
h = shaderNode.stateHash()
s = shaderNode.state()
self.assertEqual( len( s ), 3 )
self.assertEqual( s[2].parameters["coshaderParameter"], s[1].parameters["__handle"] )
self.assertEqual( s[1].name, passThroughCoshader )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
passThroughCoshaderNode["enabled"].setValue( False )
s = shaderNode.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
def testSplineParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.assertEqual( n["parameters"].keys(), [ "floatSpline", "colorSpline", "colorSpline2" ] )
self.assertTrue( isinstance( n["parameters"]["floatSpline"], Gaffer.SplineffPlug ) )
self.assertTrue( isinstance( n["parameters"]["colorSpline"], Gaffer.SplinefColor3fPlug ) )
self.assertEqual(
n["parameters"]["floatSpline"].defaultValue(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 1 ),
( 1, 1 ),
]
)
)
self.assertEqual(
n["parameters"]["colorSpline"].defaultValue(),
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
[
( 0, IECore.Color3f( 0 ) ),
( 0, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 1 ) ),
( 1, IECore.Color3f( 1 ) ),
]
)
)
floatValue = IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
)
colorValue = IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
[
( 0, IECore.Color3f( 0 ) ),
( 0, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( .5 ) ),
( 1, IECore.Color3f( .5 ) ),
]
)
n["parameters"]["floatSpline"].setValue( floatValue )
n["parameters"]["colorSpline"].setValue( colorValue )
s = n.state()[0]
self.assertEqual( s.parameters["floatSpline"].value, floatValue )
self.assertEqual( s.parameters["colorSpline"].value, colorValue )
def testSplineParameterSerialisationKeepsExistingValues( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["n"]["parameters"]["floatSpline"].setValue(
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
)
)
self.assertEqual(
s["n"]["parameters"]["floatSpline"].getValue(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
),
)
ss = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( ss )
self.assertEqual(
s2["n"]["parameters"]["floatSpline"].getValue(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
),
)
def testSplineParameterDefaultValueAnnotation( self ) :
# because variable length parameters must be initialised
# with a zero length array, we have to pass the defaults we actually
# want via an annotation.
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.assertEqual(
n["parameters"]["colorSpline2"].getValue(),
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
[
( 0, IECore.Color3f( 1 ) ),
( 0, IECore.Color3f( 1 ) ),
( 0.5, IECore.Color3f( 1, 0.5, 0.25 ) ),
( 1, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 0 ) ),
]
),
)
def testCoshadersInBox( self ) :
s = Gaffer.ScriptNode()
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
s["shader"] = GafferRenderMan.RenderManShader()
s["shader"].loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s["coshader"] = GafferRenderMan.RenderManShader()
s["coshader"].loadShader( coshader )
s["shader"]["parameters"]["coshaderParameter"].setInput( s["coshader"]["out"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["coshader"] ] ) )
self.assertTrue( s["shader"]["parameters"]["coshaderParameter"].getInput().parent().isSame( b ) )
s = s["shader"].state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
def testShaderInBoxWithExternalCoshader( self ) :
s = Gaffer.ScriptNode()
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
s["shader"] = GafferRenderMan.RenderManShader()
s["shader"].loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s["coshader"] = GafferRenderMan.RenderManShader()
s["coshader"].loadShader( coshader )
s["shader"]["parameters"]["coshaderParameter"].setInput( s["coshader"]["out"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["shader"] ] ) )
self.assertTrue( b["shader"]["parameters"]["coshaderParameter"].getInput().parent().isSame( b ) )
s = b["shader"].state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
def testNumericTypeAnnotations( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/numericTypeAnnotations.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( isinstance( shaderNode["parameters"]["floatParameter1"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["floatParameter2"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["intParameter"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["boolParameter"], Gaffer.BoolPlug ) )
self.assertEqual( shaderNode["parameters"]["floatParameter1"].defaultValue(), 1.25 )
self.assertEqual( shaderNode["parameters"]["floatParameter2"].defaultValue(), 1.5 )
self.assertEqual( shaderNode["parameters"]["intParameter"].defaultValue(), 10 )
self.assertEqual( shaderNode["parameters"]["boolParameter"].defaultValue(), True )
self.assertEqual( shaderNode["parameters"]["floatParameter1"].getValue(), 1.25 )
self.assertEqual( shaderNode["parameters"]["floatParameter2"].getValue(), 1.5 )
self.assertEqual( shaderNode["parameters"]["intParameter"].getValue(), 10 )
self.assertEqual( shaderNode["parameters"]["boolParameter"].getValue(), True )
def testCoshaderTypeAnnotations( self ) :
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
coshaderType1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType1.sl" )
coshaderType1Node = GafferRenderMan.RenderManShader()
coshaderType1Node.loadShader( coshaderType1 )
coshaderType2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType2.sl" )
coshaderType2Node = GafferRenderMan.RenderManShader()
coshaderType2Node.loadShader( coshaderType2 )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/typedCoshaderParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderType1Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderNode["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderType2Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameter"]["coshaderArrayParameter0"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameter"]["coshaderArrayParameter0"].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameter"]["coshaderArrayParameter0"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType1"]["coshaderArrayParameterType1_0"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameterType1"]["coshaderArrayParameterType1_0"].acceptsInput( coshaderType1Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType1"]["coshaderArrayParameterType1_0"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType2"][0].acceptsInput( coshaderNode["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType2"][0].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameterType2"][0].acceptsInput( coshaderType2Node["out"] ) )
def testMultipleCoshaderTypeAnnotations( self ) :
coshaderType1And2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType1And2.sl" )
coshaderType1And2Node = GafferRenderMan.RenderManShader()
coshaderType1And2Node.loadShader( coshaderType1And2 )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/typedCoshaderParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderType1And2Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderType1And2Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderType1And2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType3"].acceptsInput( coshaderType1And2Node["out"] ) )
def testSplitCoshaderPassThrough( self ) :
# C ----S S is connected to C both directly
# | | and as a pass-through of the disabled
# D ---- node D.
#
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
S = GafferRenderMan.RenderManShader()
S.loadShader( shader )
passThroughCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
D = GafferRenderMan.RenderManShader()
D.loadShader( passThroughCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
C = GafferRenderMan.RenderManShader()
C.loadShader( coshader )
S["parameters"]["fixedShaderArray"][0].setInput( C["out"] )
S["parameters"]["fixedShaderArray"][1].setInput( D["out"] )
D["parameters"]["aColorIWillTint"].setInput( C["out"] )
h = S.stateHash()
s = S.state()
self.assertEqual( len( s ), 3 )
self.assertEqual( s[2].parameters["fixedShaderArray"], IECore.StringVectorData( [ s[0].parameters["__handle"].value, s[1].parameters["__handle"].value, "", "" ] ) )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
self.assertEqual( s[1].name, passThroughCoshader )
D["enabled"].setValue( False )
self.assertNotEqual( S.stateHash(), h )
s = S.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["fixedShaderArray"], IECore.StringVectorData( [ s[0].parameters["__handle"].value, s[0].parameters["__handle"].value, "", "" ] ) )
self.assertEqual( s[0].name, coshader )
def testSerialDisabledShaders( self ) :
# C ----> D1 ----> D2 ----> S
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
S = GafferRenderMan.RenderManShader()
S.loadShader( shader )
passThroughCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
D1 = GafferRenderMan.RenderManShader()
D1.loadShader( passThroughCoshader )
D2 = GafferRenderMan.RenderManShader()
D2.loadShader( passThroughCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
C = GafferRenderMan.RenderManShader()
C.loadShader( coshader )
S["parameters"]["coshaderParameter"].setInput( D2["out"] )
D2["parameters"]["aColorIWillTint"].setInput( D1["out"] )
D1["parameters"]["aColorIWillTint"].setInput( C["out"] )
h1 = S.stateHash()
s = S.state()
self.assertEqual( len( s ), 4 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, passThroughCoshader )
self.assertEqual( s[2].name, passThroughCoshader )
self.assertEqual( s[3].name, shader )
self.assertEqual( s[3].parameters["coshaderParameter"], s[2].parameters["__handle"] )
self.assertEqual( s[2].parameters["aColorIWillTint"], s[1].parameters["__handle"] )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
D2["enabled"].setValue( False )
h2 = S.stateHash()
self.assertNotEqual( h1, h2 )
s = S.state()
self.assertEqual( len( s ), 3 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, passThroughCoshader )
self.assertEqual( s[2].name, shader )
self.assertEqual( s[2].parameters["coshaderParameter"], s[1].parameters["__handle"] )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
D1["enabled"].setValue( False )
h3 = S.stateHash()
self.assertNotEqual( h3, h2 )
self.assertNotEqual( h3, h1 )
s = S.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, shader )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
def testDynamicCoshaderArrayParameters( self ) :
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertEqual( len( shaderNode["parameters"]["dynamicShaderArray"] ), 1 )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][0], Gaffer.Plug ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][0].getInput() is None )
shaderNode["parameters"]["dynamicShaderArray"][0].setInput( coshaderNode["out"] )
self.assertEqual( len( shaderNode["parameters"]["dynamicShaderArray"] ), 2 )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][0], Gaffer.Plug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][1], Gaffer.Plug ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][0].getInput().isSame( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][1].getInput() is None )
shaderNode["parameters"]["dynamicShaderArray"][0].setInput( None )
self.assertEqual( len( shaderNode["parameters"]["dynamicShaderArray"] ), 1 )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][0], Gaffer.Plug ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][0].getInput() is None )
def testSerialiseDynamicCoshaderArrayParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["dynamicShaderArray"][0].setInput( s["c"]["out"] )
s["n"]["parameters"]["dynamicShaderArray"][1].setInput( s["c"]["out"] )
s["n"]["parameters"]["dynamicShaderArray"][2].setInput( s["c"]["out"] )
s["n"]["parameters"]["dynamicShaderArray"][1].setInput( None )
self.assertEqual( len( s["n"]["parameters"]["dynamicShaderArray"] ), 4 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["n"]["parameters"]["dynamicShaderArray"] ), 4 )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][0].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][1].getInput() is None )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][2].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][3].getInput() is None )
s2["n"]["parameters"]["dynamicShaderArray"][3].setInput( s2["c"]["out"] )
self.assertEqual( len( s2["n"]["parameters"]["dynamicShaderArray"] ), 5 )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][0].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][1].getInput() is None )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][2].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][3].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][4].getInput() is None )
def testConvertFixedCoshaderArrayToDynamic( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
shaderV2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParametersV2.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["fixedShaderArray"][0].setInput( s["c"]["out"] )
		self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 4 )
s["n"].loadShader( shaderV2, keepExistingValues = True )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput().isSame( s["c"]["out"] ) )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][1].getInput() is None )
s["n"]["parameters"]["fixedShaderArray"][0].setInput( None )
self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 1 )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
def testConvertFixedCoshaderArrayToDynamicWithFirstPlugUnconnected( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
shaderV2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParametersV2.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["fixedShaderArray"][1].setInput( s["c"]["out"] )
		self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 4 )
s["n"].loadShader( shaderV2, keepExistingValues = True )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][1].getInput().isSame( s["c"]["out"] ) )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
s["n"]["parameters"]["fixedShaderArray"][1].setInput( None )
self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 1 )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
def testConvertFixedCoshaderArrayToDynamicDuringLoading( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["fixedShaderArray"][1].setInput( s["c"]["out"] )
		self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 4 )
GafferRenderMan.RenderManShader.shaderLoader().clear()
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParametersV2.sl", shaderName = "coshaderArrayParameters" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertTrue( s2["n"]["parameters"]["fixedShaderArray"][1].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
s2["n"]["parameters"]["fixedShaderArray"][1].setInput( None )
self.assertEqual( len( s2["n"]["parameters"]["fixedShaderArray"] ), 1 )
self.assertTrue( s2["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
def testHashThroughBox( self ):
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
# box up an intermediate coshader:
b = Gaffer.Box()
b.addChild( Gaffer.Plug( "in" ) )
b.addChild( Gaffer.Plug( "out", direction = Gaffer.Plug.Direction.Out ) )
intermediateCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
intermediateCoshaderNode = GafferRenderMan.RenderManShader()
intermediateCoshaderNode.loadShader( intermediateCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
b["in"].setInput( coshaderNode["out"] )
intermediateCoshaderNode["parameters"]["aColorIWillTint"].setInput( b["in"] )
b["out"].setInput( intermediateCoshaderNode["out"] )
shaderNode["parameters"]["coshaderParameter"].setInput( b["out"] )
h1 = shaderNode.stateHash()
coshaderNode["parameters"]["floatParameter"].setValue( 0.25 )
self.assertNotEqual( shaderNode.stateHash(), h1 )
def testDanglingBoxConnection( self ):
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode1 = GafferRenderMan.RenderManShader()
shaderNode1.loadShader( shader )
shaderNode2 = GafferRenderMan.RenderManShader()
shaderNode2.loadShader( shader )
b = Gaffer.Box()
b.addChild( Gaffer.Plug( "in" ) )
b.addChild( Gaffer.Plug( "out", direction = Gaffer.Plug.Direction.Out ) )
b["shader1"] = shaderNode1
shaderNode1["parameters"]["coshaderParameter"].setInput( b["in"] )
shaderNode2["parameters"]["coshaderParameter"].setInput( b["out"] )
def testUnconnectedCustomBoxInput( self ) :
class CustomBox( Gaffer.Box ) :
def __init__( self, name = "CustomBox" ) :
Gaffer.Box.__init__( self, name )
IECore.registerRunTimeTyped( CustomBox )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
# create a box and put a shader in it
b = CustomBox()
b["s"] = GafferRenderMan.RenderManShader()
b["s"].loadShader( shader )
# create a plug on the outside of the box, and connect it into
# the shader.
b["in"] = b["s"]["parameters"]["coshaderParameter"].createCounterpart( "in", Gaffer.Plug.Direction.In )
b["s"]["parameters"]["coshaderParameter"].setInput( b["in"] )
s = b["s"].state()
self.assertEqual( len( s ), 1 )
self.assertEqual( s[0].name, shader )
self.assertTrue( b["s"]["parameters"]["coshaderParameter"].getInput().isSame( b["in"] ) )
# check that it is now possible to connect appropriate coshaders
# into the box plug, and that appropriate networks are generated that way.
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
c = GafferRenderMan.RenderManShader()
c.loadShader( coshader )
self.assertTrue( b["in"].acceptsInput( c["out"] ) )
b["in"].setInput( c["out"] )
s = b["s"].state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
# check that it's not possible to use the plug on the box to create rogue connections
# that the shader itself wouldn't have accepted directly.
n = Gaffer.Node()
n["out"] = b["in"].createCounterpart( "out", Gaffer.Plug.Direction.Out )
self.assertFalse( b["in"].acceptsInput( n["out"] ) )
self.assertRaises( RuntimeError, b["in"].setInput, n["out"] )
# and check that if we remove the internal connection to the shader, the exterior plug
# will start accepting new connections again.
b["s"]["parameters"]["coshaderParameter"].setInput( None )
self.assertTrue( b["in"].acceptsInput( n["out"] ) )
b["in"].setInput( n["out"] )
self.assertTrue( b["in"].getInput().isSame( n["out"] ) )
# and that the shader will reject connection to the plug with the dodgy input.
self.assertFalse( b["s"]["parameters"]["coshaderParameter"].acceptsInput( b["in"] ) )
self.assertRaises( RuntimeError, b["s"]["parameters"]["coshaderParameter"].setInput, b["in"] )
def testCoshaderSwitching( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode0 = GafferRenderMan.RenderManShader()
coshaderNode0.loadShader( coshader )
coshaderNode1 = GafferRenderMan.RenderManShader()
coshaderNode1.loadShader( coshader )
coshaderNode0["parameters"]["floatParameter"].setValue( 0 )
coshaderNode1["parameters"]["floatParameter"].setValue( 1 )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
switch = GafferScene.ShaderSwitch()
switch["in"].setInput( coshaderNode0["out"] )
switch["in1"].setInput( coshaderNode1["out"] )
shaderNode["parameters"]["coshaderParameter"].setInput( switch["out"] )
self.assertEqual( shaderNode.state()[0].parameters["floatParameter"].value, 0 )
switch["index"].setValue( 1 )
self.assertEqual( shaderNode.state()[0].parameters["floatParameter"].value, 1 )
switch["enabled"].setValue( False )
self.assertEqual( shaderNode.state()[0].parameters["floatParameter"].value, 0 )
def testCoshaderTypingPreventsNewInvalidSwitchInputs( self ) :
coshaderType1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType1.sl" )
coshaderType1Node = GafferRenderMan.RenderManShader()
coshaderType1Node.loadShader( coshaderType1 )
coshaderType2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType2.sl" )
coshaderType2Node = GafferRenderMan.RenderManShader()
coshaderType2Node.loadShader( coshaderType2 )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/typedCoshaderParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
switch = GafferScene.ShaderSwitch()
switch["in"].setInput( coshaderType1Node["out"] )
shaderNode["parameters"]["coshaderParameterType1"].setInput( switch["out"] )
self.assertFalse( switch["in1"].acceptsInput( coshaderType2Node["out"] ) )
self.assertTrue( switch["in1"].acceptsInput( coshaderType1Node["out"] ) )
def testAcceptInputFromEmptySwitch( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
switch = GafferScene.ShaderSwitch()
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( switch["out"] ) )
def testCoshaderSwitchingInBox( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
script = Gaffer.ScriptNode()
script["coshaderNode0"] = GafferRenderMan.RenderManShader()
script["coshaderNode0"].loadShader( coshader )
script["coshaderNode1"] = GafferRenderMan.RenderManShader()
script["coshaderNode1"].loadShader( coshader )
script["coshaderNode0"]["parameters"]["floatParameter"].setValue( 0 )
script["coshaderNode1"]["parameters"]["floatParameter"].setValue( 1 )
script["shaderNode"] = GafferRenderMan.RenderManShader()
script["shaderNode"].loadShader( shader )
script["switch"] = GafferScene.ShaderSwitch()
script["switch"]["in"].setInput( script["coshaderNode0"]["out"] )
script["switch"]["in1"].setInput( script["coshaderNode1"]["out"] )
script["shaderNode"]["parameters"]["coshaderParameter"].setInput( script["switch"]["out"] )
self.assertEqual( script["shaderNode"].state()[0].parameters["floatParameter"].value, 0 )
box = Gaffer.Box.create( script, Gaffer.StandardSet( script.children( Gaffer.Node.staticTypeId() ) ) )
self.assertEqual( box["shaderNode"].state()[0].parameters["floatParameter"].value, 0 )
promotedIndex = box.promotePlug( box["switch"]["index"] )
self.assertEqual( box["shaderNode"].state()[0].parameters["floatParameter"].value, 0 )
promotedIndex.setValue( 1 )
self.assertEqual( box["shaderNode"].state()[0].parameters["floatParameter"].value, 1 )
def testRepeatability( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
sn1 = GafferRenderMan.RenderManShader()
sn2 = GafferRenderMan.RenderManShader()
sn1.loadShader( s1 )
sn2.loadShader( s2 )
sn2["parameters"]["coshaderParameter"].setInput( sn1["out"] )
self.assertEqual( sn2.stateHash(), sn2.stateHash() )
self.assertEqual( sn2.state(), sn2.state() )
def testHandlesAreHumanReadable( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
sn1 = GafferRenderMan.RenderManShader( "Shader1" )
sn2 = GafferRenderMan.RenderManShader( "Shader2" )
sn1.loadShader( s1 )
sn2.loadShader( s2 )
sn2["parameters"]["coshaderParameter"].setInput( sn1["out"] )
state = sn2.state()
self.assertTrue( "Shader1" in state[0].parameters["__handle"].value )
def testHandlesAreUniqueEvenIfNodeNamesArent( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
script = Gaffer.ScriptNode()
script["in1"] = GafferRenderMan.RenderManShader()
script["in1"].loadShader( s1 )
script["in2"] = GafferRenderMan.RenderManShader()
script["in2"].loadShader( s1 )
script["shader"] = GafferRenderMan.RenderManShader()
script["shader"].loadShader( s2 )
script["shader"]["parameters"]["fixedShaderArray"][0].setInput( script["in1"]["out"] )
script["shader"]["parameters"]["fixedShaderArray"][1].setInput( script["in2"]["out"] )
box = Gaffer.Box.create( script, Gaffer.StandardSet( [ script["in1"] ] ) )
# because the nodes have different parents, we can give them the same name.
box["in1"].setName( "notUnique" )
script["in2"].setName( "notUnique" )
state = script["shader"].state()
self.assertNotEqual( state[0].parameters["__handle"], state[1].parameters["__handle"] )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -2,309,061,230,335,838,000 | 38.419459 | 166 | 0.686167 | false | 3.317733 | true | false | false |
hiseh/yinanan | tps/converter/formats.py | 1 | 2208 | #!/usr/bin/env python
class BaseFormat(object):
"""
Base format class.
Supported formats are: ogg, avi, mkv, webm, flv, mov, mp4, mpeg
"""
format_name = None
ffmpeg_format_name = None
def parse_options(self, opt):
if 'format' not in opt or opt.get('format') != self.format_name:
raise ValueError('invalid Format format')
return ['-f', self.ffmpeg_format_name]
class OggFormat(BaseFormat):
"""
Ogg container format, mostly used with Vorbis and Theora.
"""
format_name = 'ogg'
ffmpeg_format_name = 'ogg'
class AviFormat(BaseFormat):
"""
    Avi container format, often used with DivX video.
"""
format_name = 'avi'
ffmpeg_format_name = 'avi'
class MkvFormat(BaseFormat):
"""
Matroska format, often used with H.264 video.
"""
format_name = 'mkv'
ffmpeg_format_name = 'matroska'
class WebmFormat(BaseFormat):
"""
WebM is Google's variant of Matroska containing only
VP8 for video and Vorbis for audio content.
"""
format_name = 'webm'
ffmpeg_format_name = 'webm'
class FlvFormat(BaseFormat):
"""
Flash Video container format.
"""
format_name = 'flv'
ffmpeg_format_name = 'flv'
class MovFormat(BaseFormat):
"""
Mov container format, used mostly with H.264 video
content, often for mobile platforms.
"""
format_name = 'mov'
ffmpeg_format_name = 'mov'
class Mp4Format(BaseFormat):
"""
    Mp4 container format, the default format for H.264
video content.
"""
format_name = 'mp4'
ffmpeg_format_name = 'mp4'
class MpegFormat(BaseFormat):
"""
MPEG(TS) container, used mainly for MPEG 1/2 video codecs.
"""
format_name = 'mpg'
ffmpeg_format_name = 'mpegts'
class Mp3Format(BaseFormat):
"""
Mp3 container, used audio-only mp3 files
"""
format_name = 'mp3'
ffmpeg_format_name = 'mp3'
class FlacFormat(BaseFormat):
"""
Flac container
hiseh
"""
format_name = 'flac'
ffmpeg_format_name = 'flac'
format_list = [
OggFormat, AviFormat, MkvFormat, WebmFormat, FlvFormat,
MovFormat, Mp4Format, MpegFormat, Mp3Format, FlacFormat
]
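# --- Editor's usage sketch (not part of the original module) ---
# A minimal, hypothetical illustration of how these classes could be used to
# build the ffmpeg "-f" arguments; the lookup below is an assumption made for
# this example and is not an API of the package itself.
#
# >>> fmt_cls = next(f for f in format_list if f.format_name == 'mp4')
# >>> fmt_cls().parse_options({'format': 'mp4'})
# ['-f', 'mp4']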
| mit | 4,400,064,395,914,517,500 | 19.635514 | 72 | 0.625906 | false | 3.280832 | false | false | false |
draklaw/lair | src/sys_sdl2/sys_sdl2.py | 1 | 2854 | #!/usr/bin/env python3
##
## Copyright (C) 2015 Simon Boyé
##
## This file is part of lair.
##
## lair is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## lair is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with lair. If not, see <http://www.gnu.org/licenses/>.
##
from sys import path, argv, stderr
from os import getcwd
path.append(getcwd())
from autopy import *
from os.path import join, dirname
from importlib.machinery import SourceFileLoader
core = SourceFileLoader('core', join(dirname(__file__), '..', 'core', 'core.py')).load_module()
window_class = (
AutoClass('Window', None)
.add_getset('width', auto_int)
.add_getset('height', auto_int)
.add_getset('title', auto_string.const(), 'utf8Title', 'setUtf8Title')
.add_method('isValid', auto_bool)
.add_method('isFullscreen', auto_bool)
.add_method('isVisible', auto_bool)
.add_method('resize', None, [ (auto_int, 'width'), (auto_int, 'height') ])
.add_method('setFullscreen', None, [ (auto_bool, 'fullscreen') ])
.add_method('setVisible', None, [ (auto_bool, 'visible') ])
.add_method('destroy')
.add_method('swapBuffers')
)
sys_module_class = (
AutoClass('SysModule', [ OPTIONAL_PARAM, (core.master_logger_class, 'MasterLogger', 'NULL') ])
.add_method('initialize', auto_bool)
.add_method('shutdown')
.add_method('isScreensaverEnabled', auto_bool)
.add_method('setScreensaverEnabled', None, [ (auto_bool, 'enable') ])
.add_method('isVSyncEnabled', auto_bool)
.add_method('setVSyncEnabled', None, [ (auto_bool, 'enable') ])
.add_method('createWindow', window_class, [
(auto_string, 'title'), (auto_int, 'width'), (auto_int, 'height') ])
.add_method('destroyAllWindows')
.add_method('waitAndDispatchSystemEvents')
.add_method('dispatchPendingSystemEvents')
.add_method('getKeyState', auto_int, [ (auto_int, 'scancode') ])
.add_method('getTimeNs', auto_int64)
.add_method('waitNs', None, [ (auto_int64, 'ns') ])
)
sys_module_module = (
AutoModule('sys_sdl2')
.add_include('../core/core_py.h')
.add_include('lair/sys_sdl2/sys_module.h')
.add_include('lair/sys_sdl2/window.h')
.add_use_namespace('lair')
.add_class(window_class)
.add_class(sys_module_class)
)
if __name__ == '__main__':
if len(argv) == 2:
base_file = argv[1]
    elif len(argv) > 2:
stderr.write("Usage: {} BASE\n".format(argv[0]))
exit(1)
sys_module_module.write_module(base_file)
| lgpl-3.0 | 5,791,961,998,432,265,000 | 33.792683 | 95 | 0.676481 | false | 2.956477 | false | false | false |
muatik/dahi | dahi/document.py | 1 | 1446 | from bson import ObjectId
from dahi.statement import Statement
class InvalidDocument(Exception):
pass
class Document(object):
def __init__(self, docID=None, botSay=None, humanSay=None, onMatch=None):
super(Document, self).__init__()
self.botSay = botSay
self.humanSay = humanSay
self.id = docID
self.onMatch = onMatch
@staticmethod
def generate(data):
botSay = None
humanSay = None
if data.get("botSay", None):
botSay = Statement.generate(data["botSay"])
if data.get("humanSay", None):
humanSay = Statement.generate(data["humanSay"])
return Document(
docID=str(data["_id"]),
botSay=botSay,
humanSay=humanSay,
onMatch=data["onMatch"])
def __repr__(self):
return "Document <{}>".format(self.id)
def toJson(self):
return {
"_id": str(self.id),
"botSay": self.botSay.toJson() if self.botSay else None,
"humanSay": self.humanSay.toJson() if self.humanSay else None,
"onMatch": self.onMatch
}
def toDB(self):
return {
"_id": ObjectId(self.id), # FIXME: I don't like ObjectId() here
"botSay": self.botSay.toDB() if self.botSay else None,
"humanSay": self.humanSay.toDB() if self.humanSay else None,
"onMatch": self.onMatch
}
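# --- Editor's usage sketch (not part of the original module) ---
# A minimal, hypothetical example of serialising a Document; the id and
# onMatch values are made up for illustration, and botSay/humanSay are left
# unset so no Statement construction is needed.
#
# >>> doc = Document(docID="507f1f77bcf86cd799439011", onMatch="reply")
# >>> doc.toJson()
# {'_id': '507f1f77bcf86cd799439011', 'botSay': None, 'humanSay': None, 'onMatch': 'reply'}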
| mit | 1,258,744,973,872,936,000 | 27.352941 | 77 | 0.564315 | false | 3.509709 | false | false | false |
saeidadli/Python-ArcGIS-Convertor | arcgdfconvertor/convertor.py | 1 | 3491 | import os
import sys
import tempfile
from pathlib import Path
import arcpy
import pandas as pd
import numpy as np
import geopandas as gpd
#constants
#WGS_1984 coordinate system
WGS_1984 = \
"GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', "+\
"SPHEROID['WGS_1984',6378137.0,298.257223563]], "+\
"PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; "+\
"-400 -400 1000000000;-100000 10000;-100000 10000; "+\
"8.98315284119522E-09;0.001;0.001;IsHighPrecision"
#functions
def gdb_path(in_fc):
"""
    Returns the workspace properties (GDB path, workspace type, directory and name) of an input feature class.
"""
if arcpy.Exists(in_fc):
desc = arcpy.Describe(in_fc)
in_fc = desc.catalogPath
fc_name = desc.name
else:
fc_name = os.path.basename(in_fc)
dirname = os.path.dirname(in_fc)
workspace = arcpy.Describe(dirname).dataType
if workspace == 'FeatureDataset':
GDB = os.path.dirname(dirname)
elif workspace == 'Workspace':
GDB = dirname
elif workspace == 'Folder':
GDB = ''
else:
GDB = ''
return GDB, workspace, dirname, fc_name
def get_fields(in_fc, output_type = 'list'):
#Gets list of fileds from a feature class
fields = arcpy.ListFields(in_fc)
if output_type == 'list':
output = [f.name for f in fields]
elif output_type == 'dict':
output = {f.name: f.type for f in fields}
else:
output = ''
return output
#pandas convertor for ArcGIS
def gdf_to_fc(gdf, fc):
"""
    Converts a geopandas GeoDataFrame to a feature class in an ESRI file geodatabase.
    Notes:
    - gdf must have a 'geometry' field.
"""
if 'geometry' not in gdf.columns.values:
sys.exit()
GDB, workspace, dirname, fc_name = gdb_path(fc)
# convert fc to a gpkg in a temporary directory
tmp_dir = tempfile.TemporaryDirectory()
p = Path(tmp_dir.name)
n = fc_name + '.shp'
gdf.to_file(str(p/n))
fc_cols = get_fields(str(p/n))[2:]
#copy the file into a feature class
fc = arcpy.CopyFeatures_management(str(p/n), fc)
gdf_cols = gdf.columns.tolist()
gdf_cols.remove('geometry')
#fixing the columns
if gdf_cols:
col_dict = {col: gdf_cols[indx] for indx, col in enumerate(fc_cols) }
for col in col_dict:
if col_dict[col] != col:
arcpy.AlterField_management(fc, col, col_dict[col], clear_field_alias="true")
# Delete temporary directory
tmp_dir.cleanup()
return fc
def gdf_to_tbl(gdf, tbl):
gdf_cols = gdf.columns.values.tolist()
if 'geometry' in gdf_cols:
gdf_cols.remove('geometry')
gdf = gdf[gdf_cols].copy()
x = np.array(np.rec.fromrecords(gdf.values))
names = gdf.dtypes.index.tolist()
names = [str(arcpy.ValidateTableName(name)) for name in names]
x.dtype.names = tuple(names)
arcpy.da.NumPyArrayToTable(x, tbl)
return tbl
def fc_to_gdf(fc):
#use scratch work space for temporary files
GDB, workspace, dirname, fc_name = gdb_path(fc)
if GDB != '':
gdf = gpd.read_file(GDB, layer = fc_name)
else:
desc = arcpy.Describe(fc)
fc_path = desc.catalogPath
gdf = gpd.read_file(fc_path)
return gdf
def tbl_to_gdf(tbl, fieldnames = None):
gdf = fc_to_gdf(tbl)
    if fieldnames is not None:
        fieldnames = [f for f in fieldnames if f in gdf.columns]
else:
fieldnames = get_fields(tbl)[1:]
return gdf[fieldnames].copy()
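# --- Editor's usage sketch (not part of the original module) ---
# A minimal, hypothetical round trip between an ArcGIS feature class and a
# GeoDataFrame; the paths are placeholders, and arcpy plus geopandas must be
# installed and licensed for this to run.
#
# >>> gdf = fc_to_gdf(r'C:\data\project.gdb\parcels')        # feature class -> GeoDataFrame
# >>> gdf['area_ha'] = gdf.geometry.area / 10000.0           # work in pandas/geopandas
# >>> gdf_to_fc(gdf, r'C:\data\project.gdb\parcels_out')     # GeoDataFrame -> feature class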
| mit | 6,099,987,639,100,289,000 | 25.648855 | 93 | 0.613005 | false | 3.179417 | false | false | false |
AravindK95/ee106b | project3/src/lab3/src/extra/grasp_ctrl.py | 1 | 5226 | #!/usr/bin/env python
import sys
import rospkg
import rospy
import tf
import numpy as np
from std_msgs.msg import String, Bool
from geometry_msgs.msg import Transform, Pose, Vector3, Quaternion, Point
from lab3.msg import FrameCall
PROJECT_PATH = rospkg.RosPack().get_path('lab3')
sys.path.append(PROJECT_PATH+'/src/lab3')
sys.path.append(PROJECT_PATH+'/src/extra')
SPRAY_BOTTLE_MESH_FILENAME = PROJECT_PATH+'/data/spray.obj'
import obj_file
import transformations
from lab3_starter import contacts_to_baxter_hand_pose
BASE = 'base'
OBJ_BASE = 'graspable_object'
def publish_frame_group(trans, rot, name, base, to_add):
tf_pub.publish(Transform(Vector3(trans[0], trans[1], trans[2]),
Quaternion(rot[0], rot[1], rot[2], rot[3])),
name,
base,
to_add)
#One of these is the correct direction to off set grap pos by.
pre_trans = Vector3(trans[0] - 0.2, trans[1], trans[2])
pre_rot = Quaternion(rot[0], rot[1], rot[2], rot[3])
#One of these is the correct direction to lift it straight up. Probably z.
post_trans = Vector3(trans[0], trans[1], trans[2] + 0.3)
post_rot = Quaternion(rot[0], rot[1], rot[2], rot[3])
#We want to the post orientation to be the same as the initial orientation during grasp
#so we do not need to change orientation of end effector.
#Publish the pre and post trans
tf_pub.publish(Transform(pre_trans, pre_rot), 'pre'+name, base, to_add)
tf_pub.publish(Transform(post_trans, post_rot), 'post'+name, base, to_add)
def addframe(trans, rot, name, base):
publish_frame_group(trans, rot, name, base, True)
def rmframe(name):
# trans and rot values irrelevant
publish_frame_group((0,0,0), (0,0,0,0), name, 'blah', False)
def moveto(name):
(trans,rot) = tf_listener.lookupTransform(BASE, name, rospy.Time(0))
moveit_pub.publish(Pose(Point(trans[0], trans[1], trans[2]),
Quaternion(rot[0], rot[1], rot[2], rot[3])))
def setclaw(state):
claw_pub.publish(state)
def makepose(name, idx1, idx2):
trans,rot = contacts_to_baxter_hand_pose(vertices[idx1], vertices[idx2])
trans = (trans[0], trans[1], trans[2])
#rot = (rot[0], rot[1], rot[2], rot[3])
rot = (0, np.sqrt(2)/2, 0, np.sqrt(2)/2)
publish_frame_group(trans, rot, name, OBJ_BASE, True)
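# --- Editor's note (not part of the original script) ---
# A typical interactive session, assuming the ROS nodes and TF frames above
# are running; the vertex indices are examples only:
#
#   cmd >> makepose g 2473 2035   # build 'preg'/'g'/'postg' frames from two contact points
#   cmd >> moveto preg            # approach pose
#   cmd >> moveto g               # grasp pose
#   cmd >> setclaw True           # close gripper
#   cmd >> moveto postg           # lift straight up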
if __name__ == '__main__':
of = obj_file.ObjFile(SPRAY_BOTTLE_MESH_FILENAME)
mesh = of.read()
vertices = mesh.vertices
triangles = mesh.triangles
normals = mesh.normals
rospy.init_node('grasp_ctrl')
tf_pub = rospy.Publisher('lab3/tf', FrameCall, queue_size=3)
moveit_pub = rospy.Publisher('new_position', Pose, queue_size=3)
claw_pub = rospy.Publisher('gripper_control', Bool, queue_size=3)
tf_listener = tf.TransformListener()
while not rospy.is_shutdown():
# parse input
inval = raw_input("cmd >> ")
cmd = None
try:
inval = inval.split(' ')
cmd = inval[0]
except:
print 'Bad input!'
continue
if cmd == 'addframe':
# publish grasp frame
"""Example input:
$ cmd >> addframe (1,2,3) (4,5,6,7) child base
"""
trans = eval(inval[1]) # XYZ vector
rot = eval(inval[2]) # quaternion
name = inval[3]
base = inval[4]
addframe(trans, rot, name, base)
elif cmd == 'rmframe':
# stop publishing grasp frame
"""Example input:
$ cmd >> rmframe child
"""
name = inval[1]
rmframe(name)
elif cmd == 'moveto':
# command moveit
"""Example input:
$ cmd >> moveto child
"""
name = inval[1]
moveto(name)
elif cmd == 'setclaw':
# command the end effector
"""Example input:
$ cmd >> setclaw True
"""
claw_bool = eval(inval[1])
setclaw(claw_bool)
elif cmd == 'makepose':
# turn two force closure vertices into a tf frame
"""Example input:
$ cmd >> makepose name 2473 2035
"""
name = inval[1]
idx1 = int(inval[2])
idx2 = int(inval[3])
makepose(name, idx1, idx2)
elif cmd == 'test':
# runs repeated tests of a single grasp
"""Example input:
$ cmd >> test name
"""
name = inval[1]
while not rospy.is_shutdown():
if raw_input("Test again? [y/n] >> ") == 'n':
break
moveto('pre'+name)
rospy.sleep(2)
moveto(name)
rospy.sleep(2)
setclaw(True)
rospy.sleep(2)
moveto('post'+name)
rospy.sleep(4)
moveto(name)
rospy.sleep(2)
setclaw(False)
rospy.sleep(2)
moveto('pre'+name)
else:
print 'Bad command: '+inval[0]
| mit | 895,466,151,847,824,100 | 31.259259 | 91 | 0.539801 | false | 3.444957 | false | false | false |
dcrosta/nymwit | nymwit/game/management/commands/advancegamestate.py | 1 | 2917 | # Copyright (c) 2011, Daniel Crosta
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from pytz import utc
from logging import getLogger
from datetime import datetime, timedelta
from django.core.management.base import NoArgsCommand
from game.models import Game
log = getLogger('job.advancegamestate')
class Command(NoArgsCommand):
args = ''
help = 'Advances game state from "playing" to "voting" to "finished" as necessary for active games'
def handle_noargs(self, **options):
games = Game.objects(state__in=('playing', 'voting'), next_ending__lte=datetime.now(utc))
for game in games:
if game.state == 'playing':
if game.num_players < 2:
game.update(set__state='invalid')
log.debug('advanced game %s from playing to invalid, only %d players', game.pk, game.num_players)
else:
new_next_ending = game.next_ending + timedelta(minutes=game.minutes_per_round)
game.update(set__state='voting', set__next_ending=new_next_ending)
log.debug('advanced game %s from playing to voting, next ending %s', game.pk, new_next_ending)
elif game.state == 'voting':
total_votes = sum(len(play.upvotes) for play in game.plays)
if total_votes == 0:
game.update(set__state='invalid')
log.debug('advanced game %s from voting to invalid, 0 votes', game.pk)
else:
game.update(set__state='finished')
log.debug('advanced game %s from voting to finished', game.pk)
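# --- Editor's note (not part of the original command) ---
# This is a standard Django management command, so it would typically be
# invoked periodically by a scheduler, e.g.:
#   python manage.py advancegamestate
# The exact scheduling mechanism (cron or otherwise) is an assumption and is
# not defined in this module.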
| bsd-2-clause | -979,874,457,618,385,700 | 47.616667 | 117 | 0.685293 | false | 4.30236 | false | false | false |
pmleveque/cross-site-navigation | authentification.py | 1 | 1642 | from cefbase import *
from google.appengine.api import users
# Determines whether the right user is trying to access the data.
# If the user has no rights at all, they have access to nothing.
# If the contributor is a diocese admin, they only have access to their own bars, their own menus and the public menus.
# If the contributor has been made an admin, they have access to all bars of all contributors and to all menus. They can
# additionally decide to make a menu public or private.
# If the contributor is an admin of the application itself (App Engine administrator), they can add diocese contributors and
# choose whether a contributor may have access to the admin option (access to all bars and all menus).
class Authentification():
@staticmethod
def check_authentification(must_admin=False):
if not users.get_current_user():
return False
else:
list_admin = Administrator.all().filter(
"user =",
users.get_current_user()
).fetch(1)
if len(list_admin) == 0:
if users.is_current_user_admin():
admin = Administrator(
user=users.get_current_user(),
admin=True
)
admin.put()
else:
return False
else:
admin = list_admin[0]
admin.super_admin = users.is_current_user_admin()
if must_admin and not admin.admin:
return False
return admin
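# --- Editor's usage sketch (not part of the original module) ---
# A hypothetical request-handler guard; 'self.error' is assumed to be a
# webapp-style handler method and is not defined in this module.
#
#   admin = Authentification.check_authentification(must_admin=True)
#   if not admin:
#       self.error(403)   # reject users without the admin flag
#   else:
#       ...               # full access to all bars and menus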
| mit | 1,967,716,344,745,664,500 | 42.378378 | 128 | 0.588916 | false | 3.827506 | false | false | false |
landlab/drivers | scripts/ecohydrology_flat_surface/run_driver.py | 1 | 4929 | """
Created on Wed Jul 20 2016
This tutorial is on:
landlab/tutorials/ecohydrology/cellular_automaton_vegetation_flat_surface.ipynb
Creating a (.py) version of the same.
@author: Sai Nudurupati & Erkan Istanbulluoglu
"""
import os
import time
import numpy as np
from landlab import RasterModelGrid, load_params
from ecohyd_functions_flat import (initialize, empty_arrays,
create_pet_lookup, save, plot)
grid1 = RasterModelGrid((100, 100), spacing=(5., 5.))
grid = RasterModelGrid((5, 4), spacing=(5., 5.))
# Create dictionary that holds the inputs
data = load_params('inputs_vegetation_ca.yaml')
(precip_dry, precip_wet, radiation, pet_tree, pet_shrub,
pet_grass, soil_moisture, vegetation, vegca) = initialize(data, grid, grid1)
n_years = 2000 # Approx number of years for model to run
# Calculate approximate number of storms per year
fraction_wet = (data['doy__end_of_monsoon'] -
data['doy__start_of_monsoon']) / 365.
fraction_dry = 1 - fraction_wet
no_of_storms_wet = 8760 * fraction_wet / (data['mean_interstorm_wet'] +
data['mean_storm_wet'])
no_of_storms_dry = 8760 * fraction_dry / (data['mean_interstorm_dry'] +
data['mean_storm_dry'])
n = int(n_years * (no_of_storms_wet + no_of_storms_dry))
(precip, inter_storm_dt, storm_dt, time_elapsed, veg_type, daily_pet,
rad_factor, EP30, pet_threshold) = empty_arrays(n, grid, grid1)
create_pet_lookup(radiation, pet_tree, pet_shrub, pet_grass, daily_pet,
rad_factor, EP30, grid)
# Represent current time in years
current_time = 0 # Start from first day of Jan
# Keep track of run time for simulation - optional
wallclock_start = time.clock() # Recording time taken for simulation
# declaring few variables that will be used in the storm loop
time_check = 0. # Buffer to store current_time at previous storm
yrs = 0 # Keep track of number of years passed
water_stress = 0. # Buffer for Water Stress
Tg = 270 # Growing season in days
# Run storm Loop
for i in range(n):
# Update objects
# Calculate Day of Year (DOY)
julian = np.int(np.floor((current_time - np.floor(current_time)) * 365.))
# Generate seasonal storms
# Wet Season - Jul to Sep - NA Monsoon
if data['doy__start_of_monsoon'] <= julian <= data['doy__end_of_monsoon']:
precip_wet.update()
precip[i] = precip_wet.storm_depth
storm_dt[i] = precip_wet.storm_duration
inter_storm_dt[i] = precip_wet.interstorm_duration
else: # for Dry season
precip_dry.update()
precip[i] = precip_dry.storm_depth
storm_dt[i] = precip_dry.storm_duration
inter_storm_dt[i] = precip_dry.interstorm_duration
# Spatially distribute PET and its 30-day-mean (analogous to degree day)
grid.at_cell['surface__potential_evapotranspiration_rate'] = daily_pet[julian]
grid.at_cell['surface__potential_evapotranspiration_30day_mean'] = EP30[julian]
# Assign spatial rainfall data
grid.at_cell['rainfall__daily_depth'] = np.full(grid.number_of_cells, precip[i])
# Update soil moisture component
current_time = soil_moisture.update(current_time, Tr=storm_dt[i],
Tb=inter_storm_dt[i])
# Decide whether its growing season or not
if julian != 364:
if EP30[julian + 1, 0] > EP30[julian, 0]:
pet_threshold = 1
# 1 corresponds to ETThresholdup (begin growing season)
else:
pet_threshold = 0
# 0 corresponds to ETThresholddown (end growing season)
# Update vegetation component
vegetation.update(PETThreshold_switch=pet_threshold, Tb=inter_storm_dt[i],
Tr=storm_dt[i])
# Update yearly cumulative water stress data
water_stress += (grid.at_cell['vegetation__water_stress'] *
inter_storm_dt[i] / 24.)
# Record time (optional)
time_elapsed[i] = current_time
# Update spatial PFTs with Cellular Automata rules
if (current_time - time_check) >= 1.:
if yrs % 100 == 0:
print 'Elapsed time = ', yrs, ' years'
veg_type[yrs] = grid1.at_cell['vegetation__plant_functional_type']
WS_ = np.choose(veg_type[yrs], water_stress)
grid1.at_cell['vegetation__cumulative_water_stress'] = WS_ / Tg
vegca.update()
time_check = current_time
water_stress = 0
yrs += 1
veg_type[yrs] = grid1.at_cell['vegetation__plant_functional_type']
wallclock_stop = time.clock()
walltime = (wallclock_stop - wallclock_start) / 60. # in minutes
print 'Time_consumed = ', walltime, ' minutes'
# Saving
try:
os.mkdir('output')
except OSError:
pass
finally:
os.chdir('output')
save('veg', inter_storm_dt, storm_dt, precip, veg_type, yrs,
walltime, time_elapsed)
plot('veg', grid1, veg_type, yrs, yr_step=100)
| mit | -338,219,367,900,883,300 | 33.957447 | 84 | 0.646379 | false | 3.078701 | false | false | false |
danielnyga/dnutils | src/dnutils/tools.py | 1 | 10478 | '''
Created on May 22, 2017
@author: nyga
'''
import re
def ifnone(if_, else_, transform=None):
'''Returns the condition ``if_`` iff it is not ``None``, or if a transformation is
specified, ``transform(if_)``. Returns ``else_`` if the condition is ``None``.
``transform`` can be any callable, which will be passed ``if_`` in case ``if_`` is not ``None``.'''
if if_ is None:
return else_
else:
if transform is not None: return transform(if_)
else: return if_
def ifnot(if_, else_, transform=None):
'''Returns the condition ``if_`` iff it evaluates to ``True``, or if a transformation is
specified, ``transform(if_)``. Returns ``else_`` if the condition is ``False``.
``transform`` can be any callable, which will be passed ``if_`` in case ``if_`` is not ``False``.'''
if not bool(if_):
return else_
else:
if transform is not None: return transform(if_)
else: return if_
def ifstr(arg, transform):
'''
Returns ``transform(arg)`` if ``arg`` is a string, or returns ``arg``, otherwise
:param arg:
:param transform:
:return:
'''
return transform(arg) if type(arg) is str else arg
def allnone(it):
'''Returns True iff all elements in the iterable ``it`` are ``None``, and ``False`` otherwise.'''
return not ([1 for e in it if e is not None])
def allnot(it):
'''Returns True iff all elements in the iterable ``it`` evaluate to ``False``, and ``False`` otherwise.'''
return not ([1 for e in it if bool(e) is True])
def idxif(it, idx, transform=None):
'''Returns the element with the specified index of the iterable ``it``. If a ``transformation`` is specified,
the result of the ``transformation`` will be returned applied to the element.
If the iterable is ``None``, or ``it`` does not have enough elements, ``None`` is returned.'''
try:
it[idx]
except (IndexError, TypeError):
return None
el = it[idx]
if transform is not None:
return transform(el)
else:
return el
def first(it, transform=None, else_=None):
'''
Returns the first element of the iterable ``it``, if it has any.
Returns ``None``, if ``it`` is ``None`` or ``it` does not contain any elements. If a transformation is
specified, the result of the transformation applied to the first element is returned.
:param transform:
:param it:
:return:
'''
if it is None:
return else_
try:
el = next(iter(it))
if transform is not None:
return transform(el)
else:
return el
except StopIteration:
pass
return else_
def last(it, transform=None):
'''
Same as :func:`dnutils.tools.first`, but returns the last element.
:param it:
:param transform:
:return:
'''
return idxif(it, -1, transform=transform)
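# --- Editor's examples (not part of the original module) ---
# Quick doctest-style illustrations of the helpers above:
#
# >>> ifnone(None, 'default')
# 'default'
# >>> ifnone(3, 'default', transform=str)
# '3'
# >>> first([], else_=0)
# 0
# >>> last([1, 2, 3], transform=lambda x: x * 10)
# 30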
sqbrpattern = re.compile(r'\[(-?\d+)\]')
class edict(dict):
'''
Enhanced ``dict`` with some convenience methods such as dict addition and
subtraction.
Warning: The constructor using keyword arguments, ie. ``dict(one=1, two=2, ...)`` does not work
with the edict dictionaries. Instead, ``edict``s support default values corresponding to the
``defaultdict`` class from the ``itertools`` package.
:Example:
>>> s = edict({'a':{'b': 1}, 'c': [1,2,3]})
>>> r = edict({'x': 'z', 'c': 5})
>>> print s
{'a': {'b': 1}, 'c': [1, 2, 3]}
>>> print r
{'x': 'z', 'c': 5}
>>> print s + r
{'a': {'b': 1}, 'x': 'z', 'c': 5}
>>> print s - r
{'a': {'b': 1}}
>>> print r
{'x': 'z', 'c': 5}
'''
def __init__(self, d=None, default=None, recursive=False):
if d is None:
dict.__init__(self)
else:
dict.__init__(self, dict(d))
self._default = default
if recursive:
self._recurse()
def __iadd__(self, d):
self.update(d)
return self
def __isub__(self, d):
for k in d:
if k in self: del self[k]
return self
def __add__(self, d):
return type(self)({k: v for items in (self.items(), d.items())for k, v in items})
def __sub__(self, d):
return type(self)({k: v for k, v in self.items() if k not in d})
def __getitem__(self, key):
if self._default is not None and key not in self:
self[key] = self._default()
return self[key]
else:
return dict.__getitem__(self, key)
def _recurse(self):
for key, value in self.items():
if type(value) is list:
self[key] = [edict(v) if hasattr(v, '__getitem__') else v for v in value]
elif hasattr(value, '__getitem__'): #type(value) is dict:
self[key] = edict(value, default=self._default, recursive=True)
@staticmethod
def _todict(d, recursive=True):
d = dict(d)
if recursive:
for key, value in d.items():
if type(value) is edict:
d[key] = edict._todict(value, recursive=True)
return d
@staticmethod
def _parse_xpath(selector):
keys = map(str.strip, selector.split('/'))
for key in keys:
m = sqbrpattern.match(key)
if m is not None:
yield int(m.group(1))
else:
yield key
def xpath(self, selector, insert=None, force=False):
'''
Allows a 'pseudo-xpath' query to a nested set of dictionaries.
At the moment, only nested dict-selections separated by slashes (``/``) are supported.
Allows convenient access to hierarchical dictionary structures without the need
to check every key for existence.
:param selector: a slash-separated list of dict keys
:param insert:
:param force:
:return:
'''
keys = edict._parse_xpath(selector)
d = self
for key in keys:
if type(key) is int:
d = None if key >= len(d) else d[key]
else:
d = d.get(key)
if d is None:
if insert is None:
return None
return self.set_xpath(selector, insert, force=force)
return d
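# Hedged usage sketch (added for illustration, not part of the original
# class); assuming a small nested edict, xpath replaces chained lookups
# and existence checks with a single call:
# >>> e = edict({'a': {'b': {'c': 42}}})
# >>> e.xpath('a/b/c') # -> 42
# >>> e.xpath('a/x/y') # -> None (missing keys do not raise)
# >>> e.xpath('a/x/y', insert=1, force=True) # creates the path, returns 1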
def set_xpath(self, selector, data, force=False):
'''
Creates the xpath structure represented by the selector string, if necessary, to
set the data to the end point.
:param selector:
:param data:
:return:
'''
keys = list(edict._parse_xpath(selector))
d = self
for key in keys[:-1]:
if type(key) is int:
raise ValueError('indexing in set_xpath() is not yet supported')
else:
d_ = d.get(key)
if d_ is None or not isinstance(d_, dict) and force:
d[key] = edict()
d = d[key]
d[keys[-1]] = data
return data
def pprint(self):
from pprint import pprint
pprint(self)
def project(self, *keys):
'''
Returns a copy of this edict that contains only the pairs whose key is in ``keys``.
:param keys:
:return:
'''
return edict({k: v for k, v in self.items() if k in keys})
class RStorage(edict, object):
'''
Recursive extension of web.util.Storage that applies the Storage constructor
recursively to all value elements that are dicts.
'''
__slots__ = ['_utf8']
def __init__(self, d=None, utf8=False):
self._utf8 = utf8
if d is not None:
for k, v in d.iteritems(): self[k] = v
def __setattr__(self, key, value):
if key in self.__slots__:
self.__dict__[key] = value
else:
self[key] = value
def __setitem__(self, key, value):
if self._utf8 and isinstance(key, str): key = key.encode('utf8')
dict.__setitem__(self, key, rstorify(value, utf8=self._utf8))
def __getattr__(self, key):
if key in type(self).__slots__:
return self.__dict__[key]
else:
try:
return self[key]
except KeyError as k:
raise AttributeError(k)
def __delattr__(self, key):
try:
del self[key]
except KeyError as k:
raise AttributeError(k)
def __repr__(self):
return ('<%s ' % type(self).__name__) + dict.__repr__(self) + '>'
def rstorify(e, utf8=False):
if type(e) is dict:
return RStorage(d=e, utf8=utf8)
elif type(e) in (list, tuple):
return [rstorify(i, utf8=utf8) for i in e]
else: return e
def jsonify(item, ignore_errors=False):
'''
Recursively construct a json representation of the argument ``item``.
:param item:
:return:
'''
if hasattr(item, 'json'):
return item.json
elif hasattr(item, 'tojson'):
return item.tojson()
elif isinstance(item, dict):
return {str(k): jsonify(v, ignore_errors=ignore_errors) for k, v in item.items()}
elif type(item) in (list, tuple):
return [jsonify(e, ignore_errors=ignore_errors) for e in item]
elif isinstance(item, (int, float, bool, str, type(None))):
return item
else:
if not ignore_errors:
raise TypeError('object of type "%s" is not jsonifiable: %s' % (type(item), repr(item)))
else: return '%s (NOT JSONIFIABLE)' % str(item)
class LinearScale(object):
'''
Implementation of a linear mapping from one interval of real
numbers [a,b] into another one [c,d] by linearly interpolating.
Example:
>>> scale = LinearScale((1, 2), (-2, 278))
>>> scale(1.5)
138.0
'''
def __init__(self, fromint, toint, strict=True):
self._from = fromint
self._to = toint
self._fromrange = fromint[1] - fromint[0]
self._torange = toint[1] - toint[0]
self.strict = strict
def _apply(self, value):
if self.strict and not self._from[0] <= value <= self._from[1]:
raise ValueError('value out of range [%s, %s], got %s' % (self._from[0], self._from[1], value))
v = float((value-self._from[0])) / self._fromrange
return v * self._torange + self._to[0]
def __call__(self, value):
return self._apply(value)
if __name__ == '__main__':
d = edict({1:2,2:3})
print(d.project(2))
| mit | -7,107,193,630,464,736,000 | 29.637427 | 113 | 0.550678 | false | 3.792255 | false | false | false |
aringh/odl | examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py | 1 | 2880 | """Performance example of running native ASTRA vs using ODL for reconstruction.
In this example, a 512x512 image is reconstructed using the Conjugate Gradient
Least Squares method on the GPU.
In general, ASTRA is faster than ODL since it does not need to perform any
copies and all arithmetic is performed on the GPU. Despite this, ODL is not
much slower. In this example, the overhead is about 60 %, depending on the
hardware used.
"""
import astra
import numpy as np
import matplotlib.pyplot as plt
import scipy
import odl
# Common geometry parameters
domain_size = np.array([512, 512])
n_angles = 180
det_size = 362
niter = 50
phantom = np.rot90(scipy.misc.ascent().astype('float'), -1)
# --- ASTRA ---
# Define ASTRA geometry
vol_geom = astra.create_vol_geom(domain_size[0], domain_size[1])
proj_geom = astra.create_proj_geom('parallel',
np.linalg.norm(domain_size) / det_size,
det_size,
np.linspace(0, np.pi, n_angles))
# Create ASTRA projector
proj_id = astra.create_projector('cuda', proj_geom, vol_geom)
# Create sinogram
sinogram_id, sinogram = astra.create_sino(phantom, proj_id)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the CUDA backend
cfg = astra.astra_dict('CGLS_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['ProjectorId'] = proj_id
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
with odl.util.Timer('ASTRA run'):
# Run the algorithm
astra.algorithm.run(alg_id, niter)
# Get the result
rec = astra.data2d.get(rec_id)
# Clean up.
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
# --- ODL ---
# Create reconstruction space
reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size)
# Create geometry
geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size)
# Create ray transform
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')
# Create sinogram
data = ray_trafo(phantom)
# Solve with CGLS (aka CGN)
x = reco_space.zero()
with odl.util.Timer('ODL run'):
odl.solvers.conjugate_gradient_normal(ray_trafo, x, data, niter=niter)
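# Added note (hedged): conjugate_gradient_normal solves the normal
# equations A^T A x = A^T b with A = ray_trafo and b = data, i.e. the
# same least-squares problem that ASTRA's CGLS solves above. A rough
# equivalent spelled out with the adjoint would be (sketch only):
#
#     op = ray_trafo.adjoint * ray_trafo   # A^T A
#     rhs = ray_trafo.adjoint(data)        # A^T b
#     odl.solvers.conjugate_gradient(op, x, rhs, niter=niter)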
# Display results for comparison
plt.figure('Phantom')
plt.imshow(phantom.T, origin='lower', cmap='bone')
plt.figure('ASTRA sinogram')
plt.imshow(sinogram.T, origin='lower', cmap='bone')
plt.figure('ASTRA reconstruction')
plt.imshow(rec.T, origin='lower', cmap='bone')
plt.figure('ODL sinogram')
plt.imshow(data.asarray().T, origin='lower', cmap='bone')
plt.figure('ODL reconstruction')
plt.imshow(x.asarray().T, origin='lower', cmap='bone')
plt.show()
| mpl-2.0 | -7,807,791,797,508,731,000 | 28.090909 | 79 | 0.711111 | false | 3.034773 | false | false | false |
lalpert/gradsearch-scala | scrapers/gradsearch/spiders/princetonee_spider.py | 1 | 1145 | import scrapy
import urlparse
from gradsearch.items import Professor
class PrincetonEESpider(scrapy.Spider):
name = "princeton_ee"
allowed_domains = ["princeton.edu"]
start_urls = ["http://ee.princeton.edu/people/faculty"]
def cleanup(self, sel):
return sel.xpath('normalize-space(text())').extract()
def parse(self, response):
for prof_box in response.css(".views-row"):
href = prof_box.xpath('./div/span/a/@href').extract()
if href:
yield scrapy.Request(urlparse.urljoin(response.url, href[0]), callback = self.parse_prof)
def parse_prof(self, response):
name = response.css('.node').xpath('.//h1/text()').extract()[0]
keywords = response.css('h4.core-areas').xpath('./a/text()').extract() # TODO: can also get "application thrusts"
research_summary = ''.join(response.css('.field').xpath('./div/div/node()').extract()[1:])
image = response.css('.node').xpath('.//img/@src').extract()[0]
department = "Electrical Engineering"
yield Professor(
name = name,
keywords = keywords,
school = "Princeton",
image = image,
research_summary = research_summary,
department = department)
| mit | -9,096,574,537,370,510,000 | 32.676471 | 115 | 0.676856 | false | 3.171745 | false | false | false |
alexbruy/QGIS | python/plugins/processing/gui/GetScriptsAndModels.py | 1 | 14193 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GetScriptsAndModels.py
---------------------
Date : June 2014
Copyright : (C) 2014 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'June 2014'
__copyright__ = '(C) 2014, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
from functools import partial
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt, QCoreApplication, QUrl
from qgis.PyQt.QtGui import QIcon, QCursor
from qgis.PyQt.QtWidgets import QApplication, QTreeWidgetItem, QPushButton
from qgis.PyQt.QtNetwork import QNetworkReply, QNetworkRequest
from qgis.utils import iface, show_message_log
from qgis.core import QgsNetworkAccessManager, QgsMessageLog
from qgis.gui import QgsMessageBar
from processing.core.alglist import algList
from processing.gui.ToolboxAction import ToolboxAction
from processing.gui import Help2Html
from processing.gui.Help2Html import getDescription, ALG_DESC, ALG_VERSION, ALG_CREATOR
from processing.script.ScriptUtils import ScriptUtils
from processing.algs.r.RUtils import RUtils
from processing.modeler.ModelerUtils import ModelerUtils
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgGetScriptsAndModels.ui'))
class GetScriptsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get scripts from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'script.png'))
def execute(self):
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.SCRIPTS)
dlg.exec_()
if dlg.updateProvider:
algList.reloadProvider('script')
class GetRScriptsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get R scripts from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'r.svg'))
def execute(self):
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.RSCRIPTS)
dlg.exec_()
if dlg.updateProvider:
self.toolbox.updateProvider('r')
class GetModelsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get models from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'model.png'))
def execute(self):
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.MODELS)
dlg.exec_()
if dlg.updateProvider:
algList.reloadProvider('model')
class GetScriptsAndModelsDialog(BASE, WIDGET):
HELP_TEXT = QCoreApplication.translate('GetScriptsAndModelsDialog',
'<h3> Processing resources manager </h3>'
'<p>Check/uncheck algorithms in the tree to select the ones that you '
'want to install or remove</p>'
'<p>Algorithms are divided into 3 groups:</p>'
'<ul><li><b>Installed:</b> Algorithms already in your system, with '
'the latest version available</li>'
'<li><b>Updatable:</b> Algorithms already in your system, but with '
'a newer version available in the server</li>'
'<li><b>Not installed:</b> Algorithms not installed in your '
'system</li></ul>')
MODELS = 0
SCRIPTS = 1
RSCRIPTS = 2
tr_disambiguation = {0: 'GetModelsAction',
1: 'GetScriptsAction',
2: 'GetRScriptsAction'}
def __init__(self, resourceType):
super(GetScriptsAndModelsDialog, self).__init__(iface.mainWindow())
self.setupUi(self)
if hasattr(self.leFilter, 'setPlaceholderText'):
self.leFilter.setPlaceholderText(self.tr('Search...'))
self.manager = QgsNetworkAccessManager.instance()
self.resourceType = resourceType
if self.resourceType == self.MODELS:
self.folder = ModelerUtils.modelsFolders()[0]
self.urlBase = 'https://raw.githubusercontent.com/qgis/QGIS-Processing/master/models/'
self.icon = QIcon(os.path.join(pluginPath, 'images', 'model.png'))
elif self.resourceType == self.SCRIPTS:
self.folder = ScriptUtils.scriptsFolders()[0]
self.urlBase = 'https://raw.githubusercontent.com/qgis/QGIS-Processing/master/scripts/'
self.icon = QIcon(os.path.join(pluginPath, 'images', 'script.png'))
else:
self.folder = RUtils.RScriptsFolders()[0]
self.urlBase = 'https://raw.githubusercontent.com/qgis/QGIS-Processing/master/rscripts/'
self.icon = QIcon(os.path.join(pluginPath, 'images', 'r.svg'))
self.lastSelectedItem = None
self.updateProvider = False
self.data = None
self.populateTree()
self.buttonBox.accepted.connect(self.okPressed)
self.buttonBox.rejected.connect(self.cancelPressed)
self.tree.currentItemChanged.connect(self.currentItemChanged)
self.leFilter.textChanged.connect(self.fillTree)
def popupError(self, error=None, url=None):
"""Popups an Error message bar for network errors."""
disambiguation = self.tr_disambiguation[self.resourceType]
widget = iface.messageBar().createMessage(self.tr('Connection problem', disambiguation),
self.tr('Could not connect to scripts/models repository', disambiguation))
if error and url:
QgsMessageLog.logMessage(self.tr(u"Network error code: {} on URL: {}").format(error, url), self.tr(u"Processing"), QgsMessageLog.CRITICAL)
button = QPushButton(QCoreApplication.translate("Python", "View message log"), pressed=show_message_log)
widget.layout().addWidget(button)
iface.messageBar().pushWidget(widget, level=QgsMessageBar.CRITICAL, duration=5)
def grabHTTP(self, url, loadFunction, arguments=None):
"""Grab distant content via QGIS internal classes and QtNetwork."""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
request = QUrl(url)
reply = self.manager.get(QNetworkRequest(request))
if arguments:
reply.finished.connect(partial(loadFunction, reply, arguments))
else:
reply.finished.connect(partial(loadFunction, reply))
while not reply.isFinished():
QCoreApplication.processEvents()
def populateTree(self):
self.grabHTTP(self.urlBase + 'list.txt', self.treeLoaded)
def treeLoaded(self, reply):
"""
Update the tree of scripts/models whenever
the HTTP request is finished
"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
self.popupError(reply.error(), reply.request().url().toString())
else:
resources = unicode(reply.readAll()).splitlines()
resources = [r.split(',') for r in resources]
self.resources = {f: (v, n) for f, v, n in resources}
reply.deleteLater()
self.fillTree()
def fillTree(self):
self.tree.clear()
self.uptodateItem = QTreeWidgetItem()
self.uptodateItem.setText(0, self.tr('Installed'))
self.toupdateItem = QTreeWidgetItem()
self.toupdateItem.setText(0, self.tr('Updatable'))
self.notinstalledItem = QTreeWidgetItem()
self.notinstalledItem.setText(0, self.tr('Not installed'))
self.toupdateItem.setIcon(0, self.icon)
self.uptodateItem.setIcon(0, self.icon)
self.notinstalledItem.setIcon(0, self.icon)
text = unicode(self.leFilter.text())
for i in sorted(self.resources.keys(), key=lambda kv: kv[2].lower()):
filename = i
version = self.resources[filename][0]
name = self.resources[filename][1]
treeBranch = self.getTreeBranchForState(filename, float(version))
if text == '' or text.lower() in filename.lower():
item = TreeItem(filename, name, self.icon)
treeBranch.addChild(item)
if treeBranch != self.notinstalledItem:
item.setCheckState(0, Qt.Checked)
self.tree.addTopLevelItem(self.toupdateItem)
self.tree.addTopLevelItem(self.notinstalledItem)
self.tree.addTopLevelItem(self.uptodateItem)
if text != '':
self.tree.expandAll()
self.txtHelp.setHtml(self.HELP_TEXT)
def setHelp(self, reply, item):
"""Change the HTML content"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
html = self.tr('<h2>No detailed description available for this script</h2>')
else:
content = unicode(reply.readAll())
descriptions = json.loads(content)
html = '<h2>%s</h2>' % item.name
html += self.tr('<p><b>Description:</b> %s</p>') % getDescription(ALG_DESC, descriptions)
html += self.tr('<p><b>Created by:</b> %s') % getDescription(ALG_CREATOR, descriptions)
html += self.tr('<p><b>Version:</b> %s') % getDescription(ALG_VERSION, descriptions)
reply.deleteLater()
self.txtHelp.setHtml(html)
def currentItemChanged(self, item, prev):
if isinstance(item, TreeItem):
url = self.urlBase + item.filename.replace(' ', '%20') + '.help'
self.grabHTTP(url, self.setHelp, item)
else:
self.txtHelp.setHtml(self.HELP_TEXT)
def getTreeBranchForState(self, filename, version):
if not os.path.exists(os.path.join(self.folder, filename)):
return self.notinstalledItem
else:
helpFile = os.path.join(self.folder, filename + '.help')
try:
with open(helpFile) as f:
helpContent = json.load(f)
currentVersion = float(helpContent[Help2Html.ALG_VERSION])
except Exception:
currentVersion = 0
if version > currentVersion:
return self.toupdateItem
else:
return self.uptodateItem
def cancelPressed(self):
super(GetScriptsAndModelsDialog, self).reject()
def storeFile(self, reply, filename):
"""store a script/model that has been downloaded"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
if os.path.splitext(filename)[1].lower() == '.help':
content = '{"ALG_VERSION" : %s}' % self.resources[filename[:-5]][0]
else:
self.popupError(reply.error(), reply.request().url().toString())
content = None
else:
content = reply.readAll()
reply.deleteLater()
if content:
path = os.path.join(self.folder, filename)
with open(path, 'w') as f:
f.write(content)
self.progressBar.setValue(self.progressBar.value() + 1)
def okPressed(self):
toDownload = []
for i in xrange(self.toupdateItem.childCount()):
item = self.toupdateItem.child(i)
if item.checkState(0) == Qt.Checked:
toDownload.append(item.filename)
for i in xrange(self.notinstalledItem.childCount()):
item = self.notinstalledItem.child(i)
if item.checkState(0) == Qt.Checked:
toDownload.append(item.filename)
if toDownload:
self.progressBar.setMaximum(len(toDownload) * 2)
for i, filename in enumerate(toDownload):
QCoreApplication.processEvents()
url = self.urlBase + filename.replace(' ', '%20')
self.grabHTTP(url, self.storeFile, filename)
url += '.help'
self.grabHTTP(url, self.storeFile, filename + '.help')
toDelete = []
for i in xrange(self.uptodateItem.childCount()):
item = self.uptodateItem.child(i)
if item.checkState(0) == Qt.Unchecked:
toDelete.append(item.filename)
# Remove py and help files if they exist
for filename in toDelete:
for pathname in (filename, filename + u".help"):
path = os.path.join(self.folder, pathname)
if os.path.exists(path):
os.remove(path)
self.updateProvider = len(toDownload) + len(toDelete) > 0
super(GetScriptsAndModelsDialog, self).accept()
class TreeItem(QTreeWidgetItem):
def __init__(self, filename, name, icon):
QTreeWidgetItem.__init__(self)
self.name = name
self.filename = filename
self.setText(0, name)
self.setIcon(0, icon)
self.setCheckState(0, Qt.Unchecked)
| gpl-2.0 | -6,995,422,623,564,464,000 | 40.258721 | 150 | 0.593462 | false | 4.137901 | false | false | false |
snap-stanford/ogb | examples/lsc/mag240m/label_prop.py | 1 | 2884 | # NOTE: More than 256GB CPU memory required to run this script.
# Use `--low-memory` to reduce memory consumption by using half-precision
import os.path as osp
import time
import argparse
import torch
import torch.nn.functional as F
from torch_sparse import SparseTensor
from torch_geometric.nn import LabelPropagation
from torch_geometric.nn.conv.gcn_conv import gcn_norm
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
from root import ROOT
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_layers', type=int, default=3),
parser.add_argument('--alpha', type=float, default=0.9),
parser.add_argument('--low-memory', action='store_true'),
args = parser.parse_args()
print(args)
dataset = MAG240MDataset(ROOT)
evaluator = MAG240MEvaluator()
t = time.perf_counter()
print('Reading adjacency matrix...', end=' ', flush=True)
path = f'{dataset.dir}/paper_to_paper_symmetric.pt'
if osp.exists(path):
adj_t = torch.load(path)
else:
edge_index = dataset.edge_index('paper', 'cites', 'paper')
edge_index = torch.from_numpy(edge_index)
adj_t = SparseTensor(
row=edge_index[0], col=edge_index[1],
sparse_sizes=(dataset.num_papers, dataset.num_papers),
is_sorted=True)
adj_t = adj_t.to_symmetric()
torch.save(adj_t, path)
adj_t = gcn_norm(adj_t, add_self_loops=False)
if args.low_memory:
adj_t = adj_t.to(torch.half)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
train_idx = dataset.get_idx_split('train')
valid_idx = dataset.get_idx_split('valid')
test_idx = dataset.get_idx_split('test')
y_train = torch.from_numpy(dataset.paper_label[train_idx]).to(torch.long)
y_valid = torch.from_numpy(dataset.paper_label[valid_idx]).to(torch.long)
model = LabelPropagation(args.num_layers, args.alpha)
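# Added note (hedged): LabelPropagation in torch_geometric implements the
# classic iterative update Y_(t+1) = alpha * S @ Y_t + (1 - alpha) * Y_0,
# with S the gcn_norm-ed adjacency built above. A minimal dense sketch of
# the same idea (illustrative names, not the library internals):
#
#     # y0: (N, C) one-hot seed labels, S: (N, N) normalized adjacency
#     # y = y0.clone()
#     # for _ in range(args.num_layers):
#     #     y = args.alpha * (S @ y) + (1 - args.alpha) * y0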
N, C = dataset.num_papers, dataset.num_classes
t = time.perf_counter()
print('Propagating labels...', end=' ', flush=True)
if args.low_memory:
y = torch.zeros(N, C, dtype=torch.half)
y[train_idx] = F.one_hot(y_train, C).to(torch.half)
out = model(y, adj_t, post_step=lambda x: x)
y_pred = out.argmax(dim=-1)
else:
y = torch.zeros(N, C)
y[train_idx] = F.one_hot(y_train, C).to(torch.float)
out = model(y, adj_t)
y_pred = out.argmax(dim=-1)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
train_acc = evaluator.eval({
'y_true': y_train,
'y_pred': y_pred[train_idx]
})['acc']
valid_acc = evaluator.eval({
'y_true': y_valid,
'y_pred': y_pred[valid_idx]
})['acc']
print(f'Train: {train_acc:.4f}, Valid: {valid_acc:.4f}')
res = {'y_pred': y_pred[test_idx]}
evaluator.save_test_submission(res, 'results/label_prop')
| mit | 2,963,788,015,815,800,000 | 34.170732 | 79 | 0.626907 | false | 3.077908 | false | true | false |
kmuehlbauer/wradlib | wradlib/qual.py | 1 | 8000 | #!/usr/bin/env python
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Data Quality
^^^^^^^^^^^^
This module will serve two purposes:
#. provide routines to create simple radar data quality related fields.
#. provide routines to decide which radar pixel to choose based on the
competing information in different quality fields.
Data is supposed to be stored in 'aligned' arrays. Aligned here means that
all fields are structured such that in each field the data for a certain index
is representative for the same physical target.
Therefore no assumptions are made on the dimensions or shape of the input
fields except that they exhibit the numpy ndarray interface.
.. autosummary::
:nosignatures:
:toctree: generated/
pulse_volume
beam_block_frac
cum_beam_block_frac
get_bb_ratio
"""
import numpy as np
def pulse_volume(ranges, h, theta):
"""Calculates the sampling volume of the radar beam per bin depending on \
range and aperture.
We assume a cone frustum which has the volume
:math:`V=(\\pi/3) \\cdot h \\cdot (R^2 + R \\cdot r + r^2)`.
R and r are the radii of the two frustum surface circles. Assuming that the
pulse width is small compared to the range, we get
:math:`R=r= \\tan ( 0.5 \\cdot \\theta \\cdot \\pi/180 ) \\cdot range`
with theta being the aperture angle (beam width).
Thus, the pulse volume simply becomes the volume of a cylinder with
:math:`V=\\pi \\cdot h \\cdot range^2 \\cdot \\tan(
0.5 \\cdot \\theta \\cdot \\pi/180)^2`
Parameters
----------
ranges : :class:`numpy:numpy.ndarray`
the distances of each bin from the radar [m]
h : float
pulse width (which corresponds to the range resolution [m])
theta : float
the aperture angle (beam width) of the radar beam [degree]
Returns
-------
output : :class:`numpy:numpy.ndarray`
Volume of radar bins at each range in `ranges` [:math:`m^3`]
Examples
--------
See :ref:`/notebooks/workflow/recipe1.ipynb`.
"""
return np.pi * h * (ranges ** 2) * (np.tan(np.radians(theta/2.))) ** 2
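# Hedged usage sketch (added for illustration): for a 1 degree beam width,
# a 100 m pulse and a single 10 km range bin the cylinder approximation
# above gives roughly 2.4e6 m^3:
# >>> pulse_volume(np.array([10000.]), 100., 1.) # -> approx. array([2.39e+06])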
def beam_block_frac(th, bh, a):
"""Partial beam blockage fraction.
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
From Bech et al. (2003), Eqn 2 and Appendix
Parameters
----------
th : float | :class:`numpy:numpy.ndarray` of floats
Terrain height [m]
bh : float | :class:`numpy:numpy.ndarray` of floats
Beam height [m]
a : float | :class:`numpy:numpy.ndarray` of floats
Half power beam radius [m]
Returns
-------
pbb : float
Partial beam blockage fraction [unitless]
Examples
--------
>>> pbb = beam_block_frac(th,bh,a) #doctest: +SKIP
See :ref:`/notebooks/beamblockage/wradlib_beamblock.ipynb`.
Note
----
This procedure uses a simplified interception function where no vertical
gradient of refractivity is considered. Other algorithms treat this
more thoroughly. However, this is accurate in most cases other than
the super-refractive case.
See the half_power_radius function to calculate variable `a`.
The heights must be the same units!
"""
isfloat = (isinstance(th, float)
and isinstance(bh, float)
and isinstance(a, float))
# convert to numpy array in any case
th = np.atleast_1d(th)
bh = np.atleast_1d(bh)
a = np.atleast_1d(a)
# First find the difference between the terrain and height of
# radar beam (Bech et al. (2003), Fig.3)
y = th - bh
# check if beam is clear or blocked
ya = y / a
clear = ya < -1.
block = ya > 1.
numer = (ya * np.sqrt(a ** 2 - y ** 2)) + \
(a * np.arcsin(ya)) + (np.pi * a / 2.)
denom = np.pi * a
pbb = numer / denom
pbb[clear] = 0.
pbb[block] = 1.
if isfloat:
return pbb[0]
else:
return pbb
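# Hedged numeric sketch (added for illustration): with the beam centre 100 m
# above the terrain (th=500, bh=600) and a half-power radius a=200, y/a is
# -0.5 and the expression above gives a partial blockage of about 0.196:
# >>> beam_block_frac(500., 600., 200.) # -> approx. 0.196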
def cum_beam_block_frac(pbb):
"""Cumulative beam blockage fraction along a beam.
Computes the cumulative beam blockage (cbb) along a beam from the partial
beam blockage (pbb) fraction of each bin along that beam. CBB in one bin
along a beam will always be at least as high as the maximum PBB of the
preceding bins.
Parameters
----------
pbb : :class:`numpy:numpy.ndarray`
2-D array of floats of shape (num beams, num range bins)
Partial beam blockage fraction of a bin along a beam [m]
Returns
-------
cbb : :class:`numpy:numpy.ndarray`
Array of floats of the same shape as pbb
Cumulative partial beam blockage fraction [unitless]
Examples
--------
>>> pbb = beam_block_frac(th, bh, a) #doctest: +SKIP
>>> cbb = cum_beam_block_frac(pbb) #doctest: +SKIP
See :ref:`/notebooks/beamblockage/wradlib_beamblock.ipynb`.
"""
# This is the index of the maximum PBB along each beam
maxindex = np.nanargmax(pbb, axis=1)
cbb = np.copy(pbb)
# Iterate over all beams
for ii, index in enumerate(maxindex):
premax = 0.
for jj in range(index):
# Only iterate to max index to make this faster
if pbb[ii, jj] > premax:
cbb[ii, jj] = pbb[ii, jj]
premax = pbb[ii, jj]
else:
cbb[ii, jj] = premax
# beyond max index, everything is max anyway
cbb[ii, index:] = pbb[ii, index]
return cbb
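# Added note (hedged): the loop above is just a per-beam running maximum, so
# for NaN-free input the same result can be obtained in one vectorized call:
#
#     cbb = np.maximum.accumulate(pbb, axis=1)
#
# The explicit loop is kept because it mirrors the reference implementation.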
def get_bb_ratio(bb_height, bb_width, quality, zp_r):
"""Returns the Bright Band ratio of each PR bin
With *SR*, we refer to precipitation radars based on space-born platforms
such as TRMM or GPM.
This function basically applies the Bright Band (BB) information as
provided by the corresponding SR datasets per beam, namely BB height and
width, as well as quality flags of the SR beams. A BB ratio of <= 0
indicates that a bin is located below the melting layer (ML), >=1
above the ML, and in between 0 and 1 inside the ML.
Parameters
----------
bb_height : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams) containing the SR beams' BB heights
in meters.
bb_width : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams) containing the SR beams' BB widths
in meters.
quality : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams) containing the SR beams' BB quality
index.
zp_r : :class:`numpy:numpy.ndarray`
Array of SR bin altitudes of shape (nscans, nbeams, nbins).
Returns
-------
ratio : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams, nbins) containing the BB ratio of
every SR bin.
- ratio <= 0: below the ML
- 0 < ratio < 1: inside the ML
- 1 <= ratio: above the ML
ibb : :class:`numpy:numpy.ndarray`
Boolean array containing the indices of SR bins connected to the
BB.
"""
# parameters for bb detection
ibb = (bb_height > 0) & (bb_width > 0) & (quality == 1)
# set non-bb-pixels to np.nan
bb_height = bb_height.copy()
bb_height[~ibb] = np.nan
bb_width = bb_width.copy()
bb_width[~ibb] = np.nan
# get median of bb-pixels
bb_height_m = np.nanmedian(bb_height)
bb_width_m = np.nanmedian(bb_width)
# approximation of melting layer top and bottom
zmlt = bb_height_m + bb_width_m / 2.
zmlb = bb_height_m - bb_width_m / 2.
# get ratio connected to brightband height
ratio = (zp_r - zmlb) / (zmlt - zmlb)
return ratio, ibb
if __name__ == '__main__':
print('wradlib: Calling module <qual> as main...')
| mit | 8,926,593,673,090,248,000 | 29.128405 | 79 | 0.6045 | false | 3.59874 | false | false | false |
dstaple/z3test | scripts/mk_copyright.py | 3 | 1766 | # Copyright (c) 2015 Microsoft Corporation
import os
import re
cr = re.compile("Copyright")
aut = re.compile("Automatically generated")
aut2 = re.compile("auto-generated")
cr_notice = """
/*++
Copyright (c) 2015 Microsoft Corporation
--*/
"""
smt2_cr_notice = """
; Copyright (c) 2015 Microsoft Corporation
"""
py_cr_notice = """
# Copyright (c) 2015 Microsoft Corporation
"""
def has_cr(file):
ins = open(file)
lines = 0
line = ins.readline()
while line and lines < 20:
m = cr.search(line)
if m:
ins.close()
return True
m = aut.search(line)
if m:
ins.close()
return True
m = aut2.search(line)
if m:
ins.close()
return True
line = ins.readline()
lines += 1
ins.close()
return False
def add_cr(file):
tmp = "%s.tmp" % file
ins = open(file)
ous = open(tmp,'w')
if file.endswith("smt2"):
ous.write(smt2_cr_notice)
elif file.endswith("py"):
ous.write(py_cr_notice)
else:
ous.write(cr_notice)
line = ins.readline()
while line:
ous.write(line)
line = ins.readline()
ins.close()
ous.close()
os.system("move %s %s" % (tmp, file))
def add_missing_cr(dir):
for root, dirs, files in os.walk(dir):
for f in files:
if f.endswith('.cpp') or f.endswith('.h') or f.endswith('.c') or f.endswith('.cs') or f.endswith('.py') or f.endswith('.smt2'):
path = "%s\\%s" % (root, f)
if not has_cr(path):
print("Missing CR for %s" % path)
add_cr(path)
add_missing_cr('regressions')
#add_missing_cr('old-regressions')
#add_missing_cr('ClusterExperiment')
| mit | 3,061,600,513,663,548,000 | 21.641026 | 139 | 0.543601 | false | 3.3258 | false | false | false |
sburnett/seattle | autograder/emulab/sample_client.py | 1 | 2122 | #from remote_emulab import *
import remote_emulab
# This is a very rough sample of what a client
# to the remote_emulab.py library will look like
#
# Everything below is hardcoded and is not intended
# to be executed (it will fail because it assumes a
# new and unique exp name). It is just an exmaple
# senerio.
# if you want to run it and see it work, first you'll
# need to set up keys and your login for remote_emulab.py
# then just change exp below from "helloworld"+n to
# "helloworld"+(n+1) so that the name will be unique.
# You should probably be polite and permanently remove
# the new exp from emulab when you are done.
# SETUP SOME CONSTANTS
# specify the emulab proj name, this is always 'Seattle'
proj = "Seattle"
# specify the exp name, this is unique for any class assignment
exp = "lantest"
#specify the name of an ns file being used
mynsfn = "hello.ns"
# EXECUTE A BASIC SENERIO
# read the ns file into a string
mynsfobj = open(mynsfn)
mynsfilestr = mynsfobj.read()
mynsfobj.close()
# check the ns file for errors
(passed,message) = remote_emulab.checkNS(mynsfilestr)
# did the parsing fail?
if (not passed):
print message
print "checkNS failed, please fix the ns file and try again"
else:
# start a new exp in non-batchmode
print "starting a new exp..."
remote_emulab.startexp(proj,exp,mynsfilestr)
# wait for the exp to go active
# by default times out in 10 minutes
print "exp started, waiting for active..."
remote_emulab.wait_for_active(proj,exp)
print "now active... getting mapping"
mapping = remote_emulab.get_mapping(proj,exp)
print "mapping: "+str(mapping)
simple_mapping = remote_emulab.get_ips(mapping)
print " got mapping, getting links"
print "links: "+str(remote_emulab.get_links(proj,exp))
# exit this code, go and do your experiment
# when the exp is done we'll swap it out
print "finished exp, swapping out"
#remote_emulab.swapOUT(proj,exp)
print "swaped out"
# Some additional notes.
# Since we did a swap out and not an endexp
# the exp will still exist in emulab
# we can re-run it, or modify it and re-run it
| mit | 8,734,794,021,364,859,000 | 25.525 | 63 | 0.719604 | false | 3.279753 | false | false | false |
AlexanderSavelyev/rdkit | Contrib/mmpa/indexing.py | 1 | 21045 | # Copyright (c) 2012, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, September 2012
from __future__ import print_function
import sys
import re
from rdkit import Chem
from optparse import OptionParser
def heavy_atom_count(smi):
m = Chem.MolFromSmiles(smi)
return m.GetNumAtoms()
def add_to_index(smi,attachments,cmpd_heavy):
result = False
core_size = heavy_atom_count(smi) - attachments
if(use_ratio):
core_ratio = float(core_size) / float(cmpd_heavy)
if(core_ratio <= ratio ):
result = True
else:
if(core_size <= max_size):
result = True
return result
def get_symmetry_class(smi):
symmetry = []
m = Chem.MolFromSmiles(smi)
#determine the symmetry class
#see: http://www.mail-archive.com/[email protected]/msg01894.html
#Thanks to Greg (and Alan)
Chem.AssignStereochemistry(m,cleanIt=True,force=True,flagPossibleStereoCenters=True)
#get the symmetry class of the attachment points
#Note: 1st star is the zero index,
#2nd star is first index, etc
for atom in m.GetAtoms():
if(atom.GetMass() == 0):
symmetry.append(atom.GetProp('_CIPRank'))
return symmetry
def cansmirk(lhs,rhs,context):
#cansmirk algorithm
#1) cansmi the LHS.
#2) For the LHS the 1st star will have label 1, 2nd star will have label 2 and so on
#3) Do a symmetry check of lhs and rhs and use that to decide if the labels on
# RHS and/or context need to change.
#4) For the rhs, if you have a choice (i.e. two attachment points are symmetrically
# equivalent), always put the label with lower numerical value on the earlier
# attachment point on the cansmi-ed smiles
#print "in: %s,%s" % (lhs,rhs)
isotope_track={}
#if the star count of lhs/context/rhs is 1, single cut
stars = lhs.count("*")
if(stars > 1):
#get the symmetry class of stars of lhs and rhs
lhs_sym = get_symmetry_class(lhs)
rhs_sym = get_symmetry_class(rhs)
#deal with double cuts
if(stars == 2):
#simple cases
#unsymmetric lhs and unsymmetric rhs
if( (lhs_sym[0] != lhs_sym[1]) and (rhs_sym[0] != rhs_sym[1]) ):
#get 1st and 2nd labels and store the new label for it in isotope_track
#structure: isotope_track[old_label]=new_label (as strings)
isotope_track = build_track_dictionary(lhs,stars)
#switch labels using isotope track
lhs = switch_labels_on_position(lhs)
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#symmetric lhs and symmetric rhs
elif( (lhs_sym[0] == lhs_sym[1]) and (rhs_sym[0] == rhs_sym[1]) ):
#the points are all equivalent so change labels on lhs and rhs based on position
#labels on context don't need to change
lhs = switch_labels_on_position(lhs)
rhs = switch_labels_on_position(rhs)
#more difficult cases..
#symmetric lhs and unsymmetric rhs
elif( (lhs_sym[0] == lhs_sym[1]) and (rhs_sym[0] != rhs_sym[1]) ):
#switch labels lhs based on position
lhs = switch_labels_on_position(lhs)
#change labels on rhs based on position but need to record
#the changes as we need to apply them to the context
isotope_track = build_track_dictionary(rhs,stars)
rhs = switch_labels_on_position(rhs)
context = switch_labels(isotope_track,stars,context)
#unsymmetric lhs and symmetric rhs
elif( (lhs_sym[0] != lhs_sym[1]) and (rhs_sym[0] == rhs_sym[1]) ):
#change labels on lhs based on position but need to record
#the changes as we need to apply them to the context
isotope_track = build_track_dictionary(lhs,stars)
lhs = switch_labels_on_position(lhs)
context = switch_labels(isotope_track,stars,context)
#as rhs is symmetric, positions are equivalent so change labels on position
rhs = switch_labels_on_position(rhs)
#deal with triple cut
#unwieldy code but most readable I can make it
elif(stars == 3):
#simple cases
#completely symmetric lhs and completely symmetric rhs
if( ( (lhs_sym[0] == lhs_sym[1]) and (lhs_sym[1] == lhs_sym[2]) and (lhs_sym[0] == lhs_sym[2]) ) and
( (rhs_sym[0] == rhs_sym[1]) and (rhs_sym[1] == rhs_sym[2]) and (rhs_sym[0] == rhs_sym[2]) ) ):
#the points are all equivalent so change labels on lhs and rhs based on position
#labels on context don't need to change
lhs = switch_labels_on_position(lhs)
rhs = switch_labels_on_position(rhs)
#completely symmetric lhs and completely unsymmetric rhs
elif( ( (lhs_sym[0] == lhs_sym[1]) and (lhs_sym[1] == lhs_sym[2]) and (lhs_sym[0] == lhs_sym[2]) ) and
( (rhs_sym[0] != rhs_sym[1]) and (rhs_sym[1] != rhs_sym[2]) and (rhs_sym[0] != rhs_sym[2]) ) ):
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change labels on rhs based on position but need to record
#the changes as we need to apply them to the context
isotope_track = build_track_dictionary(rhs,stars)
rhs = switch_labels_on_position(rhs)
context = switch_labels(isotope_track,stars,context)
#completely unsymmetric lhs and completely unsymmetric rhs
elif( ( (lhs_sym[0] != lhs_sym[1]) and (lhs_sym[1] != lhs_sym[2]) and (lhs_sym[0] != lhs_sym[2]) ) and
( (rhs_sym[0] != rhs_sym[1]) and (rhs_sym[1] != rhs_sym[2]) and (rhs_sym[0] != rhs_sym[2]) ) ):
#build the isotope track
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change rhs and context based on isotope_track
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#completely unsymmetric lhs and completely symmetric rhs
elif( ( (lhs_sym[0] != lhs_sym[1]) and (lhs_sym[1] != lhs_sym[2]) and (lhs_sym[0] != lhs_sym[2]) ) and
( (rhs_sym[0] == rhs_sym[1]) and (rhs_sym[1] == rhs_sym[2]) and (rhs_sym[0] == rhs_sym[2]) ) ):
#build isotope trach on lhs
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change labels on context
context = switch_labels(isotope_track,stars,context)
#all positions on rhs equivalent so add labels on position
rhs = switch_labels_on_position(rhs)
#more difficult cases, partial symmetry
#completely unsymmetric on lhs and partial symmetry on rhs
elif( (lhs_sym[0] != lhs_sym[1]) and (lhs_sym[1] != lhs_sym[2]) and (lhs_sym[0] != lhs_sym[2]) ):
#build the isotope track
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change rhs and context based on isotope_track
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#tweak positions on rhs based on symmetry
#rhs 1,2 equivalent
if(rhs_sym[0] == rhs_sym[1]):
#tweak rhs position 1 and 2 as they are symmetric
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,2)
#rhs 2,3 equivalent
elif(rhs_sym[1] == rhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,2,3)
#rhs 1,3 equivalent - try for larger set in future
elif(rhs_sym[0] == rhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,3)
#now we are left with things with partial symmetry on lhs and not completely symmetric or unsymmetric on rhs
else:
#lhs 1,2,3 equivalent and any sort of partial symmetry on rhs
if( (lhs_sym[0] == lhs_sym[1]) and (lhs_sym[1] == lhs_sym[2]) and (lhs_sym[0] == lhs_sym[2]) ):
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change labels on rhs based on position but need to record
#the changes as we need to apply them to the context
isotope_track = build_track_dictionary(rhs,stars)
rhs = switch_labels_on_position(rhs)
context = switch_labels(isotope_track,stars,context)
#now deal partial symmetry on lhs or rhs.
#Cases where:
#lhs 1,2 equivalent
#lhs 2,3 equivalent
#lhs 1,3 equivalent
else:
#build isotope track on lhs
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change rhs and context based on isotope_track
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#tweak positions on rhs based on symmetry
#lhs 1,2 equivalent
if(lhs_sym[0] == lhs_sym[1]):
#tweak rhs position 1 and 2 as they are symmetric on lhs
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,2)
#lhs 2,3 equivalent
elif(lhs_sym[1] == lhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric on lhs
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,2,3)
#lhs 1,3 equivalent - try for larger set in future
elif(lhs_sym[0] == lhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric on lhs
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,3)
smirk = "%s>>%s" % (lhs,rhs)
return smirk,context
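# Hedged usage sketch (added for illustration, not from the original source):
# for a single-cut fragment pair the relabelling logic above is skipped, e.g.
# smirks, context = cansmirk('[*:1]C', '[*:1]CC', 'c1ccccc1[*:1]')
# should return smirks '[*:1]C>>[*:1]CC' with the context unchanged.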
def switch_specific_labels_on_symmetry(smi,symmetry_class,a,b):
#check if a and b positions are symmetrically equivalent
#if equivalent, swap labels if the lower numerical label is not on the
#1st symmetrically equivalent attachment points in the smi
if(symmetry_class[a-1] == symmetry_class[b-1]):
#what are the labels on a and b
matchObj = re.search( r'\[\*\:([123])\].*\[\*\:([123])\].*\[\*\:([123])\]', smi )
if matchObj:
#if the higher label comes first, fix
if(int(matchObj.group(a)) > int(matchObj.group(b))):
#if(int(matchObj.group(1)) > int(matchObj.group(2))):
smi = re.sub(r'\[\*\:'+matchObj.group(a)+'\]', '[*:XX' + matchObj.group(b) + 'XX]' , smi)
smi = re.sub(r'\[\*\:'+matchObj.group(b)+'\]', '[*:XX' + matchObj.group(a) + 'XX]' , smi)
smi = re.sub('XX', '' , smi)
return smi
def switch_labels_on_position(smi):
#move the labels in order of position
smi = re.sub(r'\[\*\:[123]\]', '[*:XX1XX]' , smi, 1)
smi = re.sub(r'\[\*\:[123]\]', '[*:XX2XX]' , smi, 1)
smi = re.sub(r'\[\*\:[123]\]', '[*:XX3XX]' , smi, 1)
smi = re.sub('XX', '' , smi)
return smi
def switch_labels(track,stars,smi):
#switch labels based on the input dictionary track
if(stars > 1):
#for k in track:
# print "old: %s, new: %s" % (k,track[k])
if(track['1'] != '1'):
smi = re.sub(r'\[\*\:1\]', '[*:XX' + track['1'] + 'XX]' , smi)
if(track['2'] != '2'):
smi = re.sub(r'\[\*\:2\]', '[*:XX' + track['2'] + 'XX]' , smi)
if(stars == 3):
if(track['3'] != '3'):
smi = re.sub(r'\[\*\:3\]', '[*:XX' + track['3'] + 'XX]' , smi)
#now remove the XX
smi = re.sub('XX', '' , smi)
return smi
def build_track_dictionary(smi,stars):
isotope_track = {}
#find 1st label, record it in isotope_track as key, with value being the
#new label based on its position (1st star is 1, 2nd star 2 etc.)
if(stars ==2):
matchObj = re.search( r'\[\*\:([123])\].*\[\*\:([123])\]', smi )
if matchObj:
isotope_track[matchObj.group(1)] = '1'
isotope_track[matchObj.group(2)] = '2'
elif(stars ==3):
matchObj = re.search( r'\[\*\:([123])\].*\[\*\:([123])\].*\[\*\:([123])\]', smi )
if matchObj:
isotope_track[matchObj.group(1)] = '1'
isotope_track[matchObj.group(2)] = '2'
isotope_track[matchObj.group(3)] = '3'
return isotope_track
def index_hydrogen_change():
#Algorithm details
#have an index of common fragment(key) => fragments connected to it (values)
#Need to add *-H to the values where appropriate - and it's
#appropriate when the key is what you would get if you chopped a H off a cmpd.
#Therefore simply need to check if key with the * replaced with a H is
#the same as any full smiles in the set
#
#Specific details:
#1) Loop through keys of index
#2) If key is the result of a single cut (so contains only 1 *) replace the * with H, and cansmi
#3) If full smiles matches key in hash above, add *-H to that fragment index.
for key in index:
attachments = key.count('*')
#print attachments
if(attachments==1):
smi = key
#simple method
smi = re.sub(r'\[\*\:1\]', '[H]' , smi)
#now cansmi it
temp = Chem.MolFromSmiles(smi)
if(temp == None):
sys.stderr.write('Error with key: %s, Added H: %s\n' %(key,smi) )
else:
c_smi = Chem.MolToSmiles( temp, isomericSmiles=True )
if(c_smi in smi_to_id):
core = "[*:1][H]"
id = smi_to_id[c_smi]
value = "%s;t%s" % (id,core)
#add to index
index[key].append(value)
if __name__=='__main__':
#note max heavy atom count does not
#include the attachement points (*)
max_size = 10
ratio = 0.3
use_ratio = False
index={}
smi_to_id={}
id_to_smi={}
id_to_heavy={}
#set up the command line options
#parser = OptionParser()
parser = OptionParser(description="Program to generate MMPs")
parser.add_option('-s', '--symmetric', default=False, action='store_true', dest='sym',
help='Output symmetrically equivalent MMPs, i.e. output both cmpd1,cmpd2, SMIRKS:A>>B and cmpd2,cmpd1, SMIRKS:B>>A')
parser.add_option('-m','--maxsize',action='store', dest='maxsize', type='int',
help='Maximum size of change (in heavy atoms) allowed in matched molecular pairs identified. DEFAULT=10. \
Note: This option overrides the ratio option if both are specified.')
parser.add_option('-r','--ratio',action='store', dest='ratio', type='float',
help='Maximum ratio of change allowed in matched molecular pairs identified. The ratio is: size of change / \
size of cmpd (in terms of heavy atoms). DEFAULT=0.3. Note: If this option is used with the maxsize option, the maxsize option will be used.')
#parse the command line options
(options, args) = parser.parse_args()
#print options
if(options.maxsize != None):
max_size = options.maxsize
elif(options.ratio != None):
ratio = options.ratio
if(ratio >= 1):
print("Ratio specified: %s. Ratio needs to be less than 1.")
sys.exit(1)
use_ratio = True
#read the STDIN
for line in sys.stdin:
line = line.rstrip()
smi,id,core,context = line.split(',')
#fill in dictionaries
smi_to_id[smi]=id
id_to_smi[id]=smi
#if using the ratio option, check if heavy atom
#of mol already calculated. If not, calculate and store
cmpd_heavy = None
if(use_ratio):
if( (id in id_to_heavy) == False):
id_to_heavy[id] = heavy_atom_count(smi)
cmpd_heavy = id_to_heavy[id]
#deal with cmpds that have not been fragmented
if(len(core) == 0) and (len(context) == 0):
continue
#deal with single cuts
if(len(core) == 0):
side_chains = context.split('.')
#minus 1 for the attachment pt
if( add_to_index(side_chains[1],1,cmpd_heavy)==True ):
context = side_chains[0]
core = side_chains[1]
value = "%s;t%s" % (id,core)
#add the array if no key exists
#add the context with id to index
index.setdefault(context, []).append(value)
#minus 1 for the attachment pt
if( add_to_index(side_chains[0],1,cmpd_heavy)==True ):
context = side_chains[1]
core = side_chains[0]
value = "%s;t%s" % (id,core)
#add the array if no key exists
#add the context with id to index
index.setdefault(context, []).append(value)
#double or triple cut
else:
attachments = core.count('*')
if( add_to_index(core,attachments,cmpd_heavy)==True ):
value = "%s;t%s" % (id,core)
#add the array if no key exists
#add the context with id to index
index.setdefault(context, []).append(value)
#index the H change
index_hydrogen_change()
#Now index is ready
#loop through the index
for key in index:
total = len(index[key])
#check if have more than one value
if(total == 1):
continue
for xa in xrange(total):
for xb in xrange(xa, total):
if(xa != xb):
#now generate the pairs
id_a,core_a = index[key][xa].split(";t")
id_b,core_b = index[key][xb].split(";t")
#make sure pairs are not same molecule
if(id_a != id_b):
#make sure LHS and RHS of SMIRKS are not the same
if(core_a != core_b):
smirks,context = cansmirk(core_a,core_b,key)
print("%s,%s,%s,%s,%s,%s" % ( id_to_smi[id_a], id_to_smi[id_b], id_a, id_b, smirks, context ))
#deal with symmetry switch
if(options.sym == True):
smirks,context = cansmirk(core_b,core_a,key)
print("%s,%s,%s,%s,%s,%s" % ( id_to_smi[id_b], id_to_smi[id_a], id_b, id_a, smirks, context ))
| bsd-3-clause | 3,132,523,979,875,397,600 | 39.627413 | 163 | 0.5665 | false | 3.614116 | false | false | false |
rahul-ramadas/BagOfTricks | InsertMarkdownLink.py | 1 | 1987 | import sublime
import sublime_plugin
MARKDOWN_LINK_SNIPPET = "[${{1:{}}}](${{2:{}}})"
class InsertMarkdownLinkCommand(sublime_plugin.TextCommand):
def decode_page(self, page_bytes, potential_encoding=None):
if potential_encoding:
try:
text = page_bytes.decode(potential_encoding)
return text
except:
pass
encodings_to_try = ["utf-8", "iso-8859-1"]
for encoding in encodings_to_try:
if encoding == potential_encoding:
continue
try:
text = page_bytes.decode(encoding)
return text
except:
pass
raise UnicodeDecodeError
def run(self, edit):
import re
def on_done(link):
import urllib.request
request = urllib.request.Request(link, headers={'User-Agent' : 'Google Internal-Only Browser'})
with urllib.request.urlopen(request) as page:
encoding = page.headers.get_content_charset()
text = self.decode_page(page.read(), encoding)
match = re.search("<title>(.+?)</title>", text, re.IGNORECASE | re.DOTALL)
if match is None:
title = link
else:
title = match.group(1).strip()
markdown_link = MARKDOWN_LINK_SNIPPET.format(title, link)
self.view.run_command("insert_snippet", {"contents": markdown_link})
clipboard_text = sublime.get_clipboard(2000)
if re.match("https?://", clipboard_text, re.IGNORECASE) is not None:
initial_text = clipboard_text
else:
initial_text = ""
input_view = self.view.window().show_input_panel("Link", initial_text, on_done, None, None)
input_view.sel().clear()
input_view.sel().add(sublime.Region(0, input_view.size()))
| unlicense | -25,537,689,219,243,780 | 32.859649 | 107 | 0.535984 | false | 4.319565 | false | false | false |
djaodjin/djaodjin-signup | signup/serializers.py | 1 | 13054 | # Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from django.core import validators
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
import phonenumbers
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from .models import Activity, Contact, Notification
from .utils import get_account_model, has_invalid_password
from .validators import (validate_email_or_phone,
validate_username_or_email_or_phone)
LOGGER = logging.getLogger(__name__)
class PhoneField(serializers.CharField):
def to_internal_value(self, data):
"""
Returns a formatted phone number as a string.
"""
if self.required:
try:
phone_number = phonenumbers.parse(data, None)
except phonenumbers.NumberParseException as err:
LOGGER.info("tel %s:%s", data, err)
phone_number = None
if not phone_number:
try:
phone_number = phonenumbers.parse(data, "US")
except phonenumbers.NumberParseException:
raise ValidationError(self.error_messages['invalid'])
if phone_number and not phonenumbers.is_valid_number(phone_number):
raise ValidationError(self.error_messages['invalid'])
return phonenumbers.format_number(
phone_number, phonenumbers.PhoneNumberFormat.E164)
return None
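# Hedged usage note (added for illustration): with the parsing above, a US
# number entered as "(541) 754-3010" on a required field is normalized to
# E.164 form, i.e. "+15417543010"; values that cannot be parsed raise a
# ValidationError, and optional fields simply return None.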
class CommField(serializers.CharField):
"""
Either an e-mail address or a phone number
"""
default_error_messages = {
'invalid': _('Enter a valid email address or phone number.')
}
def __init__(self, **kwargs):
super(CommField, self).__init__(**kwargs)
self.validators.append(validate_email_or_phone)
class UsernameOrCommField(serializers.CharField):
"""
Either a username, e-mail address or a phone number
"""
default_error_messages = {
'invalid': _('Enter a valid username, email address or phone number.')
}
def __init__(self, **kwargs):
super(UsernameOrCommField, self).__init__(**kwargs)
self.validators.append(validate_username_or_email_or_phone)
class NoModelSerializer(serializers.Serializer):
def create(self, validated_data):
raise RuntimeError('`create()` should not be called.')
def update(self, instance, validated_data):
raise RuntimeError('`update()` should not be called.')
class ActivateUserSerializer(serializers.ModelSerializer):
username = serializers.CharField(required=False,
help_text=_("Username to identify the account"))
new_password = serializers.CharField(required=False, write_only=True,
style={'input_type': 'password'}, help_text=_("Password with which"\
" a user can authenticate with the service"))
full_name = serializers.CharField(required=False,
help_text=_("Full name (effectively first name followed by last name)"))
class Meta:
model = get_user_model()
fields = ('username', 'new_password', 'full_name')
class ActivitySerializer(serializers.ModelSerializer):
account = serializers.SlugRelatedField(allow_null=True,
slug_field='slug', queryset=get_account_model().objects.all(),
help_text=_("Account the activity is associated to"))
created_by = serializers.SlugRelatedField(
read_only=True, slug_field='username',
help_text=_("User that created the activity"))
class Meta:
model = Activity
fields = ('created_at', 'created_by', 'text', 'account')
read_only_fields = ('created_at', 'created_by')
class AuthenticatedUserPasswordSerializer(NoModelSerializer):
password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("Password of the user making the HTTP request"))
class Meta:
fields = ('password',)
class APIKeysSerializer(NoModelSerializer):
"""
username and password for authentication through API.
"""
secret = serializers.CharField(max_length=128, read_only=True,
help_text=_("Secret API Key used to authenticate user on every HTTP"\
" request"))
class Meta:
fields = ('secret',)
class PublicKeySerializer(AuthenticatedUserPasswordSerializer):
"""
Updates a user public key
"""
pubkey = serializers.CharField(max_length=500,
style={'input_type': 'password'},
help_text=_("New public key for the user referenced in the URL"))
class ContactSerializer(serializers.ModelSerializer):
"""
This serializer is used in lists and other places where a Contact/User
profile is referenced.
For a detailed profile, see `ContactDetailSerializer`.
"""
printable_name = serializers.CharField(
help_text=_("Printable name"), read_only=True)
credentials = serializers.SerializerMethodField(read_only=True,
help_text=_("True if the user has valid login credentials"))
class Meta:
model = Contact
fields = ('slug', 'printable_name', 'picture', 'email', 'created_at',
'credentials',)
read_only_fields = ('slug', 'printable_name', 'created_at',
'credentials',)
@staticmethod
def get_credentials(obj):
return (not has_invalid_password(obj.user)) if obj.user else False
class ContactDetailSerializer(ContactSerializer):
"""
This serializer is used in APIs where a single Contact/User
profile is returned.
For a summary profile, see `ContactSerializer`.
"""
activities = ActivitySerializer(many=True, read_only=True)
class Meta(ContactSerializer.Meta):
fields = ContactSerializer.Meta.fields + ('phone',
'full_name', 'nick_name', 'lang', 'extra', 'activities',)
read_only_fields = ContactSerializer.Meta.read_only_fields + (
'activities',)
class StringListField(serializers.ListField):
child = serializers.CharField()
class NotificationsSerializer(serializers.ModelSerializer):
notifications = StringListField(allow_empty=True,
help_text=_("List of notifications from %s") %
', '.join([item[0] for item in Notification.NOTIFICATION_TYPE]))
class Meta:
model = get_user_model()
fields = ('notifications',)
class CredentialsSerializer(NoModelSerializer):
"""
username and password for authentication through API.
"""
username = UsernameOrCommField(
help_text=_("Username, e-mail address or phone number to identify"\
" the account"))
password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("Secret password for the account"))
code = serializers.IntegerField(required=False, write_only=True,
style={'input_type': 'password'},
help_text=_("One-time code. This field will be checked against"\
" an expected code when multi-factor authentication (MFA)"\
" is enabled."))
class CreateUserSerializer(serializers.ModelSerializer):
username = serializers.CharField(required=False,
help_text=_("Username to identify the account"))
password = serializers.CharField(required=False, write_only=True,
style={'input_type': 'password'}, help_text=_("Password with which"\
" a user can authenticate with the service"))
email = serializers.EmailField(
help_text=_("Primary e-mail to contact user"), required=False)
phone = PhoneField(
help_text=_("Primary phone number to contact user"), required=False)
full_name = serializers.CharField(
help_text=_("Full name (effectively first name followed by last name)"))
lang = serializers.CharField(
help_text=_("Preferred communication language"), required=False)
class Meta:
model = get_user_model()
fields = ('username', 'password', 'email', 'phone', 'full_name', 'lang')
class PasswordResetConfirmSerializer(NoModelSerializer):
new_password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("New password for the user referenced in the URL"))
class PasswordChangeSerializer(PasswordResetConfirmSerializer):
password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("Password of the user making the HTTP request"))
class PasswordResetSerializer(NoModelSerializer):
"""
Serializer to send an e-mail to a user in order to recover her account.
"""
email = CommField(
help_text=_("Email or phone number to recover the account"))
class TokenSerializer(NoModelSerializer):
"""
token to verify or refresh.
"""
token = serializers.CharField(
help_text=_("Token used to authenticate user on every HTTP request"))
class ValidationErrorSerializer(NoModelSerializer):
"""
Details on why token is invalid.
"""
detail = serializers.CharField(help_text=_("Describes the reason for"\
" the error in plain text"))
class UploadBlobSerializer(NoModelSerializer):
"""
Upload a picture or other POD content
"""
location = serializers.URLField(
help_text=_("URL to uploaded content"))
class UserSerializer(serializers.ModelSerializer):
"""
This serializer is a substitute for `ContactSerializer` whose intent is to
    facilitate composition of this App with other Django Apps which reference
    a `django.contrib.auth.User` model. It is not used in this App.
XXX currently used in `api.auth.JWTBase` for payloads.
"""
# Only way I found out to remove the ``UniqueValidator``. We are not
# interested to create new instances here.
slug = serializers.CharField(source='username', validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _("Enter a valid username."),
'invalid')],
help_text=_("Username"))
printable_name = serializers.CharField(source='get_full_name',
help_text=_("Full name"))
picture = serializers.SerializerMethodField(read_only=True,
help_text=_("Picture"))
email = serializers.EmailField(
help_text=_("Primary e-mail to contact user"), required=False)
phone = PhoneField(
help_text=_("Primary phone number to contact user"), required=False)
created_at = serializers.DateTimeField(source='date_joined',
help_text=_("date at which the account was created"))
credentials = serializers.SerializerMethodField(read_only=True,
help_text=_("True if the user has valid login credentials"))
# XXX username and full_name are duplicates of slug and printable_name
# respectively. They are still included in this version for backward
# compatibility.
username = serializers.CharField(validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _("Enter a valid username."),
'invalid')],
help_text=_("Username"))
full_name = serializers.CharField(source='get_full_name',
help_text=_("Full name"))
class Meta:
model = get_user_model()
fields = ('slug', 'printable_name', 'picture', 'email', 'phone',
'created_at', 'credentials', 'username', 'full_name')
read_only_fields = ('slug', 'printable_name', 'created_at',
'credentials',)
@staticmethod
def get_credentials(obj):
return not has_invalid_password(obj)
@staticmethod
def get_picture(obj):
contact = obj.contacts.filter(picture__isnull=False).order_by(
'created_at').first()
if contact:
return contact.picture
return None
| bsd-2-clause | 8,961,634,846,136,696,000 | 36.085227 | 80 | 0.673816 | false | 4.444671 | false | false | false |
mariocannistra/radio-astronomy | findsessionrange.py | 1 | 1973 | #!/usr/bin/python
# this source is part of my Hackster.io project: https://www.hackster.io/mariocannistra/radio-astronomy-with-rtl-sdr-raspberrypi-and-amazon-aws-iot-45b617
# this program will determine the overall range of signal strengths received during the whole session.
# this program can be run standalone but is usually run at end of session by doscan.py
# Its output will be stored in 2 files:
# dbminmax.txt and session-overview.png . The first contains two rows of text with just the maximum
# and minimum of the whole session. The second contains a chart of all the min and max values for each of
# the scan files
from glob import glob
import numpy as np
import radioConfig
import subprocess
import os
import datetime
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
globmax = -9000
globmin = 9000
sessmin = np.empty(shape=[0, 1])
sessmax = np.empty(shape=[0, 1])
scantimeline = np.empty(shape=[0, 1])
files_in_dir = sorted(glob("*.csv"))
for fname in files_in_dir:
dbs = np.genfromtxt(fname,dtype='float',delimiter = ',', skip_header=0, skip_footer=0, usecols=(6,),usemask=True)
thismin=dbs.min()
thismax=dbs.max()
scantime=str(fname)[11:17]
print scantime,thismin,thismax
if thismin < globmin:
globmin = thismin
if thismax > globmax:
globmax = thismax
sessmin = np.append(sessmin, thismin)
sessmax = np.append(sessmax, thismax)
scantimeline = np.append(scantimeline, scantime)
mytitle = 'Signal strength range: min %f .. max %f' % (globmin,globmax)
print mytitle
xs = range(len(scantimeline))
plt.plot(xs,sessmin )
plt.plot(xs,sessmax )
plt.xticks(xs,scantimeline,rotation=70)
plt.grid()
plt.title(mytitle)
#plt.show()
plt.savefig('session-overview.png')
sessfile = open("dbminmax.txt", "w")
sessfile.write(str(globmax))
sessfile.write("\n")
sessfile.write(str(globmin))
sessfile.write("\n")
sessfile.close()
| mit | -5,841,826,111,265,500,000 | 28.893939 | 155 | 0.729346 | false | 3.126783 | false | false | false |
rwaldron/Espruino | boards/STM32F429IDISCOVERY.py | 1 | 5051 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "STM32 F429 Discovery",
'link' : [ "http://www.st.com/web/catalog/tools/FM116/SC959/SS1532/LN1199/PF259090" ],
'default_console' : "EV_SERIAL1",
'variables' : 5450,
'binary_name' : 'espruino_%v_stm32f429idiscovery.bin',
};
chip = {
'part' : "STM32F429ZIT6",
'family' : "STM32F4",
'package' : "LQFP144",
'ram' : 128,#256,
'flash' : 512, #2048,
'speed' : 168,
'usart' : 6,
'spi' : 3,
'i2c' : 3,
'adc' : 3,
'dac' : 2,
};
# left-right, or top-bottom order
board = {
'left' : [ ], # fixme
'left2' : [ ],
'right2' : [ ],
'right' : [ ],
};
devices = {
'OSC' : { 'pin_1' : 'H0',
'pin_2' : 'H1' },
'OSC_RTC' : { 'pin_1' : 'C14',
'pin_2' : 'C15' },
'LED1' : { 'pin' : 'G13' }, # green
'LED2' : { 'pin' : 'G14' }, # red
'BTN1' : { 'pin' : 'A0' },
'USB' : { 'pin_dm' : 'B14',
'pin_dp' : 'B15',
'pin_vbus' : 'B13',
'pin_id' : 'B12',
'pin_pso' : 'C4', # Power supply enable
'pin_oc' : 'C5', # Overcurrent
},
'MEMS' : { 'device' : 'L3GD20',
'pin_cs' : 'C1',
'pin_int1' : 'A1',
'pin_int2' : 'A2',
'pin_mosi' : 'F9',
'pin_miso' : 'F8',
'pin_sck' : 'F7' },
'TOUCHSCREEN' : {
'pin_irq' : 'A15',
'pin_cs' : '',
'pin_scl' : 'A8',
'pin_sda' : 'C9',
},
'LCD' : {
'width' : 320, 'height' : 240, 'bpp' : 16, 'controller' : 'fsmc', 'controller2' : 'ili9341',
'pin_d0' : 'D6',
'pin_d1' : 'G11',
'pin_d2' : 'G12',
'pin_d3' : 'A3',
'pin_d4' : 'B8',
'pin_d5' : 'B9',
'pin_d6' : 'A6',
'pin_d7' : 'G10',
'pin_d8' : 'B10',
'pin_d9' : 'B11',
'pin_d10' : 'C7',
'pin_d11' : 'D3',
'pin_d12' : 'C10',
'pin_d13' : 'B0',
'pin_d14' : 'A11',
'pin_d15' : 'A12',
          'pin_d16' : 'B1',
          'pin_d17' : 'G6', # assumed fix: this entry duplicated the 'pin_d16' key, renamed to the next data line
'pin_rd' : 'D12', # RDX
'pin_wr' : 'D13',# WRQ
'pin_cs' : 'C2', # CSX
'pin_en' : 'F10',
'pin_vsync' : 'A4',
'pin_hsync' : 'C6',
'pin_dotlck' : 'G7',
'pin_dc' : 'F7', # DCX
'pin_sda' : 'F9',
'pin_im0' : 'D2', # pulled to 0
'pin_im1' : 'D4', # pulled to 1
'pin_im2' : 'D5', # pulled to 1
'pin_im3' : 'D7', # pulled to 0
},
'SDRAM' : {
'pin_sdcke1' : 'B5',
'pin_sdne1' : 'B6',
'pin_sdnwe' : 'C0',
'pin_d2' : 'D0',
'pin_d3' : 'D1',
'pin_d13' : 'D8',
'pin_d14' : 'D9',
'pin_d15' : 'D10',
'pin_d0' : 'D14',
'pin_d1' : 'D15',
'pin_nbl0' : 'E0',
'pin_nbl1' : 'E1',
'pin_d4' : 'E7',
'pin_d5' : 'E8',
'pin_d6' : 'E9',
'pin_d7' : 'E10',
'pin_d8' : 'E11',
'pin_d9' : 'E12',
'pin_d10' : 'E13',
'pin_d11' : 'E14',
'pin_d12' : 'E15',
'pin_a0' : 'F0',
'pin_a1' : 'F1',
'pin_a2' : 'F2',
'pin_a3' : 'F3',
'pin_a4' : 'F4',
'pin_a5' : 'F5',
'pin_sdnras' : 'F11',
'pin_a6' : 'F12',
'pin_a7' : 'F13',
'pin_a8' : 'F14',
'pin_a9' : 'F15',
'pin_a10' : 'G0',
'pin_a11' : 'G1',
'pin_ba0' : 'G4',
'pin_ba1' : 'G5',
'pin_sdclk' : 'G8',
'pin_sdncas' : 'G15',
},
};
board_css = """
#board {
width: 680px;
height: 1020px;
left: 200px;
background-image: url(img/STM32F429IDISCOVERY.jpg);
}
#boardcontainer {
height: 1020px;
}
#left {
top: 375px;
right: 590px;
}
#left2 {
top: 375px;
left: 105px;
}
#right {
top: 375px;
left: 550px;
}
#right2 {
top: 375px;
right: 145px;
}
""";
def get_pins():
pins = pinutils.scan_pin_file([], 'stm32f40x.csv', 6, 9, 10)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"])
| mpl-2.0 | 6,059,209,135,743,455,000 | 27.061111 | 104 | 0.402891 | false | 2.607641 | false | false | false |
TAMU-CPT/galaxy-tools | tools/genome_viz/brigaid.py | 1 | 36126 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
Pedro Cerqueira
github: @pedrorvc
DESCRIPTION
    This script creates xml files containing the information necessary
    for the execution of BRIG (BLAST Ring Image Generator), reducing the time
    spent on the tedious task of setting up all the information in the GUI
    and providing a quick way to produce an image.
    The arguments for this script expose some (but not all)
    of the available options in BRIG, namely the ones I changed most often.
USAGE:
brigaid.py -q reference_sequence.fna -rfd path/to/reference/dir -od path/to/output/dir -of path/to/output/dir/output_file
-oi path/to/output/BRIG/output_image -t Image_title -a annotation_file.gbk --genes genes_of_interest.txt
--contig-order contig_order.tsv
"""
import argparse
import csv
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from xml.dom import minidom
from Bio import SeqIO
from matplotlib import cm
def listdir_fullpath(path):
""" Gets the full path of the files from a directory
Args:
path (str): full path to a directory
Returns:
list containing the full path of every file contained in the input directory
"""
return [os.path.join(path, f) for f in os.listdir(path)]
def ring_attributes(colour, name, position):
""" Creates ring attributes.
Args:
colour (str): color of the ring.
name (str): name of the ring.
position (str): position of the ring.
Returns:
ring_attrs (dict): attributes of any regular ring of the BRIG xml.
"""
ring_attrs = {"colour" : colour,
"name": name,
"position" : position,
"upperInt" : "90",
"lowerInt" : "70",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return ring_attrs
def annotation_ring_attributes(position):
""" Creates annotation ring attributes.
Args:
position (str): position of the ring.
Returns:
annotation_ring_attrs (dict): attributes of the annotation ring of the BRIG xml.
"""
annotation_ring_attrs = {"colour" : '172,14,225',
"name": 'null',
"position" : position,
"upperInt" : "70",
"lowerInt" : "50",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return annotation_ring_attrs
def create_feature_attrs(label, colour, decoration, start, stop):
""" Create attributes for the Feature SubElements of the annotation ring.
Args:
label (str): name of the gene/CDS to annotate
colour (str): colour of the decoration for the annotation
decoration (str): shape of the gene/CDS to annotate, for example, 'clockwise-arrow'
start (str): start of the gene/CDS to annotate
stop (str): stop of the gene/CDS to annotate
    Returns:
feature_element_attrs (dict): attributes of the feature element.
feature_range_element_attrs (dict): attributes of the feature range element
"""
feature_element_attrs = {'label' : label,
'colour' : colour,
'decoration' : decoration}
feature_range_element_attrs = {'start' : start,
'stop' : stop}
return feature_element_attrs, feature_range_element_attrs
def create_annotation_ring_tsv(annotation_ring, annotation_file):
""" Uses a tsv file to annotate the reference genome.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
"""
with open(annotation_file) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Obtain the annotations from the file contents
for row in reader:
start = row['#START']
stop = row['STOP']
label = row['Label']
colour = row['Colour']
decoration = row['Decoration']
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, colour, decoration, start, stop)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
def annotation_ring_feature_elements_gbk_concat(annotation_ring, record, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
#if type(genome_size) == int:
# Obtain the features of the Genbank file records
for fea in record.features:
# Get the start and end position of the genome
# Also get the strand
if fea.type == 'CDS':
start = str(fea.location.start.position)
end = str(fea.location.end.position)
strand = fea.location.strand
# Get the label of the gene or product
if 'gene' in fea.qualifiers:
label = str(fea.qualifiers['gene'][0])
elif 'product' in fea.qualifiers:
product = fea.qualifiers['product'][0]
label = str(product)
else:
continue
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if fea.type == 'source':
size = fea.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, record, genes, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file
and specific gene annotations.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
for f in record.features:
if f.type == 'CDS':
# Find the 'gene' tag and determine if the gene belongs to the specified genes to be annotated
if 'gene' in f.qualifiers and f.qualifiers['gene'][0] in genes:
label = f.qualifiers['gene'][0]
elif 'product' in f.qualifiers and f.qualifiers['product'][0] in genes:
product = f.qualifiers['product'][0]
label = product
else:
continue
# Determine the start, stop and strand of the gene
start = str(f.location.start.position + genome_size)
end = str(f.location.end.position + genome_size)
strand = f.location.strand
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if f.type == "source":
size = f.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records):
""" Create annotation ring using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
"""
if genes_of_interest != []:
        # Get the genes to search for in the Genbank file
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
# Create feature elements of the annotation ring
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order):
""" Create annotation ring using a Genbank annotation file divided by contigs.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
contig_order (str): Full path to the file containing the order of the contigs.
"""
if contig_order != []:
with open(contig_order) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Create an OrderedDict with the contents of the file
            # The keys are numbers representing the order of the contigs
# The values are the names of the contigs
content_dict = OrderedDict()
for r in reader:
content_dict[r["order"]] = r["contig"]
# Create an OrderedDict with the content of each contig
# The keys are the names of the contigs
            # The values are SeqRecord objects from BioPython
seq_records_dict = OrderedDict()
for record in records:
seq_records_dict[record.id] = record
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, ord_record, genes, genome_size)
genome_size = gsize
else:
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_gbk_concat(annotation_ring, ord_record, genome_size)
genome_size = gsize
else:
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def write_xml(root_elem, output_file):
""" Writes a xml file.
Args:
root_elem is a ElementTree Element object containing all the information
required for the output file.
output_file (str): full path to the output file
"""
xml_file = ET.tostring(root_elem, encoding='utf8').decode('utf8')
pretty_xml_file = minidom.parseString(xml_file).toprettyxml(indent=' ')
output_file = output_file + ".xml"
with open(output_file, "w") as f:
f.write(pretty_xml_file)
####### Create xml elemnts
# Create root element
def create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format):
"""
Creates the root element of the xml file and its attributes.
Args:
blast_options (str): additional options for blast, for example, -evalue or num_threads
legend_position (str): position of the legend on the image
query_file (str): full path to the query file
output_folder (str): full path to the output folder
image_output_file (str): full path to the image output file
title (str): title of the output image
image_format (str): format of the image output file
Returns:
root: ElementTree Element object containing the BRIG tag and its attributes
"""
root_attrs = {"blastOptions" : blast_options,
"legendPosition" : legend_position,
"queryFile" : query_file,
"outputFolder" : output_folder,
"blastPlus" : "yes",
"outputFile" : os.path.join(output_folder, image_output_file),
"title" : title,
"imageFormat" : image_format,
"queryFastaFile" : query_file,
"cgXML" : os.path.join(output_folder + "/scratch", os.path.basename(query_file) + ".xml")}
root = ET.Element('BRIG', attrib=root_attrs)
return root
#### Create root children
# Create cgview_settings element
def create_cgview_settings_element(root, height, width):
""" Creates the cgview_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
height (str): height of the output image in pixels
width (str): width of the output image in pixels
Returns:
cgview_settings: ElementTree SubElement object containing the cgview settings tag and its attributes
"""
cgview_settings_attrs = {"arrowheadLength" : "medium",
"backboneColor" : "black",
"backboneRadius" : "600",
"backboneThickness" : "medium",
"backgroundColor" : "white",
"borderColor" : "black",
"featureSlotSpacing" : "medium",
"featureThickness" : "30",
"giveFeaturePositions" : "false",
"globalLabel" : "true",
"height" : height,
"isLinear" : "false",
"labelFont" : "SansSerif,plain,25",
"labelLineLength" : "medium",
"labelLineThickness" : "medium",
"labelPlacementQuality" : "best",
"labelsToKeep" : "1000",
"longTickColor" : "black",
"minimumFeatureLength" : "medium",
"moveInnerLabelsToOuter" :"true",
"origin" : "12",
"rulerFont" : "SansSerif,plain,35",
"rulerFontColor" : "black",
"rulerPadding" : "40",
"rulerUnits" : "bases",
"shortTickColor" : "black",
"shortTickThickness" : "medium",
"showBorder" : "false",
"showShading" : "true",
"showWarning" : "false",
"tickDensity" : "0.2333",
"tickThickness" : "medium",
"titleFont" : "SansSerif,plain,45",
"titleFontColor" : "black",
"useColoredLabelBackgrounds" : "false",
"useInnerLabels" : "true",
"warningFont" : "Default,plain,35",
"warningFontColor" : "black",
"width" : width,
"zeroTickColor" : "black",
"tickLength" : "medium"}
cgview_settings = ET.SubElement(root, 'cgview_settings', attrib=cgview_settings_attrs)
return cgview_settings
# Create brig_settings element
def create_brig_settings_element(root, java_memory):
""" Creates the brig_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
java_memory (str): amount of memory (in bytes) java is allowed to use for BRIG
Returns:
brig_settings: ElementTree SubElement object containing the brig settings tag and its attributes
"""
brig_settings_attrs = {"Ring1" : "172,14,225",
"Ring2" : "222,149,220",
"Ring3" : "161,221,231",
"Ring4" : "49,34,221",
"Ring5" : "116,152,226",
"Ring6" : "224,206,38",
"Ring7" : "40,191,140",
"Ring8" : "158,223,139",
"Ring9" : "226,38,122",
"Ring10" :"211,41,77",
"defaultUpper" : "70",
"defaultLower" : "50",
"defaultMinimum" : "50",
"genbankFiles" : "gbk,gb,genbank",
"fastaFiles" : "fna,faa,fas,fasta,fa",
"emblFiles" : "embl",
"blastLocation" : "",
"divider" : "3",
"multiplier" : "3",
"memory" : java_memory,
"defaultSpacer" : "0"}
brig_settings = ET.SubElement(root,
"brig_settings",
attrib=brig_settings_attrs)
return brig_settings
## Create special element
def create_special_element(root):
"""Creates the 'special' element of the xml file and its attributes
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
Returns:
gc_content_special: ElementTree SubElement object containing the 'special' tag and its attributes
gc_skew_special: ElementTree SubElement object containing the 'special' tag and its attributes
"""
gc_content_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Content'})
gc_skew_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Skew'})
return gc_content_special, gc_skew_special
# Create reference dir element
def create_reference_directory_element(root, reference_directory):
""" Creates the 'reference directory' element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
Returns:
ref_file: ElementTree SubElement object containing the 'refFile' tag and its attributes
"""
ref_dir = ET.SubElement(root,
"refDir",
attrib={"location" : reference_directory})
# Obtain the full path for all the files in the directory
ref_dir_list = listdir_fullpath(reference_directory)
for f in ref_dir_list:
ref_file = ET.SubElement(ref_dir,
"refFile",
attrib={"location" : f})
return ref_file
# Create the ring where the annotations are defined
def create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order):
""" Creates the ring that will contain the annotations for the reference genome.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing a list of specific genes.
contig_order (str): Full path to the tab-delimited file containing the order of the contigs.
"""
# Determine the position of the annotation ring, which will be the position after the last reference genome
ring_position = len(os.listdir(reference_directory)) + 2
# Create the annotation ring element
annotation_ring = ET.SubElement(root, 'ring', attrib=annotation_ring_attributes(str(ring_position)))
# Check for tab-delimited annotation file input
if list(SeqIO.parse(annotation_file, "genbank")) == []:
create_annotation_ring_tsv(annotation_ring, annotation_file)
else:
# Get the records of the Genbank file
records = [r for r in SeqIO.parse(annotation_file, "genbank")]
### Check if a contig order file has been provided
if len(records) > 1: # If more than 1 record exists, then the Genbank file is divided by contigs
create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order)
else:
create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records)
## Create remaining rings
def create_ring_element(root, reference_directory, colormap):
""" Creates the ring elements of the xml file, containing the position and color of the rings.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
colormap (str): name of the colormap (available in matplotlib) to use for the color of the rings
Returns:
ring_number_element: ElementTree SubElement object containing the 'ring' tag and its attributes
ring_sequence_element: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
ref_dir_list = listdir_fullpath(reference_directory)
# Gets the colormap from matplotlib with as many colors as the number of files
cmap = cm.get_cmap(colormap, len(ref_dir_list))
list_colormap = cmap.colors.tolist()
# Remove the fourth element (transparency) because it is not necessary
colors_to_use = []
for l in list_colormap:
convert = [round(x * 255) for x in l]
convert.pop()
colors_to_use.append(convert)
#reversed_colors_to_use = colors_to_use[::-1]
# Check if the user provided an order for the rings
has_digit = [os.path.basename(x).split("_")[0].isdigit() for x in ref_dir_list]
if True in has_digit:
# Obtain the ring positions
ring_positions = [os.path.basename(x).split("_")[0] for x in ref_dir_list]
# Reverse sort the positions of the rings, because they will be created
# in a descending order of their positions
ring_positions.sort(reverse=True)
ref_dir_list.sort(reverse=True)
for ring in range(len(ref_dir_list)):
# The ring positions start at 2 due to the special rings (GC Content and GC Skew)
ring_position = int(ring_positions[ring]) + 1
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[1]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_position)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
else:
# Sort files by lowercase
ref_dir_list.sort(key=lambda y: y.lower())
# The number of rings starts at 2 due to the GC Content and GC Skew
ring_number = len(ref_dir_list) + 1
for ring in range(len(ref_dir_list)):
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[0]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_number)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
ring_number -= 1
return ring_number_element, ring_sequence_element
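# Illustrative naming of the files in the reference directory (hypothetical names):
#   "1_strainA.fna", "2_strainB.fna" -> ring positions taken from the numeric prefix,
#   "strainA.fna", "strainB.fna"     -> rings assigned in case-insensitive alphabetical order.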
## Create special rings
def create_special_ring_element(root):
""" Create the 'special' ring element and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
Returns:
gc_content_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
gc_skew_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
# Create ring attributes
gc_content_ring_attrs = ring_attributes('225,0,0', "GC Content", "0")
gc_skew_ring_attrs = ring_attributes('225,0,0', "GC Skew", "1")
# Add ring element to root
gc_skew_ring = ET.SubElement(root, 'ring', attrib=gc_skew_ring_attrs)
gc_content_ring = ET.SubElement(root, 'ring', attrib=gc_content_ring_attrs)
# Add sequence element to ring
gc_content_location = ET.SubElement(gc_content_ring, 'sequence', attrib={'location' : 'GC Content'})
gc_skew_location = ET.SubElement(gc_skew_ring, 'sequence', attrib={'location' : 'GC Skew'})
return gc_content_location, gc_skew_location
def main(query_file, reference_directory, output_folder, output_xml, image_output_file, title, annotation_file,
genes_of_interest, contig_order, blast_options, legend_position, image_format, height, width, java_memory, colormap):
root = create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format)
cgview_settings = create_cgview_settings_element(root, height, width)
brig_settings = create_brig_settings_element(root, java_memory)
special = create_special_element(root)
refdir = create_reference_directory_element(root, reference_directory)
if annotation_file:
create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order)
rings = create_ring_element(root, reference_directory, colormap)
special_ring = create_special_ring_element(root)
write_xml(root, output_xml)
print("\n File written to {}".format(output_xml))
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-q', '--query', type=str, required=True, dest='query_file',
help='Path to the query/reference FASTA file.')
parser.add_argument('-rfd', '--ref_dir', type=str, required=True, dest='reference_directory',
help='Path to the directory where the FASTA files to compare against the reference are located.')
parser.add_argument('-od', '--out_dir', type=str, required=True, dest='output_folder',
help='Path to the output directory for the results of BRIG.')
parser.add_argument('-of', '--out_xml', type=str, required=True, dest='output_file',
help='Path to the output of this script.')
parser.add_argument('-oi', '--out_img', type=str, required=True, dest='image_output_file',
help='Path to the output file of the resulting image of BRIG.')
parser.add_argument('-t', '--title', type=str, required=True, dest='title',
help='Title of the resulting image from BRIG.')
parser.add_argument('-a', '--annotation', type=str, required=False, dest='annotation_file', default=False,
help='File containing annotations for the reference genome. '
                             'The annotation file can be a tab-delimited file (.tsv) or a Genbank format file (.gbk, .gb)')
parser.add_argument('--genes', type=str, required=False, dest='genes_of_interest', default=[],
help='File containing a list of specific genes (one gene per line) to search when a Genbank annotation file is provided. ')
parser.add_argument('--contig_order', type=str, required=False, dest='contig_order', default=[],
help='Tab-delimited file containing the order of the contigs when a Genbank (divided by contigs) annotation file is provided. '
'Example: order contig '
'1 Contig8')
parser.add_argument('-b', '--blast_options', type=str, required=False, dest="blast_options", default="-evalue 0.001 -num_threads 6",
help='Options for running BLAST.')
parser.add_argument('-l', '--legend_pos', type=str, required=False, dest="legend_position", default="middle-right",
                        help='Position of the legend on the resulting image.'
'The options available are upper, center or lower, '
'paired with left, center or right')
parser.add_argument('-if', '--image_format', type=str, required=False, dest="image_format", default="jpg",
help='Format of the resulting image file.'
'The available options are: jpg, png, svg or svgz.')
parser.add_argument('-ht', '--height', type=str, required=False, dest="height", default="3000",
help='Height (in pixels) of the resulting image.')
parser.add_argument('-wd', '--width', type=str, required=False, dest="width", default="3000",
help='Width (in pixels) of the resulting image.')
parser.add_argument('-jm', '--java_memory', type=str, required=False, dest="java_memory", default="1500",
help='Amount of memory (in bytes) that Java is allowed to use for BRIG.')
parser.add_argument('-cm', '--colormap', type=str, required=False, dest="colormap", default="viridis",
help='Colormap from matplotlib to use for the color of the rings. '
'The available options are: viridis, plasma, inferno, magma and cividis.'
'More options for colormaps at: '
'https://matplotlib.org/users/colormaps.html')
args = parser.parse_args()
return [args.query_file, args.reference_directory, args.output_folder, args.output_file,
args.image_output_file, args.title, args.annotation_file, args.genes_of_interest, args.contig_order,
args.blast_options, args.legend_position, args.image_format, args.height, args.width, args.java_memory, args.colormap]
if __name__ == '__main__':
args = parse_arguments()
main(args[0], args[1], args[2], args[3], args[4], args[5], args[6],
args[7], args[8], args[9], args[10], args[11], args[12], args[13],
args[14], args[15])
| gpl-3.0 | 5,687,784,411,646,860,000 | 40.8125 | 151 | 0.560455 | false | 4.529338 | false | false | false |
GaneshPandey/alex-scraper | alexscrapper/spiders/luckyshops_spider.py | 1 | 3002 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request, FormRequest
from scrapy.spiders import CrawlSpider
from alexscrapper.items import *
from datetime import datetime
from scrapy.conf import settings
import urllib
import csv
import json
import re
from datetime import datetime, timedelta
from dateutil import parser
from urllib import urlencode
from HTMLParser import HTMLParser
import requests
class LuckyshopsSider(CrawlSpider):
store_name = "Lucky Shops"
name = "luckyshops"
allowed_domains = ["rewards.luckyshops.com"]
start_urls = ['http://rewards.luckyshops.com/shopping/b____alpha.htm']
base_url = 'http://rewards.luckyshops.com'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.10) Firefox/3.6.10 GTB7.1',
'Accept-Language': 'en-us,en;q=0.5'
}
def __init__(self, *args, **kwargs):
super(LuckyshopsSider, self).__init__(*args, **kwargs)
settings.set('RETRY_HTTP_CODES', [500, 503, 504, 400, 408, 404] )
settings.set('RETRY_TIMES', 5 )
settings.set('REDIRECT_ENABLED', True)
settings.set('METAREFRESH_ENABLED', True)
settings.set('USER_AGENT', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36')
def start_requests(self):
for url in self.start_urls:
yield Request(url=url, callback=self.parse_product, headers=self.headers)
def parse_product(self, response):
item = Yaging()
pattern = ur'([\d.]+)'
store = response.xpath('//ul[@class="mn_splitListRt" or @class="mn_splitListLt"]/li')
for data in store:
name = str(data.xpath('a[2]/text()').extract()[0])
cashback = str(data.xpath('span').extract()[0])
            link = str([self.base_url + self.parse_link(href) for href in data.xpath('a/@href').extract()][1])
item['name'] = name.replace("'", "''")
item['link'] = link
cashback = cashback.replace("<span>", "").replace("</span>", "")
if "$" in cashback:
cashback = "$"+ str(self.getNumbers(cashback))
elif "%" in cashback:
cashback = str(self.getNumbers(cashback)) + "%"
else:
pass
item['cashback'] = cashback.replace("'", "''")
item['sid'] = self.store_name
item['ctype'] = 1
item['numbers'] = self.getNumbers(cashback).replace('%','').replace('$','')
item['domainurl'] = self.base_url
yield item
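    # Illustrative normalisation performed above (assumed raw spans):
    #   "<span>5.5% Cash Back</span>"   -> cashback "5.5%",   numbers "5.5"
    #   "<span>$10.50 Cash Back</span>" -> cashback "$10.50", numbers "10.50"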
def parse_link(self, jstring):
start = jstring.find("../") + 2
return jstring[start:]
def getNumbers(self, cashback):
cash = cashback
pattern = r'\d+(?:\.\d+)?'
ret = re.findall(pattern, cash)
if len(ret):
return ret[0]
else:
return "100" | gpl-3.0 | 8,431,650,365,857,330,000 | 33.125 | 146 | 0.570286 | false | 3.474537 | false | false | false |
jrsmith3/gits | test/test_fs_utils.py | 1 | 6045 | # -*- coding: utf-8 -*-
import os
import shutil
import unittest
from gits import fs_utils
test_dir_root = os.path.dirname(os.path.realpath(__file__))
class MethodsInput(unittest.TestCase):
"""
Tests behavior of methods which take input arguments.
"""
scratch_dir = os.path.join(test_dir_root, "scratch")
path_to_dummy_file = os.path.join(scratch_dir, "dummy.txt")
good_input_dict = {"dir1": {}}
def setUp(self):
"""
Creates the scratch dir.
Creates a dummy file in the scratch dir.
"""
os.mkdir(self.scratch_dir)
with open(self.path_to_dummy_file, "w"):
pass
def tearDown(self):
"""
Removes scratch dir and contents.
"""
shutil.rmtree(self.scratch_dir)
def test_dict_to_fs_fs_dict_non_dict(self):
"""
First argument to dict_to_fs must be a dictionary.
"""
self.assertRaises(TypeError, fs_utils.dict_to_fs, "not a dict", self.scratch_dir)
def test_dict_to_fs_fs_dict_values_non_dict_string(self):
"""
Values of fs_dict must be either strings or dictionaries.
"""
bad_input = {"neither_string_nor_dict": 42.}
self.assertRaises(TypeError, fs_utils.dict_to_fs, bad_input, self.scratch_dir)
def test_dict_to_fs_fqpn_root_non_str(self):
"""
Second argument to dict_to_fs must be a string.
"""
self.assertRaises(TypeError, fs_utils.dict_to_fs, self.good_input_dict, 42.)
def test_dict_to_fs_fqpn_root_string(self):
"""
Second argument to dict_to_fs can be str.
"""
try:
fs_utils.dict_to_fs(self.good_input_dict, str(self.scratch_dir))
except:
self.fail("An exception was raised, so this method can't handle strings.")
def test_dict_to_fs_fqpn_root_unicode(self):
"""
Second argument to dict_to_fs can be unicode.
"""
try:
fs_utils.dict_to_fs(self.good_input_dict, unicode(self.scratch_dir))
except:
self.fail("An exception was raised, so this method can't handle unicode.")
def test_dict_to_fs_fqpn_root_nonexistant_path(self):
"""
        Second arg to dict_to_fs must correspond to an existing path.
"""
nonexistant_subdir = "does_not_exist"
bad_fqpn_root = os.path.join(self.scratch_dir, nonexistant_subdir)
self.assertRaises(OSError, fs_utils.dict_to_fs, self.good_input_dict, bad_fqpn_root)
def test_dict_to_fs_fqpn_root_non_directory_path(self):
"""
Second arg to dict_to_fs must correspond to a dir, not a file.
"""
self.assertRaises(OSError, fs_utils.dict_to_fs, self.good_input_dict, self.path_to_dummy_file)
class MethodsFunctionality(unittest.TestCase):
"""
Tests proper functionality of the methods.
"""
scratch_dir = os.path.join(test_dir_root, "scratch")
def setUp(self):
"""
Creates a scratch directory for the tests.
"""
os.mkdir(self.scratch_dir)
def tearDown(self):
"""
Removes the scratch dir (and its contents).
"""
shutil.rmtree(self.scratch_dir)
def test_dict_to_fs_filename(self):
"""
dict_to_fs should be able to create a file with a specified filename.
"""
fs_dict = {"dummy.txt": ""}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
scratch_names = os.listdir(self.scratch_dir)
self.assertEqual(scratch_names, fs_dict.keys())
def test_dict_to_fs_isfile(self):
"""
dict_to_fs should be able to create a file.
"""
dummy_filename = "dummy.txt"
fs_dict = {dummy_filename: ""}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_filename)
self.assertTrue(os.path.isfile(dummy_fqpn))
def test_dict_to_fs_empty_file(self):
"""
An empty string should generate an empty file.
"""
dummy_filename = "dummy.txt"
fs_dict = {dummy_filename: ""}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_filename)
self.assertEqual(os.path.getsize(dummy_fqpn), 0)
def test_dict_to_fs_nonempty_file(self):
"""
A nonempty string should generate a nonempty file.
"""
dummy_filename = "dummy.txt"
fs_dict = {dummy_filename: "Hello world.\n"}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_filename)
self.assertTrue(os.path.getsize(dummy_fqpn) > 0)
def test_dict_to_fs_isdir(self):
"""
dict_to_fs should be able to create a directory.
"""
dummy_dirname = "dummy"
fs_dict = {dummy_dirname: {}}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_dirname)
self.assertTrue(os.path.isdir(dummy_fqpn))
def test_dict_to_fs_dir_isempty(self):
"""
dict_to_fs should be able to create an empty directory.
"""
dummy_dirname = "dummy"
fs_dict = {dummy_dirname: {}}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_dirname)
should_be_empty_list = os.listdir(os.path.join(self.scratch_dir, dummy_dirname))
self.assertEqual(should_be_empty_list, [])
def test_dict_to_fs_dir_nonempty(self):
"""
dict_to_fs should be able to create a populated directory.
"""
dummy_dirname = "dummy"
fs_dict = {dummy_dirname: {"test_file.txt":""}}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_dirname)
should_not_be_empty_list = os.listdir(os.path.join(self.scratch_dir, dummy_dirname))
self.assertTrue(len(should_not_be_empty_list) > 0)
| mit | 2,823,540,586,295,067,600 | 30.815789 | 102 | 0.599504 | false | 3.432709 | true | false | false |
luca-morreale/reinforcement-gym | generalizer/state_generalizer.py | 1 | 1668 | # -*- coding: utf-8 -*-
import numpy as np
from state_action import StateAction
class StateGeneralizer:
""" Creates the Generalizer.
Args:
updater: object in charge of update the value of actions
"""
def __init__(self, m):
self.Q = {}
self.m = m
def getRepresentation(self, state_action):
        raise NotImplementedError()
""" Returns the StateAction estimated value.
Args:
state_action: the state to look for
Returns:
number
"""
def getQValue(self, state_action):
        raise NotImplementedError()
def getCombinedValue(self, state, action):
return self.getQValue(StateAction(state, action))
""" Returns an array containing the value of the corrisponding action.
Args:
obs: the state to look for
Returns:
array of numbers
"""
def getPossibleActions(self, obs):
actions = np.zeros(self.m)
for i in range(self.m):
actions[i] = self.getQValue(StateAction(obs, i))
return actions
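    # Illustrative usage (assumes a concrete subclass that implements getQValue):
    #   greedy_action = int(np.argmax(generalizer.getPossibleActions(obs)))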
""" Update the value of a state-action pair adding the given value.
Args:
state_action: object representing the state-action
value: value to add to the current value
"""
def addDeltaToQValue(self, state_action, value):
        raise NotImplementedError()
def newEpisode(self):
pass
"""
Prints the content of Q in a readable way
"""
def prettyPrintQ(self):
for key in self.Q:
print(str(key) + "-> ", end="")
for v in self.Q[key]:
print(str(v) + " ", end="")
print()
| gpl-3.0 | -5,542,995,542,859,738,000 | 25.903226 | 74 | 0.585731 | false | 4.212121 | false | false | false |
asoplata/dynasim-benchmark-brette-2007 | Brian2/brian2_benchmark_COBAHH_nosyn_compiled_500.py | 1 | 3481 | """
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008) - although it uses the
COBAHH model of (Brette et al. 2007), not CUBA.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian2_benchmark_COBAHH_nosyn_compiled_500/pbsout/brian2_benchmark_COBAHH_nosyn_compiled_500.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
  Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
set_device('cpp_standalone')
prefs.codegen.cpp.extra_compile_args = ['-w', '-O3', '-ffast-math', '-march=native']
# Parameters
cells = 500
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# # Time constants
# taue = 5*ms
# taui = 10*ms
# # Reversal potentials
# Ee = 0*mV
# Ei = -80*mV
# we = 6*nS # excitatory synaptic weight
# wi = 67*nS # inhibitory synaptic weight
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
''')
# dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
# dge/dt = -ge*(1./taue) : siemens
# dgi/dt = -gi*(1./taui) : siemens
P = NeuronGroup(cells, model=eqs, threshold='v>-20*mV', refractory=3*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Ce = Synapses(Pe, P, on_pre='ge+=we')
# Ci = Synapses(Pi, P, on_pre='gi+=wi')
# Ce.connect(p=0.98)
# Ci.connect(p=0.98)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# P.ge = '(randn() * 1.5 + 4) * 10.*nS'
# P.gi = '(randn() * 12 + 20) * 10.*nS'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
| gpl-3.0 | 3,238,071,321,243,270,700 | 31.212963 | 110 | 0.641851 | false | 2.387783 | false | false | false |
LilithWittmann/bootev-tickets | tickets/conference/migrations/0001_initial.py | 1 | 1388 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Conference',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='name', max_length=80)),
('image', models.ImageField(verbose_name='logo', upload_to='media/')),
('description', models.CharField(verbose_name='description', max_length=200, blank=True)),
('start_date', models.DateField()),
('end_date', models.DateField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=False)),
('slug', models.SlugField()),
('owner', models.ForeignKey(verbose_name='owner', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Conference',
'verbose_name_plural': 'Conferences',
},
),
]
| gpl-2.0 | 476,390,771,540,662,000 | 38.657143 | 114 | 0.564121 | false | 4.491909 | false | false | false |
ssdi-drive/nuxeo-drive | nuxeo-drive-client/nxdrive/engine/next/engine_next.py | 1 | 1582 | # coding: utf-8
""" Evolution to try new engine solution. """
from nxdrive.client.remote_document_client import RemoteDocumentClient
from nxdrive.client.remote_file_system_client import RemoteFileSystemClient
from nxdrive.client.remote_filtered_file_system_client import \
RemoteFilteredFileSystemClient
from nxdrive.engine.engine import Engine
from nxdrive.logging_config import get_logger
from nxdrive.options import Options
log = get_logger(__name__)
class EngineNext(Engine):
def __init__(self, manager, definition, binder=None, processors=5,
remote_doc_client_factory=RemoteDocumentClient,
remote_fs_client_factory=RemoteFileSystemClient,
remote_filtered_fs_client_factory=RemoteFilteredFileSystemClient):
super(EngineNext, self).__init__(manager, definition, binder, processors,
remote_doc_client_factory, remote_fs_client_factory, remote_filtered_fs_client_factory)
self._type = "NXDRIVENEXT"
def create_processor(self, item_getter, name=None):
from nxdrive.engine.next.processor import Processor
return Processor(self, item_getter, name=name)
def _create_queue_manager(self, processors):
from nxdrive.engine.next.queue_manager import QueueManager
if Options.debug:
return QueueManager(self, self._dao, max_file_processors=2)
return QueueManager(self, self._dao)
def _create_local_watcher(self):
from nxdrive.engine.next.simple_watcher import SimpleWatcher
return SimpleWatcher(self, self._dao)
| lgpl-2.1 | 5,264,211,132,705,381,000 | 41.756757 | 104 | 0.721871 | false | 3.945137 | false | false | false |
AmI-2015/python-intermediate | metrics.py | 1 | 2662 | '''
Created on Mar 19, 2014
@author: Dario Bonino <[email protected]>
Copyright (c) 2014 Dario Bonino
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
'''
import os,psutil,time,tts
def print_sys_metrics():
'''
    Prints some system metrics in an os-independent way
'''
#get uname data
uname = os.uname()
# print the operating system information
print "OS Type:%s\nHost:%s\nKernel:%s %s\nArch:%s\n"%uname
#get the current system load average (last min, 5min, 15min)
load = os.getloadavg()
#print the load average
print "load_avg:\n \t%f (1min)\n \t%f (5min)\n \t%f (15min)"%(load)
#get the current virtual memory statistics
virtual_memory = psutil.virtual_memory()
#print total memory
print "Total memory:\n \t%s"%virtual_memory.total
#print available memory
print "Available memory:\n \t%s"%virtual_memory.available
#print free memory
print "Free memory:\n \t%s"%virtual_memory.available
#print cpu usage
print "CPU usage:\n \t%f"%psutil.cpu_percent(None, False)
#get disk counters
disk_io = psutil.disk_io_counters(False)
#print the number of reads and corresponding bytes
print "Reads: %d (%d bytes)"%(disk_io.read_count,disk_io.read_bytes)
#print the number of writes and the corresponding bytes
print "Writes: %d (%d bytes)"%(disk_io.write_count, disk_io.write_bytes)
def monitor_cpu(threshold, interval, callback=None):
    '''
    Monitors the CPU occupation and, if it rises over the given threshold, calls the specified callback
    '''
    while True:
#get the cpu percentage
percent = psutil.cpu_percent()
        #check the threshold
if(percent > threshold):
#callback
callback(percent)
#debug
print "calling callback: %s"%percent
#wait for the given time
time.sleep(interval)
if __name__ == '__main__':
#print the system metrics
print_sys_metrics()
#monitors the current cpu status
monitor_cpu(10, 1, lambda x: tts.say("warning, CPU percent raised up to %s"%x)) | apache-2.0 | -8,028,598,815,019,101,000 | 28.588889 | 92 | 0.654771 | false | 3.852388 | false | false | false |
scribblemaniac/MCEdit2Blender | blocks/Transparent.py | 1 | 3837 | import bpy
import mathutils
from Block import Block
class Transparent(Block):
"""A block with a texture that contains transparent or translucent pixels"""
def makeObject(self, x, y, z, metadata):
mesh = bpy.data.meshes.new(name="Block")
mesh.from_pydata([[-0.5,-0.5,-0.5],[0.5,-0.5,-0.5],[-0.5,0.5,-0.5],[0.5,0.5,-0.5],[-0.5,-0.5,0.5],[0.5,-0.5,0.5],[-0.5,0.5,0.5],[0.5,0.5,0.5]],[],[[0,1,3,2],[4,5,7,6],[0,1,5,4],[0,2,6,4],[2,3,7,6],[1,3,7,5]])
mesh.update()
obj = bpy.data.objects.new("Block", mesh)
obj.location.x = x + 0.5
obj.location.y = y + 0.5
obj.location.z = z + 0.5
obj.scale = (0.9998999834060669, 0.9998999834060669, 0.9998999834060669) # workaround for overlapping object shading issue
obj.blockId = self._id
obj.blockMetadata = metadata
bpy.context.scene.objects.link(obj)
activeObject = bpy.context.scene.objects.active
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.editmode_toggle()
bpy.context.scene.objects.active = activeObject
return obj
def applyMaterial(self, obj, metadata):
try:
mat = bpy.data.materials[self._unlocalizedName]
except KeyError:
mat = bpy.data.materials.new(self._unlocalizedName)
mat.preview_render_type = "CUBE"
mat.use_nodes = True
mat.node_tree.nodes["Material Output"].location = [400, 0]
mat.node_tree.nodes["Diffuse BSDF"].location = [0, -75]
mat.node_tree.links.remove(mat.node_tree.links[0])
#Mix Shader
mat.node_tree.nodes.new(type="ShaderNodeMixShader")
mat.node_tree.nodes["Mix Shader"].location = [200, 0]
mat.node_tree.links.new(mat.node_tree.nodes["Diffuse BSDF"].outputs[0], mat.node_tree.nodes["Mix Shader"].inputs[2])
mat.node_tree.links.new(mat.node_tree.nodes["Mix Shader"].outputs[0], mat.node_tree.nodes["Material Output"].inputs[0])
#Transparent Shader
mat.node_tree.nodes.new(type="ShaderNodeBsdfTransparent")
mat.node_tree.nodes["Transparent BSDF"].location = [0, 100]
mat.node_tree.links.new(mat.node_tree.nodes["Transparent BSDF"].outputs[0], mat.node_tree.nodes["Mix Shader"].inputs[1])
#Initialize Texture
try:
tex = bpy.data.images[self._unlocalizedName]
except KeyError:
tex = bpy.data.images.load(self.getBlockTexturePath(self._textureName))
tex.name = self._unlocalizedName
#First Image Texture
mat.node_tree.nodes.new(type="ShaderNodeTexImage")
mat.node_tree.nodes["Image Texture"].location = [-200, 75]
mat.node_tree.nodes["Image Texture"].image = tex
mat.node_tree.nodes["Image Texture"].interpolation = "Closest"
mat.node_tree.nodes["Image Texture"].projection = "FLAT"
mat.node_tree.links.new(mat.node_tree.nodes["Image Texture"].outputs[0], mat.node_tree.nodes["Diffuse BSDF"].inputs[0])
mat.node_tree.links.new(mat.node_tree.nodes["Image Texture"].outputs[1], mat.node_tree.nodes["Mix Shader"].inputs[0])
#UV Map
mat.node_tree.nodes.new(type="ShaderNodeUVMap")
mat.node_tree.nodes["UV Map"].location = [-400, 0]
mat.node_tree.nodes["UV Map"].uv_map = "UVMap"
mat.node_tree.links.new(mat.node_tree.nodes["UV Map"].outputs[0], mat.node_tree.nodes["Image Texture"].inputs[0])
obj.data.materials.append(mat)
| gpl-3.0 | 8,452,518,236,062,519,000 | 49.486842 | 216 | 0.594996 | false | 3.282293 | false | false | false |
ambyte/Vertaler | src/modules/startupapp.py | 1 | 2108 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (c) 2011 Sergey Gulyaev <[email protected]>
#
# This file is part of Vertaler.
#
# Vertaler is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Vertaler is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
# ----------------------------------------------------------------------------
""" startup application when start Windows """
import os
if os.name == "nt":
import winshell
import sys
def is_start_up():
try:
startup = winshell.startup(1)
if os.path.exists(startup + '\\Vertaler.lnk'):
return True
else:
return False
except Exception:
pass
def set_startup():
try:
# get path and file name for application
startFile = os.path.abspath(sys.argv[0])
# get startup folder
startup = winshell.startup(1)
# create shortcut in startup folder
winshell.CreateShortcut(
Path=os.path.join(startup, "Vertaler.lnk"),
Target=startFile,
Icon=(startFile, 0),
Description="Vertaler",
                StartIn=os.path.dirname(startFile)  # abspath(None) raises; use the app folder as working dir
)
except Exception:
pass
def delete_startup():
try:
startup = winshell.startup(1)
# remove shortcut from startup folder
if os.path.isfile(startup + '\\Vertaler.lnk'):
os.remove(startup + '\\Vertaler.lnk')
except Exception:
pass
| gpl-2.0 | -8,542,072,036,966,153,000 | 28.690141 | 78 | 0.591556 | false | 4.216 | false | false | false |
SoCdesign/EHA | Tools/Minimization_Tool/essential_checker_extraction.py | 1 | 7029 | # copyright 2016 Siavoosh Payandeh Azad and Behrad Niazmand
import package_file
import copy
def extract_checker_info(name_string):
package_file.list_of_detection_info_sa0[name_string] = []
package_file.list_of_detection_info_sa1[name_string] = []
package_file.list_of_true_misses_sa0[name_string] = []
package_file.list_of_true_misses_sa1[name_string] = []
area_report_file = open("coverage_results/fstat" + str(name_string), 'r')
line = area_report_file.readline()
while line != "":
line = area_report_file.readline()
if ".CHECKERS DETECTION INFO - amount of detections" in line:
line = area_report_file.readline()
for item in line.split(" "):
if "|" in item:
package_file.list_of_detection_info_sa0[name_string].append(item.split("|")[0])
package_file.list_of_detection_info_sa1[name_string].append(item.split("|")[1])
if "amount of True Misses" in line:
line = area_report_file.readline()
for item in line.split(" "):
if "|" in item:
package_file.list_of_true_misses_sa0[name_string].append(item.split("|")[0])
package_file.list_of_true_misses_sa1[name_string].append(item.split("|")[1])
# print package_file.list_of_detection_info_sa0
# print package_file.list_of_detection_info_sa1
return None
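# Added illustration (inferred from the parsing above, not from the tool's
# documentation): a "coverage_results/fstat<name>" file is expected to look
# roughly like
#   .CHECKERS DETECTION INFO - amount of detections
#   12|3 0|7 5|5
#   ... amount of True Misses ...
#   4|1 9|0 2|2
# where each "sa0|sa1" pair is split into the per-checker stuck-at-0 and
# stuck-at-1 lists.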
def find_essential_checker():
print "------------------------------------------------------------------------------------------------"
print " Extracting essential checkers"
print "------------------------------------------------------------------------------------------------"
temp_copy_sa0 = copy.deepcopy(package_file.list_of_true_misses_sa0)
temp_copy_sa1 = copy.deepcopy(package_file.list_of_true_misses_sa1)
random_item = temp_copy_sa0.keys()[0]
selected_checkers_sa0 = []
selected_checkers_sa1 = []
checkers_for_optimization = []
for node in range(0, len(temp_copy_sa0[random_item])):
best_checker = None
best_true_miss_rate = float('inf')
for checker in temp_copy_sa0:
true_miss_rate = int(temp_copy_sa0[checker][node])
if int(package_file.list_of_detection_info_sa0[str(checker)][node]) > 0:
if true_miss_rate >= 0:
if true_miss_rate < best_true_miss_rate:
best_true_miss_rate = true_miss_rate
best_checker = checker
# if best_true_miss_rate == 0:
count = 0
for checker in temp_copy_sa0:
if int(package_file.list_of_true_misses_sa0[checker][node]) == best_true_miss_rate:
if int(package_file.list_of_detection_info_sa0[str(checker)][node]) > 0:
temp_copy_sa0[checker][node] = 1
count += 1
else:
temp_copy_sa0[checker][node] = 0
else:
temp_copy_sa0[checker][node] = 0
if count == 1:
if best_checker not in selected_checkers_sa0:
selected_checkers_sa0.append(best_checker)
# else:
# for checker in temp_copy_sa0:
# temp_copy_sa0[checker][node] = 0
print "single dominant checkers for sta0:", selected_checkers_sa0
for node in range(0, len(temp_copy_sa1[random_item])):
best_checker = None
best_true_miss_rate = float('inf')
for checker in temp_copy_sa1:
true_miss_rate = int(temp_copy_sa1[checker][node])
if int(package_file.list_of_detection_info_sa1[str(checker)][node]) > 0:
# print checker, int(package_file.list_of_detection_info_sa1[str(checker)][node])
if true_miss_rate >= 0:
if true_miss_rate < best_true_miss_rate:
best_true_miss_rate = true_miss_rate
best_checker = checker
# if best_true_miss_rate == 0:
count = 0
for checker in temp_copy_sa1:
if int(package_file.list_of_true_misses_sa1[checker][node]) == best_true_miss_rate:
if int(package_file.list_of_detection_info_sa1[str(checker)][node]) > 0:
temp_copy_sa1[checker][node] = 1
count += 1
else:
temp_copy_sa1[checker][node] = 0
else:
temp_copy_sa1[checker][node] = 0
# print "best checker", best_checker
if count == 1:
if best_checker not in selected_checkers_sa1:
selected_checkers_sa1.append(best_checker)
# else:
# for checker in temp_copy_sa1:
# temp_copy_sa1[checker][node] = 0
print "single dominant checkers for sta1:", selected_checkers_sa1
for checker in selected_checkers_sa0:
for node in range(0, len(temp_copy_sa0[checker])):
if temp_copy_sa0[checker][node] == 1:
for checker2 in temp_copy_sa0.keys():
if checker2 not in selected_checkers_sa0:
if temp_copy_sa0[checker2][node] == 1:
temp_copy_sa0[checker2][node] = 0
for checker in selected_checkers_sa1:
for node in range(0, len(temp_copy_sa1[checker])):
if temp_copy_sa1[checker][node] == 1:
for checker2 in temp_copy_sa1.keys():
if checker2 not in selected_checkers_sa1:
if temp_copy_sa1[checker2][node] == 1:
temp_copy_sa1[checker2][node] = 0
if package_file.debug:
print "-----------------"
print "printing the checkers true misses table (0-1)"
print "stuck at 0:"
for checker in sorted(temp_copy_sa0.keys()):
print checker,
for item in temp_copy_sa0[checker]:
print item,
print ""
print "-----------------"
print "printing the checkers true misses table (0-1)"
print "stuck at 1:"
for checker in sorted(temp_copy_sa1.keys()):
print checker,
for item in temp_copy_sa1[checker]:
print item,
print ""
print "-----------------"
final_selected_list = []
for item in selected_checkers_sa0:
final_selected_list.append(str(item))
for item in selected_checkers_sa1:
if item not in selected_checkers_sa0:
final_selected_list.append(str(item))
for item in temp_copy_sa0.keys():
if str(item) not in final_selected_list:
if str(item) not in checkers_for_optimization:
checkers_for_optimization.append(str(item))
print "selected single dominant checkers:", final_selected_list
print "selected checkers for optimization:", checkers_for_optimization
return final_selected_list, checkers_for_optimization
| gpl-3.0 | 3,876,197,300,479,161,300 | 43.487342 | 108 | 0.543463 | false | 3.760835 | false | false | false |
funkbit/django-funky-user | funky_user/managers.py | 1 | 1568 | from datetime import datetime
from django.contrib.auth.models import BaseUserManager as DjangoBaseUserManager
from django.utils import timezone
class UserManager(DjangoBaseUserManager):
"""
Default manager for the User model.
"""
###################################
# Required Django manager methods #
###################################
def create_user(self, email, password=None, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
# We set last login in the past so we know which users has logged in once
last_login_date = datetime(1970, 1, 1).replace(tzinfo=timezone.utc)
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(
email=email,
is_staff=False,
is_active=False,
last_login=last_login_date,
date_joined=timezone.now(),
**extra_fields
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, **extra_fields):
user = self.create_user(email, password, **extra_fields)
user.is_staff = True
user.is_active = True
user.save(using=self._db)
return user
##################
# Custom methods #
##################
def active(self):
"""
Returns only active users.
"""
return self.filter(is_active=True)
| bsd-2-clause | -2,770,687,478,345,300,000 | 26.508772 | 81 | 0.55676 | false | 4.467236 | false | false | false |
tdozat/Parser | dataset.py | 1 | 5660 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from collections import Counter
from lib.etc.k_means import KMeans
from configurable import Configurable
from vocab import Vocab
from metabucket import Metabucket
#***************************************************************
class Dataset(Configurable):
""""""
#=============================================================
def __init__(self, filename, vocabs, builder, *args, **kwargs):
""""""
super(Dataset, self).__init__(*args, **kwargs)
self._file_iterator = self.file_iterator(filename)
self._train = (filename == self.train_file)
self._metabucket = Metabucket(self._config, n_bkts=self.n_bkts)
self._data = None
self.vocabs = vocabs
self.rebucket()
self.inputs = tf.placeholder(dtype=tf.int32, shape=(None,None,None), name='inputs')
self.targets = tf.placeholder(dtype=tf.int32, shape=(None,None,None), name='targets')
self.builder = builder()
#=============================================================
def file_iterator(self, filename):
""""""
with open(filename) as f:
if self.lines_per_buffer > 0:
buff = [[]]
while True:
line = f.readline()
while line:
line = line.strip().split()
if line:
buff[-1].append(line)
else:
if len(buff) < self.lines_per_buffer:
if buff[-1]:
buff.append([])
else:
break
line = f.readline()
if not line:
f.seek(0)
else:
buff = self._process_buff(buff)
yield buff
line = line.strip().split()
if line:
buff = [[line]]
else:
buff = [[]]
else:
buff = [[]]
for line in f:
line = line.strip().split()
if line:
buff[-1].append(line)
else:
if buff[-1]:
buff.append([])
if buff[-1] == []:
buff.pop()
buff = self._process_buff(buff)
while True:
yield buff
#=============================================================
def _process_buff(self, buff):
""""""
words, tags, rels = self.vocabs
for i, sent in enumerate(buff):
for j, token in enumerate(sent):
word, tag1, tag2, head, rel = token[words.conll_idx], token[tags.conll_idx[0]], token[tags.conll_idx[1]], token[6], token[rels.conll_idx]
buff[i][j] = (word,) + words[word] + tags[tag1] + tags[tag2] + (int(head),) + rels[rel]
sent.insert(0, ('root', Vocab.ROOT, Vocab.ROOT, Vocab.ROOT, Vocab.ROOT, 0, Vocab.ROOT))
return buff
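  # Added note (a reading of the indexing above, not from upstream docs): each
  # CoNLL token row is replaced by the word string followed by the vocab indices
  # for the word and both tag columns, the integer head taken from column 6 and
  # the relation index, and a synthetic 'root' token is prepended to every
  # sentence.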
#=============================================================
def reset(self, sizes):
""""""
self._data = []
self._targets = []
self._metabucket.reset(sizes)
return
#=============================================================
def rebucket(self):
""""""
buff = self._file_iterator.next()
len_cntr = Counter()
for sent in buff:
len_cntr[len(sent)] += 1
self.reset(KMeans(self.n_bkts, len_cntr).splits)
for sent in buff:
self._metabucket.add(sent)
self._finalize()
return
#=============================================================
def _finalize(self):
""""""
self._metabucket._finalize()
return
#=============================================================
def get_minibatches(self, batch_size, input_idxs, target_idxs, shuffle=True):
""""""
minibatches = []
for bkt_idx, bucket in enumerate(self._metabucket):
if batch_size == 0:
n_splits = 1
else:
n_tokens = len(bucket) * bucket.size
n_splits = max(n_tokens // batch_size, 1)
if shuffle:
range_func = np.random.permutation
else:
range_func = np.arange
arr_sp = np.array_split(range_func(len(bucket)), n_splits)
for bkt_mb in arr_sp:
minibatches.append( (bkt_idx, bkt_mb) )
if shuffle:
np.random.shuffle(minibatches)
for bkt_idx, bkt_mb in minibatches:
feed_dict = {}
data = self[bkt_idx].data[bkt_mb]
sents = self[bkt_idx].sents[bkt_mb]
maxlen = np.max(np.sum(np.greater(data[:,:,0], 0), axis=1))
feed_dict.update({
self.inputs: data[:,:maxlen,input_idxs],
self.targets: data[:,:maxlen,target_idxs]
})
yield feed_dict, sents
#=============================================================
@property
def n_bkts(self):
if self._train:
return super(Dataset, self).n_bkts
else:
return super(Dataset, self).n_valid_bkts
#=============================================================
def __getitem__(self, key):
return self._metabucket[key]
def __len__(self):
return len(self._metabucket)
| apache-2.0 | 8,636,058,868,906,911,000 | 29.928962 | 145 | 0.510247 | false | 3.847723 | false | false | false |
SWENG500-Team1/FitnessForSplunk | misc/python-oauth2-test/python_server.py | 1 | 1192 | from bottle import route, run, request # Python server library
import sys
import httplib2, urllib
import base64
# Hello World route example
@route('/hello')
def hello():
return "Hello World!"
# Fitbit callback route
@route('/auth/fitbit/callback')
def fitbit_callback():
# Edit these variables to suit you
clientID = '227MVJ'
clientSecret = 'df8009bd0ddcb975f9a812e3587e54dd'
encoded = base64.b64encode( (clientID + ':' + clientSecret) )
callback_url = 'https://localhost:8089/services/fitness_for_splunk/fitbit_callback'
    authCode = '' # Need to fill in auth code
# Request for a token
url = 'https://api.fitbit.com/oauth2/token'
authHeader_value = ('Basic ' + encoded)
headers = {'Authorization': authHeader_value, 'Content-Type': 'application/x-www-form-urlencoded'}
data = {'clientId': clientID, 'grant_type': 'authorization_code', 'redirect_uri': callback_url, 'code': authCode}
body = urllib.urlencode(data)
http = httplib2.Http()
resp, cont = http.request(url, 'POST', headers=headers, body=body)
# Print response content (token) to screen
return cont
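# Illustrative helper (added; not part of the original example). It sketches how
# the raw JSON body returned above could be parsed. The field names follow the
# standard OAuth2 token response and are an assumption here, not taken from this
# repository.
def parse_token_response(cont):
    import json
    token = json.loads(cont)
    return token.get('access_token'), token.get('refresh_token')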
run(host='localhost', port=3000, debug=True) | mit | -3,756,327,715,078,418,000 | 33.085714 | 117 | 0.687081 | false | 3.475219 | false | false | false |
HEP-DL/root2hdf5 | root2hdf5/plugins/larcv/pmt.py | 1 | 1321 | from root2hdf5.data_types.base import BaseData
import numpy as np
import logging
class PMTData(BaseData):
logger = logging.getLogger('root2hdf5.data_types.pmt')
tree_name = 'image2d_pmt_tree'
def __init__(self, _file, output_file):
super(PMTData, self).__init__(_file)
from larcv import larcv
self.array_converter = larcv.as_ndarray
self.dataset = output_file.create_dataset("image2d/pmt", (10,1,1500,32), maxshape=(None,1,1500,32),
chunks=(10,1,1500,32), dtype='f',compression="gzip")
self.dataset.attrs['name'] = 'image2d_pmt'
self.dataset.attrs['index0_name'] = 'eventN'
self.dataset.attrs['index1_name'] = 'layerN'
self.dataset.attrs['index3_name'] = 'pixelX'
self.dataset.attrs['index4_name'] = 'pixelY'
self.logger.info("Setting Up PMT Data Stream")
self.buffer = np.ndarray((10,1,1500,32), dtype='H')
self.buffer_index=0
def process_branch(self, branch):
layerimage = self.array_converter(branch.at(0))
layerimage.resize(1500,32)
self.buffer[self.buffer_index, 0] = layerimage
self.buffer_index+=1
if self.event_index %10==0:
self.buffer_index=0
self.dataset.resize( (self.event_index+10,1,1500,32) )
self.dataset[self.event_index:self.event_index+10,:,:,:] = self.buffer
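      # Added note (an assumption about BaseData, not stated in this file):
      # event_index is expected to count processed events, so every tenth event
      # the 10-event staging buffer is written into the resized HDF5 dataset in
      # a single slice assignment.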
| gpl-3.0 | -8,310,864,700,203,725,000 | 40.28125 | 103 | 0.657835 | false | 3.064965 | false | false | false |
jolyonb/edx-platform | lms/djangoapps/courseware/management/commands/clean_xml.py | 1 | 4518 | from __future__ import print_function
import os
import sys
import traceback
import lxml.etree
from django.core.management.base import BaseCommand
from fs.osfs import OSFS
from path import Path as path
from xmodule.modulestore.xml import XMLModuleStore
def traverse_tree(course):
"""
Load every descriptor in course. Return bool success value.
"""
queue = [course]
while len(queue) > 0:
node = queue.pop()
queue.extend(node.get_children())
return True
def export(course, export_dir):
"""
Export the specified course to course_dir. Creates dir if it doesn't
exist. Overwrites files, does not clean out dir beforehand.
"""
fs = OSFS(export_dir, create=True)
if not fs.isdirempty('.'):
print(u'WARNING: Directory {dir} not-empty. May clobber/confuse things'.format(dir=export_dir))
try:
course.runtime.export_fs = fs
root = lxml.etree.Element('root')
course.add_xml_to_node(root)
with fs.open('course.xml', mode='w') as f:
            # lxml Elements have no write(); wrap in an ElementTree to serialize
            lxml.etree.ElementTree(root).write(f)
return True
except:
print('Export failed!')
traceback.print_exc()
return False
def import_with_checks(course_dir):
all_ok = True
print(u'Attempting to load "{}"'.format(course_dir))
course_dir = path(course_dir)
data_dir = course_dir.dirname()
source_dirs = [course_dir.basename()]
# No default class--want to complain if it doesn't find plugins for any
# module.
modulestore = XMLModuleStore(
data_dir,
default_class=None,
source_dirs=source_dirs
)
def str_of_err(tpl):
(msg, exc_str) = tpl
return '{msg}\n{exc}'.format(msg=msg, exc=exc_str)
courses = modulestore.get_courses()
n = len(courses)
if n != 1:
print(u'ERROR: Expect exactly 1 course. Loaded {n}: {lst}'.format(n=n, lst=courses))
return (False, None)
course = courses[0]
errors = modulestore.get_course_errors(course.id)
if len(errors) != 0:
all_ok = False
print(
'\n' +
'========================================' +
'ERRORs during import:' +
'\n'.join(map(str_of_err, errors)) +
'========================================' +
'\n'
)
# print course
validators = (
traverse_tree,
)
print('========================================')
print('Running validators...')
for validate in validators:
print(u'Running {}'.format(validate.__name__))
all_ok = validate(course) and all_ok
if all_ok:
print('Course passes all checks!')
else:
print('Course fails some checks. See above for errors.')
return all_ok, course
def check_roundtrip(course_dir):
"""
Check that import->export leaves the course the same
"""
print('====== Roundtrip import =======')
(ok, course) = import_with_checks(course_dir)
if not ok:
raise Exception('Roundtrip import failed!')
print('====== Roundtrip export =======')
export_dir = course_dir + '.rt'
export(course, export_dir)
# dircmp doesn't do recursive diffs.
# diff = dircmp(course_dir, export_dir, ignore=[], hide=[])
print('======== Roundtrip diff: =========')
sys.stdout.flush() # needed to make diff appear in the right place
os.system(u'diff -r {} {}'.format(course_dir, export_dir))
print('======== ideally there is no diff above this =======')
class Command(BaseCommand):
help = 'Imports specified course, validates it, then exports it in a canonical format.'
def add_arguments(self, parser):
parser.add_argument('course_dir',
help='path to the input course directory')
parser.add_argument('output_dir',
help='path to the output course directory')
parser.add_argument('--force',
action='store_true',
help='export course even if there were import errors')
def handle(self, *args, **options):
course_dir = options['course_dir']
output_dir = options['output_dir']
force = options['force']
(ok, course) = import_with_checks(course_dir)
if ok or force:
if not ok:
print('WARNING: Exporting despite errors')
export(course, output_dir)
check_roundtrip(output_dir)
else:
print('Did NOT export')
| agpl-3.0 | -2,033,734,661,561,714,200 | 27.77707 | 104 | 0.568172 | false | 4.019573 | false | false | false |
lmaycotte/quark | quark/plugin_modules/ports.py | 1 | 26370 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.extensions import securitygroup as sg_ext
from neutron import quota
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from quark.db import api as db_api
from quark.drivers import registry
from quark.environment import Capabilities
from quark import exceptions as q_exc
from quark import ipam
from quark import network_strategy
from quark import plugin_views as v
from quark import tags
from quark import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
PORT_TAG_REGISTRY = tags.PORT_TAG_REGISTRY
STRATEGY = network_strategy.STRATEGY
# HACK(amir): RM9305: do not allow a tenant to associate a network to a port
# that does not belong to them unless it is publicnet or servicenet
# NOTE(blogan): allow advanced services, such as lbaas, the ability
# to associate a network to a port that does not belong to them
def _raise_if_unauthorized(context, net):
if (not STRATEGY.is_provider_network(net["id"]) and
net["tenant_id"] != context.tenant_id and
not context.is_advsvc):
raise n_exc.NotAuthorized()
def _get_net_driver(network, port=None):
port_driver = None
if port and port.get("network_plugin"):
port_driver = port.get("network_plugin")
try:
return registry.DRIVER_REGISTRY.get_driver(
network["network_plugin"], port_driver=port_driver)
except Exception as e:
raise n_exc.BadRequest(resource="ports",
msg="invalid network_plugin: %s" % e)
def _get_ipam_driver(network, port=None):
network_id = network["id"]
network_strategy = network["ipam_strategy"]
# Ask the net driver for a IPAM strategy to use
# with the given network/default strategy.
net_driver = _get_net_driver(network, port=port)
strategy = net_driver.select_ipam_strategy(
network_id, network_strategy)
# If the driver has no opinion about which strategy to use,
# we use the one specified by the network.
if not strategy:
strategy = network_strategy
try:
return ipam.IPAM_REGISTRY.get_strategy(strategy)
except Exception as e:
raise n_exc.BadRequest(resource="ports",
msg="invalid ipam_strategy: %s" % e)
# NOTE(morgabra) Backend driver operations return a lot of stuff. We use a
# small subset of this data, so we filter out things we don't care about
# so we can avoid any collisions with real port data.
def _filter_backend_port(backend_port):
# Collect a list of allowed keys in the driver response
required_keys = ["uuid", "bridge"]
tag_keys = [tag for tag in PORT_TAG_REGISTRY.tags.keys()]
allowed_keys = required_keys + tag_keys
for k in backend_port.keys():
if k not in allowed_keys:
del backend_port[k]
def split_and_validate_requested_subnets(context, net_id, segment_id,
fixed_ips):
subnets = []
ip_addresses = {}
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not subnet_id:
raise n_exc.BadRequest(resource="fixed_ips",
msg="subnet_id required")
if ip_address:
ip_addresses[ip_address] = subnet_id
else:
subnets.append(subnet_id)
subnets = ip_addresses.values() + subnets
sub_models = db_api.subnet_find(context, id=subnets, scope=db_api.ALL)
if len(sub_models) == 0:
raise n_exc.SubnetNotFound(subnet_id=subnets)
for s in sub_models:
if s["network_id"] != net_id:
raise n_exc.InvalidInput(
error_message="Requested subnet doesn't belong to requested "
"network")
if segment_id and segment_id != s["segment_id"]:
raise q_exc.AmbiguousNetworkId(net_id=net_id)
return ip_addresses, subnets
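# Added usage sketch (hypothetical values, not from the original module): the
# helper above is typically fed the "fixed_ips" body of a port request, e.g.
#   fixed_ips = [{"subnet_id": "subnet-a"},
#                {"subnet_id": "subnet-b", "ip_address": "10.0.0.5"}]
#   ips, subnets = split_and_validate_requested_subnets(context, net_id,
#                                                       segment_id, fixed_ips)
# which yields ips == {"10.0.0.5": "subnet-b"} and subnets listing the
# explicitly addressed subnet first, i.e. ["subnet-b", "subnet-a"].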
def create_port(context, port):
"""Create a port
Create a port which is a connection point of a device (e.g., a VM
NIC) to attach to a L2 Neutron network.
: param context: neutron api request context
: param port: dictionary describing the port, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
"""
LOG.info("create_port for tenant %s" % context.tenant_id)
port_attrs = port["port"]
admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up",
"use_forbidden_mac_range", "network_plugin",
"instance_node_id"]
utils.filter_body(context, port_attrs, admin_only=admin_only)
port_attrs = port["port"]
mac_address = utils.pop_param(port_attrs, "mac_address", None)
use_forbidden_mac_range = utils.pop_param(port_attrs,
"use_forbidden_mac_range", False)
segment_id = utils.pop_param(port_attrs, "segment_id")
fixed_ips = utils.pop_param(port_attrs, "fixed_ips")
if "device_id" not in port_attrs:
port_attrs['device_id'] = ""
device_id = port_attrs['device_id']
# NOTE(morgabra) This should be instance.node from nova, only needed
# for ironic_driver.
if "instance_node_id" not in port_attrs:
port_attrs['instance_node_id'] = ""
instance_node_id = port_attrs['instance_node_id']
net_id = port_attrs["network_id"]
port_id = uuidutils.generate_uuid()
net = db_api.network_find(context, None, None, None, False, id=net_id,
scope=db_api.ONE)
if not net:
raise n_exc.NetworkNotFound(net_id=net_id)
_raise_if_unauthorized(context, net)
# NOTE (Perkins): If a device_id is given, try to prevent multiple ports
# from being created for a device already attached to the network
if device_id:
existing_ports = db_api.port_find(context,
network_id=net_id,
device_id=device_id,
scope=db_api.ONE)
if existing_ports:
raise n_exc.BadRequest(
resource="port", msg="This device is already connected to the "
"requested network via another port")
# Try to fail early on quotas and save ourselves some db overhead
if fixed_ips:
quota.QUOTAS.limit_check(context, context.tenant_id,
fixed_ips_per_port=len(fixed_ips))
if not STRATEGY.is_provider_network(net_id):
# We don't honor segmented networks when they aren't "shared"
segment_id = None
port_count = db_api.port_count_all(context, network_id=[net_id],
tenant_id=[context.tenant_id])
quota.QUOTAS.limit_check(
context, context.tenant_id,
ports_per_network=port_count + 1)
else:
if not segment_id:
raise q_exc.AmbiguousNetworkId(net_id=net_id)
network_plugin = utils.pop_param(port_attrs, "network_plugin")
if not network_plugin:
network_plugin = net["network_plugin"]
port_attrs["network_plugin"] = network_plugin
ipam_driver = _get_ipam_driver(net, port=port_attrs)
net_driver = _get_net_driver(net, port=port_attrs)
# NOTE(morgabra) It's possible that we select a driver different than
# the one specified by the network. However, we still might need to use
# this for some operations, so we also fetch it and pass it along to
# the backend driver we are actually using.
base_net_driver = _get_net_driver(net)
# TODO(anyone): security groups are not currently supported on port create.
# Please see JIRA:NCP-801
security_groups = utils.pop_param(port_attrs, "security_groups")
if security_groups is not None:
raise q_exc.SecurityGroupsNotImplemented()
group_ids, security_groups = _make_security_group_list(context,
security_groups)
quota.QUOTAS.limit_check(context, context.tenant_id,
security_groups_per_port=len(group_ids))
addresses = []
backend_port = None
with utils.CommandManager().execute() as cmd_mgr:
@cmd_mgr.do
def _allocate_ips(fixed_ips, net, port_id, segment_id, mac):
fixed_ip_kwargs = {}
if fixed_ips:
if (STRATEGY.is_provider_network(net_id) and
not context.is_admin):
raise n_exc.NotAuthorized()
ips, subnets = split_and_validate_requested_subnets(context,
net_id,
segment_id,
fixed_ips)
fixed_ip_kwargs["ip_addresses"] = ips
fixed_ip_kwargs["subnets"] = subnets
ipam_driver.allocate_ip_address(
context, addresses, net["id"], port_id,
CONF.QUARK.ipam_reuse_after, segment_id=segment_id,
mac_address=mac, **fixed_ip_kwargs)
@cmd_mgr.undo
def _allocate_ips_undo(addr):
LOG.info("Rolling back IP addresses...")
if addresses:
for address in addresses:
try:
with context.session.begin():
ipam_driver.deallocate_ip_address(context, address)
except Exception:
LOG.exception("Couldn't release IP %s" % address)
@cmd_mgr.do
def _allocate_mac(net, port_id, mac_address,
use_forbidden_mac_range=False):
mac = ipam_driver.allocate_mac_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after,
mac_address=mac_address,
use_forbidden_mac_range=use_forbidden_mac_range)
return mac
@cmd_mgr.undo
def _allocate_mac_undo(mac):
LOG.info("Rolling back MAC address...")
if mac:
try:
with context.session.begin():
ipam_driver.deallocate_mac_address(context,
mac["address"])
except Exception:
LOG.exception("Couldn't release MAC %s" % mac)
@cmd_mgr.do
def _allocate_backend_port(mac, addresses, net, port_id):
backend_port = net_driver.create_port(
context, net["id"],
port_id=port_id,
security_groups=group_ids,
device_id=device_id,
instance_node_id=instance_node_id,
mac_address=mac,
addresses=addresses,
base_net_driver=base_net_driver)
_filter_backend_port(backend_port)
return backend_port
@cmd_mgr.undo
def _allocate_back_port_undo(backend_port):
LOG.info("Rolling back backend port...")
try:
backend_port_uuid = None
if backend_port:
backend_port_uuid = backend_port.get("uuid")
net_driver.delete_port(context, backend_port_uuid)
except Exception:
LOG.exception(
"Couldn't rollback backend port %s" % backend_port)
@cmd_mgr.do
def _allocate_db_port(port_attrs, backend_port, addresses, mac):
port_attrs["network_id"] = net["id"]
port_attrs["id"] = port_id
port_attrs["security_groups"] = security_groups
LOG.info("Including extra plugin attrs: %s" % backend_port)
port_attrs.update(backend_port)
with context.session.begin():
new_port = db_api.port_create(
context, addresses=addresses, mac_address=mac["address"],
backend_key=backend_port["uuid"], **port_attrs)
return new_port
@cmd_mgr.undo
def _allocate_db_port_undo(new_port):
LOG.info("Rolling back database port...")
if not new_port:
return
try:
with context.session.begin():
db_api.port_delete(context, new_port)
except Exception:
LOG.exception(
"Couldn't rollback db port %s" % backend_port)
# addresses, mac, backend_port, new_port
mac = _allocate_mac(net, port_id, mac_address,
use_forbidden_mac_range=use_forbidden_mac_range)
_allocate_ips(fixed_ips, net, port_id, segment_id, mac)
backend_port = _allocate_backend_port(mac, addresses, net, port_id)
new_port = _allocate_db_port(port_attrs, backend_port, addresses, mac)
return v._make_port_dict(new_port)
def update_port(context, id, port):
"""Update values of a port.
: param context: neutron api request context
: param id: UUID representing the port to update.
: param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
"""
LOG.info("update_port %s for tenant %s" % (id, context.tenant_id))
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise n_exc.PortNotFound(port_id=id)
port_dict = port["port"]
fixed_ips = port_dict.pop("fixed_ips", None)
admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up",
"device_id"]
always_filter = ["network_id", "backend_key", "network_plugin"]
utils.filter_body(context, port_dict, admin_only=admin_only,
always_filter=always_filter)
# Pre-check the requested fixed_ips before making too many db trips.
# Note that this is the only check we need, since this call replaces
# the entirety of the IP addresses document if fixed_ips are provided.
if fixed_ips:
quota.QUOTAS.limit_check(context, context.tenant_id,
fixed_ips_per_port=len(fixed_ips))
new_security_groups = utils.pop_param(port_dict, "security_groups")
if new_security_groups is not None:
if (Capabilities.TENANT_NETWORK_SG not in
CONF.QUARK.environment_capabilities):
if not STRATEGY.is_provider_network(port_db["network_id"]):
raise q_exc.TenantNetworkSecurityGroupRulesNotEnabled()
if new_security_groups is not None and not port_db["device_id"]:
raise q_exc.SecurityGroupsRequireDevice()
group_ids, security_group_mods = _make_security_group_list(
context, new_security_groups)
quota.QUOTAS.limit_check(context, context.tenant_id,
security_groups_per_port=len(group_ids))
if fixed_ips is not None:
# NOTE(mdietz): we want full control over IPAM since
# we're allocating by subnet instead of
# network.
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(
ipam.QuarkIpamANY.get_name())
addresses, subnet_ids = [], []
ip_addresses = {}
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id or ip_address):
raise n_exc.BadRequest(
resource="fixed_ips",
msg="subnet_id or ip_address required")
if ip_address and not subnet_id:
raise n_exc.BadRequest(
resource="fixed_ips",
msg="subnet_id required for ip_address allocation")
if subnet_id and ip_address:
ip_netaddr = None
try:
ip_netaddr = netaddr.IPAddress(ip_address).ipv6()
except netaddr.AddrFormatError:
raise n_exc.InvalidInput(
error_message="Invalid format provided for ip_address")
ip_addresses[ip_netaddr] = subnet_id
else:
subnet_ids.append(subnet_id)
port_ips = set([netaddr.IPAddress(int(a["address"]))
for a in port_db["ip_addresses"]])
new_ips = set([a for a in ip_addresses.keys()])
ips_to_allocate = list(new_ips - port_ips)
ips_to_deallocate = list(port_ips - new_ips)
for ip in ips_to_allocate:
if ip in ip_addresses:
# NOTE: Fix for RM10187 - we were losing the list of IPs if
# more than one IP was to be allocated. Track an
# aggregate list instead, and add it to the running total
# after each allocate
allocated = []
ipam_driver.allocate_ip_address(
context, allocated, port_db["network_id"],
port_db["id"], reuse_after=None, ip_addresses=[ip],
subnets=[ip_addresses[ip]])
addresses.extend(allocated)
for ip in ips_to_deallocate:
ipam_driver.deallocate_ips_by_port(
context, port_db, ip_address=ip)
for subnet_id in subnet_ids:
ipam_driver.allocate_ip_address(
context, addresses, port_db["network_id"], port_db["id"],
reuse_after=CONF.QUARK.ipam_reuse_after,
subnets=[subnet_id])
# Need to return all existing addresses and the new ones
if addresses:
port_dict["addresses"] = port_db["ip_addresses"]
port_dict["addresses"].extend(addresses)
# NOTE(morgabra) Updating network_plugin on port objects is explicitly
# disallowed in the api, so we use whatever exists in the db.
net_driver = _get_net_driver(port_db.network, port=port_db)
base_net_driver = _get_net_driver(port_db.network)
# TODO(anyone): What do we want to have happen here if this fails? Is it
# ok to continue to keep the IPs but fail to apply security
# groups? Is there a clean way to have a multi-status? Since
# we're in a beta-y status, I'm going to let this sit for
# a future patch where we have time to solve it well.
kwargs = {}
if new_security_groups is not None:
kwargs["security_groups"] = security_group_mods
net_driver.update_port(context, port_id=port_db["backend_key"],
mac_address=port_db["mac_address"],
device_id=port_db["device_id"],
base_net_driver=base_net_driver,
**kwargs)
port_dict["security_groups"] = security_group_mods
with context.session.begin():
port = db_api.port_update(context, port_db, **port_dict)
# NOTE(mdietz): fix for issue 112, we wanted the IPs to be in
# allocated_at order, so get a fresh object every time
if port_db in context.session:
context.session.expunge(port_db)
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
return v._make_port_dict(port_db)
def get_port(context, id, fields=None):
"""Retrieve a port.
: param context: neutron api request context
: param id: UUID representing the port to fetch.
: param fields: a list of strings that are valid keys in a
port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_port %s for tenant %s fields %s" %
(id, context.tenant_id, fields))
results = db_api.port_find(context, id=id, fields=fields,
scope=db_api.ONE)
if not results:
raise n_exc.PortNotFound(port_id=id)
return v._make_port_dict(results)
def get_ports(context, limit=None, sorts=None, marker=None, page_reverse=False,
filters=None, fields=None):
"""Retrieve a list of ports.
The contents of the list depends on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a port as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_ports for tenant %s filters %s fields %s" %
(context.tenant_id, filters, fields))
if filters is None:
filters = {}
if "ip_address" in filters:
if not context.is_admin:
raise n_exc.NotAuthorized()
ips = []
try:
ips = [netaddr.IPAddress(ip) for ip in filters.pop("ip_address")]
except netaddr.AddrFormatError:
raise n_exc.InvalidInput(
error_message="Invalid format provided for ip_address")
query = db_api.port_find_by_ip_address(context, ip_address=ips,
scope=db_api.ALL, **filters)
ports = []
for ip in query:
ports.extend(ip.ports)
else:
ports = db_api.port_find(context, limit, sorts, marker,
fields=fields, join_security_groups=True,
**filters)
return v._make_ports_list(ports, fields)
def get_ports_count(context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a port as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API.
"""
LOG.info("get_ports_count for tenant %s filters %s" %
(context.tenant_id, filters))
return db_api.port_count_all(context, join_security_groups=True, **filters)
def delete_port(context, id):
"""Delete a port.
: param context: neutron api request context
: param id: UUID representing the port to delete.
"""
LOG.info("delete_port %s for tenant %s" % (id, context.tenant_id))
port = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=id)
if 'device_id' in port: # false is weird, but ignore that
LOG.info("delete_port %s for tenant %s has device %s" %
(id, context.tenant_id, port['device_id']))
backend_key = port["backend_key"]
mac_address = netaddr.EUI(port["mac_address"]).value
ipam_driver = _get_ipam_driver(port["network"], port=port)
ipam_driver.deallocate_mac_address(context, mac_address)
ipam_driver.deallocate_ips_by_port(
context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
net_driver = _get_net_driver(port["network"], port=port)
base_net_driver = _get_net_driver(port["network"])
net_driver.delete_port(context, backend_key, device_id=port["device_id"],
mac_address=port["mac_address"],
base_net_driver=base_net_driver)
with context.session.begin():
db_api.port_delete(context, port)
def _diag_port(context, port, fields):
p = v._make_port_dict(port)
net_driver = _get_net_driver(port.network, port=port)
if 'config' in fields:
p.update(net_driver.diag_port(
context, port["backend_key"], get_status='status' in fields))
return p
def diagnose_port(context, id, fields):
if not context.is_admin:
raise n_exc.NotAuthorized()
if id == "*":
return {'ports': [_diag_port(context, port, fields) for
port in db_api.port_find(context).all()]}
db_port = db_api.port_find(context, id=id, scope=db_api.ONE)
if not db_port:
raise n_exc.PortNotFound(port_id=id)
port = _diag_port(context, db_port, fields)
return {'ports': port}
def _make_security_group_list(context, group_ids):
if not group_ids or not utils.attr_specified(group_ids):
return ([], [])
group_ids = list(set(group_ids))
groups = []
for gid in group_ids:
group = db_api.security_group_find(context, id=gid,
scope=db_api.ONE)
if not group:
raise sg_ext.SecurityGroupNotFound(id=gid)
groups.append(group)
return (group_ids, groups)
| apache-2.0 | 2,644,880,706,528,607,000 | 39.382848 | 79 | 0.594539 | false | 3.957677 | false | false | false |
ctmil/meli_oerp | models/posting.py | 1 | 7027 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import fields, osv, models, api
from odoo.tools.translate import _
import logging
from ..melisdk.meli import Meli
import logging
_logger = logging.getLogger(__name__)
from .meli_oerp_config import *
from dateutil.parser import *
from datetime import *
def _ml_datetime(datestr):
try:
#return parse(datestr).isoformat().replace("T"," ")
return parse(datestr).strftime('%Y-%m-%d %H:%M:%S')
except:
return ""
class mercadolibre_posting_update(models.TransientModel):
_name = "mercadolibre.posting.update"
_description = "Update Posting Questions"
def posting_update(self, context=None ):
context = context or self.env.context
posting_ids = False
_logger.info("context:")
_logger.info(context)
if ('active_ids' in context):
posting_ids = context['active_ids']
#_logger.info("ids %s", ''.join(ids))
#posting_ids = ids
posting_obj = self.env['mercadolibre.posting']
if (posting_ids):
for posting_id in posting_ids:
# _logger.info("posting_update: %s " % (posting_id) )
posting = posting_obj.browse(posting_id)
posting.posting_query_questions()
return {}
mercadolibre_posting_update()
class mercadolibre_posting(models.Model):
_name = "mercadolibre.posting"
_description = "Posting en MercadoLibre"
def _posting_update( self ):
company = self.env.user.company_id
posting_obj = self.env['mercadolibre.posting']
for posting in self:
update_status = "ok"
posting.posting_update = update_status
posting.posting_query_questions()
#res = {}
#res[posting.id] = update_status
#return res
def posting_query_questions( self ):
#get with an item id
company = self.env.user.company_id
posting_obj = self.env['mercadolibre.posting']
for posting in self:
log_msg = 'posting_query_questions: %s' % (posting.meli_id)
#_logger.info(log_msg)
CLIENT_ID = company.mercadolibre_client_id
CLIENT_SECRET = company.mercadolibre_secret_key
ACCESS_TOKEN = company.mercadolibre_access_token
REFRESH_TOKEN = company.mercadolibre_refresh_token
#
meli = Meli(client_id=CLIENT_ID,client_secret=CLIENT_SECRET, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN )
if (posting.meli_id):
pass;
else:
continue;
response = meli.get("/items/"+posting.meli_id, {'access_token':meli.access_token})
product_json = response.json()
#_logger.info( product_json )
if "error" in product_json:
ML_status = product_json["error"]
else:
ML_status = product_json["status"]
ML_permalink = product_json["permalink"]
ML_price = product_json["price"]
#ML_sku = product_json["seller_custom_field"]
posting.write( { 'meli_status': ML_status, 'meli_permalink': ML_permalink, 'meli_price': ML_price } )
if (not company.mercadolibre_cron_get_questions):
return {}
response = meli.get("/questions/search?item_id="+posting.meli_id, {'access_token':meli.access_token})
questions_json = response.json()
questions_obj = self.env['mercadolibre.questions']
if 'questions' in questions_json:
questions = questions_json['questions']
#_logger.info( questions )
cn = 0
for Question in questions:
cn = cn + 1
question_answer = Question['answer']
question_fields = {
'posting_id': posting.id,
'question_id': Question['id'],
'date_created': _ml_datetime(Question['date_created']),
'item_id': Question['item_id'],
'seller_id': Question['seller_id'],
'text': str(Question['text'].encode("utf-8")),
'status': Question['status'],
}
if (question_answer):
question_fields['answer_text'] = str(question_answer['text'].encode("utf-8"))
question_fields['answer_status'] = question_answer['status']
question_fields['answer_date_created'] = _ml_datetime(question_answer['date_created'])
question = questions_obj.search( [('question_id','=',question_fields['question_id'])])
if not question:
question = questions_obj.create( ( question_fields ))
else:
if question:
question.write( (question_fields) )
return {}
def posting_query_all_questions( self, cr, uid, ids, context=None ):
return {}
posting_date = fields.Date('Fecha del posting');
name = fields.Char('Name');
meli_id = fields.Char('Id del item asignado por Meli', size=256);
product_id = fields.Many2one('product.product','product_id');
meli_status = fields.Char( string="Estado del producto en MLA", size=256 );
meli_permalink = fields.Char( string="Permalink en MercadoLibre", size=512 );
meli_price = fields.Char(string='Precio de venta', size=128);
posting_questions = fields.One2many( 'mercadolibre.questions','posting_id','Questions' );
posting_update = fields.Char( compute=_posting_update, string="Posting Update", store=False );
meli_seller_custom_field = fields.Char('Sellect Custom Field or SKU',size=256);
mercadolibre_posting()
| agpl-3.0 | -4,238,164,900,268,997,000 | 37.700565 | 129 | 0.556994 | false | 4.043153 | false | false | false |
japsu/desugaala | status/models.py | 1 | 1102 | from collections import defaultdict
from django.db import models
from vote.models import Category, Option
class Watch(models.Model):
category = models.ForeignKey(Category)
def evaluate(self):
results = dict((wo.option, 0) for wo in self.watchoption_set.all())
for ballot_category in self.category.ballotcategory_set.all():
for ballot_option in ballot_category.ballotoption_set.all().order_by('order'):
if self.watchoption_set.filter(option=ballot_option.option):
results[ballot_option.option] += 1
break
results = list(i for i in results.iteritems())
results.sort(key=lambda (option, num_votes): -num_votes)
return results
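  # Added note (a reading of the loop above, not from project docs): each ballot
  # contributes one vote to the first option, in ballot order, that is also a
  # watched option, and the watched options are returned sorted by descending
  # vote count.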
def __unicode__(self):
options = u" vs. ".join(i.option.title for i in self.watchoption_set.all())
category = self.category.title if self.category else u"None"
return u"{category}: {options}".format(**locals())
class WatchOption(models.Model):
watch = models.ForeignKey(Watch)
option = models.ForeignKey(Option)
def __unicode__(self):
return self.option.title if self.option else u"None" | mit | -4,102,352,009,066,950,000 | 31.441176 | 84 | 0.702359 | false | 3.673333 | false | false | false |
beatrizChagas/scrm-solutions | extracao/rsoservices/service_preprocessing_twitter_v1_ACS.py | 1 | 5056 | # -*- coding: UTF-8 -*-
import sys
import emoji
import mysql.connector
from extracao.rsoservices.config import config
from extracao.rsoservices.emoji_dict import emoticon_dict
from extracao.rsoservices.preprocessing_dict import EMOJI_CARACTER
add_message_table0 = ("INSERT INTO extracao_processamento_tweet "
"(tweet_id, workspace_id, tweet_origin, tweet_tratament, tweet_demojize, tweet_process) "
"VALUES (%s, %s, %s, %s, %s, %s)")
def preprocessamento_tweets(workspace_id):
conex = mysql.connector.connect(**config)
con = conex.cursor()
con.execute("SELECT id, tweet FROM extracao_tweet WHERE workspace_id=%s;", (workspace_id,))
try:
mensagens = con.fetchall()
for msn in mensagens:
id_tweet=msn[0]
message_origin=msn[1]
con.execute("SELECT tweet_id FROM extracao_processamento_tweet WHERE tweet_id=%s;", (id_tweet,))
if con.fetchall():
continue
else:
message_tratament = tratament(message_origin)
if message_tratament == None:
message_origin=None
elif message_tratament == message_origin:
message = emoji(message_tratament)
message_demojize = None
message_tratament=None
con.execute(add_message_table0, (id_tweet, workspace_id, message_origin, message_tratament, message_demojize, message))
conex.commit()
else:
message_demojize = None
message = emoji(message_tratament)
con.execute(add_message_table0, (id_tweet, workspace_id, message_origin, message_tratament, message_demojize, message))
conex.commit()
continue
continue
except Exception as e:
print("EXCECAO!!!!!!!Insert no db", e)
conex.close()
print("fim")
def tratament(s):
if (s == '') or (s == None):
s = None
else:
s = s.replace('\n', ' ')
s = s.replace('\r', ' ')
s = s.replace('\t', ' ')
s = s.replace('\v', ' ')
s = s.replace(",),", ' ')
s = s.replace("('", ' ')
s = s.replace(",)]", ' ')
s = s.replace("'", ' ')
s = s.replace('("', ' ')
return s
def emoji(origin):
try:
import emoji
s = emoji.demojize(origin)
s = s.replace('::', ': :')
lista_texto = s.split()
print(lista_texto)
lista_demoj=[]
for palavra in lista_texto:
parada=False
cont=0
while not parada:
for group in EMOJI_CARACTER.items():
cont+=1
qtd_emojis=EMOJI_CARACTER.__len__()
chave=group[0]
valor=group[1]
if chave != palavra:
if chave in palavra:
palavra=palavra.split(chave)
palavra=''.join(palavra)
lista_demoj.append(palavra)
lista_demoj.append(valor)
#print(lista_demoj)
#demoj=''.join(lista_demoj)
parada=True
break
else:
if palavra in lista_demoj:
parada=True
break
elif palavra==chave:
lista_demoj.append(valor)
parada=True
break
elif chave not in palavra and cont <= qtd_emojis:
continue
else:
lista_demoj.append(palavra)
#demoj=''.join(lista_demoj)
parada=True
break
#print(lista_demoj)
#demoj=''.join(lista_demoj)
#print(demoj)
else:
lista_demoj.append(valor)
#print(lista_demoj)
#demoj=''.join(lista_demoj)
parada=True
break
demoj=' '.join(lista_demoj)
print(origin)
print(demoj)
if demoj == origin:
demoj=None
return demoj
else:
return demoj
except Exception as e:
print(e)
def process_tweet(workspace_id):
preprocessamento_tweets(workspace_id)
if (__name__ == '__main__'):
process_tweet() | gpl-3.0 | 3,370,406,689,246,566,000 | 35.746269 | 139 | 0.433347 | false | 4.209825 | false | false | false |
sio2project/filetracker | setup.py | 1 | 1364 | from os import path
import io
from setuptools import setup, find_packages
with io.open(
path.join(path.abspath(path.dirname(__file__)), 'README.md'), encoding='utf-8'
) as f:
long_description = f.read()
setup(
name='filetracker',
version='2.1.5',
author='SIO2 Project Team',
author_email='[email protected]',
description='Filetracker caching file storage',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/sio2project/filetracker',
license='GPL',
packages=find_packages(),
install_requires=[
'bsddb3==6.2.7',
'flup6',
'gunicorn==19.9.0',
'gevent==1.3.1',
'greenlet==0.4.13', # for compatibility with this version of gevent
'progressbar2',
'requests',
'six',
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
entry_points={
'console_scripts': [
'filetracker = filetracker.client.shell:main',
'filetracker-server = filetracker.servers.run:main',
'filetracker-cache-cleaner = filetracker.scripts.cachecleaner:main',
'filetracker-migrate = filetracker.scripts.migrate:main',
'filetracker-recover = filetracker.scripts.recover:main',
],
},
)
| gpl-3.0 | 2,672,298,170,505,186,000 | 28.652174 | 82 | 0.611437 | false | 3.618037 | false | false | false |
mohamedhagag/community-addons | analytic_resource_plan/model/analytic_resource_plan.py | 1 | 10135 | # -*- coding: utf-8 -*-
# © 2015 Eficent Business and IT Consulting Services S.L.
# (Jordi Ballester Alomar)
#
# © 2015 Serpent Consulting Services Pvt. Ltd.
# (Sudhir Arya)
#
# © 2016 Matmoz d.o.o.
# (Matjaž Mozetič)
#
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import time
from openerp import api, fields, models
from openerp.tools.translate import _
from openerp.exceptions import Warning as UserError
from openerp.exceptions import ValidationError
class AnalyticResourcePlanLine(models.Model):
_name = 'analytic.resource.plan.line'
_description = "Analytic Resource Planning lines"
_inherit = ['mail.thread', 'ir.needaction_mixin']
@api.multi
@api.depends('child_ids')
def _has_child(self):
        # new-API compute: assign the value on each record rather than
        # returning a dict
        for line in self:
            line.has_child = bool(line.child_ids)
account_id = fields.Many2one(
'account.analytic.account',
'Analytic Account',
required=True,
ondelete='cascade',
select=True,
domain=[('type', '<>', 'view')],
readonly=True,
states={'draft': [('readonly', False)]}
)
name = fields.Char(
'Activity description',
required=True,
readonly=True,
states={'draft': [('readonly', False)]}
)
date = fields.Date(
'Date',
required=True,
select=True,
readonly=True,
states={'draft': [('readonly', False)]},
default=lambda *a: time.strftime('%Y-%m-%d')
)
state = fields.Selection(
[
('draft', 'Draft'),
('confirm', 'Confirmed')
],
'Status',
select=True,
required=True,
readonly=True,
help=' * The \'Draft\' status is '
'used when a user is encoding a new and '
'unconfirmed resource plan line. \n* '
             'The \'Confirmed\' status is used to confirm '
'the execution of the resource plan lines.',
default='draft'
)
product_id = fields.Many2one(
'product.product',
'Product',
readonly=True,
required=True,
states={'draft': [('readonly', False)]}
)
product_uom_id = fields.Many2one(
'product.uom',
'UoM',
required=True,
readonly=True,
states={'draft': [('readonly', False)]}
)
unit_amount = fields.Float(
'Planned Quantity',
readonly=True,
required=True,
states={'draft': [('readonly', False)]},
help='Specifies the quantity that has '
'been planned.',
default=1
)
notes = fields.Text(
'Notes'
)
parent_id = fields.Many2one(
'analytic.resource.plan.line',
'Parent',
readonly=True,
ondelete='cascade'
)
child_ids = fields.One2many(
comodel_name='analytic.resource.plan.line',
inverse_name='parent_id',
string='Child lines'
)
has_child = fields.Boolean(
compute='_has_child',
string="Child lines"
)
analytic_line_plan_ids = fields.One2many(
'account.analytic.line.plan',
'resource_plan_id',
'Planned costs',
readonly=True
)
price_unit = fields.Float(
string='Cost Price',
groups='project.group_project_manager',
)
price_total = fields.Float(
store=False,
compute='_compute_get_price_total',
string='Total Cost',
groups='project.group_project_manager',
)
resource_type = fields.Selection(
selection=[('task', 'Task'), ('procurement', 'Procurement')],
string='Type',
required=True,
default='task'
)
user_id = fields.Many2one(
comodel_name='res.users',
string='Assign To',
ondelete='set null'
)
@api.multi
def copy(self, default=None):
self.ensure_one()
if default is None:
default = {}
default['parent_id'] = False
default['analytic_line_plan_ids'] = []
res = super(AnalyticResourcePlanLine, self).copy(default)
return res
# TODO: Solve TypeError: can only concatenate list (not "NoneType") to list
# on raise error
@api.model
def _prepare_analytic_lines(self):
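        # Build the values for the planned analytic line: resolve the expense
        # account from the product (falling back to its category) and require a
        # default planning version, then return the values as a one-element list.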
plan_version_obj = self.env['account.analytic.plan.version']
journal_id = (
self.product_id.expense_analytic_plan_journal_id
and self.product_id.expense_analytic_plan_journal_id.id
or False
)
general_account_id = (
self.product_id.product_tmpl_id.property_account_expense.id
)
if not general_account_id:
general_account_id = (
self.product_id.categ_id.property_account_expense_categ.id
)
if not general_account_id:
raise UserError(
_(
'There is no expense account defined '
'for this product: "%s" (id:%d)'
) % (self.product_id.name, self.product_id.id,)
)
default_plan = plan_version_obj.search(
[('default_resource_plan', '=', True)],
limit=1
)
if not default_plan:
raise UserError(
_(
'No active planning version for resource '
'plan exists.'
)
)
return [{
'resource_plan_id': self.id,
'account_id': self.account_id.id,
'name': self.name,
'date': self.date,
'product_id': self.product_id.id,
'product_uom_id': self.product_uom_id.id,
'unit_amount': self.unit_amount,
'amount': -1 * self.product_id.standard_price * self.unit_amount,
'general_account_id': general_account_id,
'journal_id': journal_id,
'notes': self.notes,
'version_id': default_plan.id,
'currency_id': self.account_id.company_id.currency_id.id,
# 'amount_currency': (
# -1 * self.product_id.standard_price * self.unit_amount
# ),
}]
@api.model
def create_analytic_lines(self):
res = []
line_plan_obj = self.env['account.analytic.line.plan']
lines_vals = self._prepare_analytic_lines()
for line_vals in lines_vals:
            line = line_plan_obj.create(line_vals)
            res.append(line)
return res
@api.model
def _delete_analytic_lines(self):
line_plan_obj = self.env['account.analytic.line.plan']
ana_line = line_plan_obj.search([('resource_plan_id', '=', self.id)])
ana_line.unlink()
return True
@api.multi
def action_button_draft(self):
for line in self:
for child in line.child_ids:
if child.state not in ('draft', 'plan'):
raise UserError(
_(
'All the child resource plan lines must '
' be in Draft state.'
)
)
line._delete_analytic_lines()
return self.write({'state': 'draft'})
@api.multi
def action_button_confirm(self):
for line in self:
if line.unit_amount == 0:
raise UserError(
_(
'Quantity should be greater than 0.'
)
)
if not line.child_ids:
line.create_analytic_lines()
return self.write({'state': 'confirm'})
@api.onchange('product_id')
def on_change_product_id(self):
if self.product_id:
self.name = self.product_id.name
self.product_uom_id = (
self.product_id.uom_id
and self.product_id.uom_id.id
or False
)
self.price_unit = self.product_id.standard_price
@api.onchange('account_id')
def on_change_account_id(self):
if self.account_id:
if self.account_id.date:
self.date = self.account_id.date
@api.multi
def write(self, vals):
analytic_obj = self.env['account.analytic.account']
if 'account_id' in vals:
analytic = analytic_obj.browse(vals['account_id'])
if vals.get('date', False):
vals['date'] = analytic.date
return super(AnalyticResourcePlanLine, self).write(vals)
@api.multi
def unlink(self):
for line in self:
if line.analytic_line_plan_ids:
raise UserError(
_(
'You cannot delete a record that refers to '
'analytic plan lines!'
)
)
return super(AnalyticResourcePlanLine, self).unlink()
# PRICE DEFINITIONS
@api.multi
@api.depends('price_unit', 'unit_amount')
def _compute_get_price_total(self):
for resource in self:
resource.price_total = resource.price_unit * resource.unit_amount
@api.multi
def _get_pricelist(self):
self.ensure_one()
partner_id = self._get_partner()
if partner_id:
if partner_id.property_product_pricelist:
return partner_id.property_product_pricelist
else:
return False
# RESOURCE TYPE
@api.onchange('resource_type')
def resource_type_change(self):
if self.resource_type == 'procurement':
self.user_id = False
@api.multi
@api.constrains('resource_type', 'product_uom_id')
def _check_description(self):
for resource in self:
            if resource.resource_type == 'task' and (
                    resource.product_uom_id.category_id != (
                    self.env.ref('product.uom_categ_wtime'))):
raise ValidationError(_(
"When resource type is task, "
"the uom category should be time"))
| agpl-3.0 | -7,276,925,191,341,370,000 | 30.169231 | 79 | 0.531096 | false | 4.053621 | false | false | false |
petershvets/spark-etl | spark_etl_extract.py | 1 | 9475 | #!/usr/bin/python
from simple_salesforce import Salesforce
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from pyspark import Row
from pyspark.sql.types import *
from pyspark.sql.functions import udf
from optparse import OptionParser
from pyspark.sql import DataFrameWriter
import json
import re
import os
from datetime import datetime
# *** SPARK-ETL packages
import util
import udf_spark_etl
def main(sc, sqlContext, properties_file, spark_etl_logger):
""" This is main data extraction functionality
Data is extracted from SFDC and loaded into Spark SQL temp tables
"""
startTime = datetime.now()
# Enable logging
spark_etl_logger.info("***** Main process execution started at: "+str(startTime))
# Get app environment variables
d_app_variables = util.get_app_variables()
spark_etl_logger.info("Application environment variables: %s" %(d_app_variables))
spark_etl_logger.info("Processing Spark ETL properties file: %s" %(properties_file))
##### Get table properties defined in respective table ETL config file ####
# Store table properties in local dictionary for servicing the script
#No need to pass SPARK_ETL_CONF_DIR variable as driver script passes file with absolute path
dict_tbl_properties = util.get_json_config('', properties_file)
##### Defined SOQL statement takes precedence over list of source columns #####
    ##### SOQL statement will be processed and related metadata will be extracted from it
if len(dict_tbl_properties["soql_query"]) > 0:
# Process SOQL query if it is defined in config file
soqlStmt = dict_tbl_properties["soql_query"]
spark_etl_logger.info("Defined SOQL statement: "+ soqlStmt)
# Process list of fields and define schema for creating RDD
schemaCol = re.findall('SELECT\s(.+)\sFROM', dict_tbl_properties["soql_query"], flags=re.IGNORECASE)[0]
spark_etl_logger.info("Columns extracted from SOQL: " + schemaCol)
# Removing extra whitespaces from string elements while converting
schemaList = [rec.strip() for rec in schemaCol.split(',')]
# Convert column names into StructType for RDD
fields = [StructField(field_name, StringType(), True) for field_name in schemaList]
schema = StructType(fields)
# Define source table name - extract from SOQL Query
src_tbl_name = re.findall("FROM\s(\S+)", soqlStmt, flags=re.IGNORECASE)[0]
spark_etl_logger.info("Source table name: " + src_tbl_name)
# Define target table name
tgt_table_name = dict_tbl_properties["tgt_table"]
spark_etl_logger.info("Target table name: " + tgt_table_name)
else:
spark_etl_logger.info("SOQL statement is not defined, will process src_table and src_columns properties")
# Constructing SOQL statement from properties provided, converting list to str
soqlStmt = "SELECT " + ', '.join(dict_tbl_properties["src_columns"]) \
+ " FROM " \
+ dict_tbl_properties["src_table"] \
+ " " + dict_tbl_properties["where"] \
+ " " + dict_tbl_properties["limit"]
spark_etl_logger.info("Constructed SOQL statement: %s" %(soqlStmt))
# Process list of fields and define schema for creating RDD
schemaList = dict_tbl_properties["src_columns"]
spark_etl_logger.info("Schema from config file: %s" %(schemaList))
fields = [StructField(field_name, StringType(), True) for field_name in schemaList]
schema = StructType(fields)
# Define source table name
src_tbl_name = dict_tbl_properties["src_table"]
spark_etl_logger.info("Source table name: " + src_tbl_name)
# Define target table name for load into target data storage of your choice
tgt_table_name = dict_tbl_properties["tgt_table"]
spark_etl_logger.info("Target table name: ",tgt_table_name)
################### End process table properties defined in table ETL config file ##################
# Get Salesforce connection details from connections json file
spark_etl_logger.info("Processing SFDC connections information file sfdc_connections.json")
d_sfdc_conn = util.get_json_config(d_app_variables['SPARK_ETL_CONN_DIR'], "sfdc_connections.json")
spark_etl_logger.info("SFDC Connections: %s" %(list(d_sfdc_conn.keys())))
# Process SFDC Connection details
spark_etl_logger.info("SFDC Connection details: %s" %(d_sfdc_conn[dict_tbl_properties["sfdc_connection"]]))
# Establish connection to Salesforce. Using Simple-Salesforce package
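    # util.get_sfdc_conn is assumed to return the Simple-Salesforce constructor
    # call as a string, which is exec'd so that `sf` becomes a module-level client.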
exec("sf=" + util.get_sfdc_conn(**d_sfdc_conn[dict_tbl_properties["sfdc_connection"]]), globals())
###### Retrieve source table properties - use it to define target table DDL ####
#
# Store object description in list of dictionaries
# This structure returned by Simple-Salesforce
exec("tblDesc = sf."+src_tbl_name+".describe()", globals())
lColProperties = ['name', 'type', 'length', 'precision', 'custom', 'scale']
columnProperties = list()
for line in tblDesc['fields']: # Iterate through the list of dictionaries
# Keep only needed properties listed in lColProperties list and
# columns mapped in config properties file and remove the rest
rec = {k:line[k] for k in (lColProperties) if line["name"] in list(dict_tbl_properties["columns_map"].keys())}
if len(rec) == 0:continue
columnProperties.append(rec)
spark_etl_logger.info("Column properties: %s" %(rec))
# Record table properties in json file
with open(os.path.join(d_app_variables['SPARK_ETL_LOG_DIR'],tgt_table_name+"_schema.json"), "w") as tableMetadata_file:
json.dump(columnProperties, tableMetadata_file)
# Build DDL in order to create table in MySQL db
for record in columnProperties:
spark_etl_logger.info("Column MySQL datatype: " + record["name"]+" Type:"+record["type"]+" New: "+util.get_sfdc_mysql_dt(record["type"], str(record["length"]), str(record["precision"]), str(record["scale"])))
#*********************** Start Data Acquisition **************************#
#
# Extract data from SFDC - run SOQL statement.
# sf.query returns a list of OrderedDict
queryResultRaw = sf.query_all(soqlStmt)
#*********************** End Data Acquisition ****************************#
#********************* Clean up dataset *************************#
# Remove unrelated record metadata provided by SFDC
queryResult = list()
for line in queryResultRaw['records']:
        rec=[(k,str(v)) for k, v in line.items() if k != "attributes"]
queryResult.append(rec)
# Create RDD
v_rdd = sc.parallelize(queryResult)
rddElemCount = v_rdd.count()
spark_etl_logger.info("RDD was successfully created")
spark_etl_logger.info("Dataset contains: "+ str(rddElemCount) + " records")
# Create DataFrame from RDD
global sqlDataFrame, sqlDFPK
sqlDataFrame = v_rdd.map(lambda l: Row(**dict(l))).toDF()
spark_etl_logger.info("Generating PK")
sqlDFPK = udf_spark_etl.generate_pk('WID', sqlDataFrame)
#sqlDFPK = sqlDataFrame.withColumn('WID', monotonicallyIncreasingId()+1)
spark_etl_logger.info("Done generating PK")
spark_etl_logger.info("Created dataframe with extracted data:: ")
sqlDFPK.printSchema()
sqlDFPK.show()
####################### UDF functions #########################
# Create UDFs
#
# logic to handle null values
slen = udf(lambda s: 0 if s is None else len(s), IntegerType())
StrConcat = udf(lambda s: "ADD_SOMETHING"+s, StringType())
####################### End UDF functions #########################
######################## Mapping columns ############################
    # Log the source-to-target column mapping defined in the config properties file
for k,v in sorted(dict_tbl_properties["columns_map"].items()):
spark_etl_logger.info("Column mapping: "+k+":"+v)
# Construct command for column mapping
wCol =''
v_dfSQL_col = ''
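    # Build a chained .withColumnRenamed(...) expression from the mapping and
    # apply it to the dataframe via the exec call below.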
for k,v in sorted(dict_tbl_properties["columns_map"].items()):
#wCol = wCol + ".withColumn(\'"+v+"\' , "+dfColumnsOrig+"."+k+")"
wCol = wCol + ".withColumnRenamed(\'"+k+"\' , \'"+v+"\')"
v_dfSQL_col = v_dfSQL_col + "\""+v+"\","
dfSQL_col = v_dfSQL_col.rstrip(',')
spark_etl_logger.info("The following command will be executed: dfRemapped = sqlDFPK %s" %(wCol))
# exec(dfColumnsRenamed+" = "+dfColumnsOrig+wCol, globals())
exec("global dfRemapped; dfRemapped = sqlDFPK"+wCol, globals())
dfRemapped.printSchema()
dfRemapped.show()
######################## End mapping columns ########################
# Generate PK
# Sample
#df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
#df0.select(monotonicallyIncreasingId().alias('id')).collect()
#################### Register DataFrame as Temp Table for SQL operatoins ####################
spark_etl_logger.info("Registering remapped data frame as Spark SQL temp table")
dfRemapped.registerTempTable(tgt_table_name)
# Run SQL (returns RDD)
rddSQL = sqlContext.sql("SELECT * FROM "+ tgt_table_name)
# Write DataFrame into AWS S3 bucket
print("Serialize DF into S3")
# dfRemapped.repartition(1).write.save("s3n://hive-qs-data/"+tgt_table_name+".json", "json", )
# dfRemapped.write.mode('append').json("s3n://hive-qs-data/"+tgt_table_name)
# rddSQL.rdd.saveAsTextFile(tgt_table_name+".csv")
# dfRemapped.rdd.map(lambda rec: ",".join([str(col) for col in rec])).saveAsTextFile("s3n://hive-qs-data/"+tgt_table_name)
# dfRemapped.repartition(1).rdd.map(lambda rec: ",".join([str(col) for col in rec])).saveAsTextFile("s3n://hive-qs-data/"+tgt_table_name)
print("Done serialize DF into S3")
endTime = datetime.now()
spark_etl_logger.info("***** Main process execution completed at: " + str(endTime))
spark_etl_logger.info("***** Main process execution took: " + str(endTime - startTime))
| apache-2.0 | -803,472,950,311,587,300 | 45.446078 | 210 | 0.688021 | false | 3.332747 | true | false | false |
JensAstrup/pyEchosign | tests/test_agreement.py | 1 | 8056 | from unittest import TestCase
from six import StringIO
from pyEchosign.exceptions.echosign import PermissionDenied
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
from pyEchosign.classes.agreement import Agreement
from pyEchosign.classes.account import EchosignAccount
from pyEchosign.exceptions.internal import ApiError
class TestAccount(TestCase):
@classmethod
def setup_class(cls):
cls.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
cls.mock_get = cls.mock_get_patcher.start()
cls.mock_put_patcher = patch('pyEchosign.classes.agreement.requests.put')
cls.mock_put = cls.mock_put_patcher.start()
cls.mock_post_patcher = patch('pyEchosign.classes.agreement.requests.post')
cls.mock_post = cls.mock_post_patcher.start()
def test_cancel_agreement_passes(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
e = EchosignAccount('a string')
e.api_access_point = 'http://echosign.com'
agreement = Agreement(account=e)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 200
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
agreement.cancel()
def test_cancel_agreement_401_raises_error(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
e = EchosignAccount('an invalid string')
e.api_access_point = 'http://echosign.com'
agreement = Agreement(account=e)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 401
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
with self.assertRaises(PermissionDenied):
agreement.cancel()
def test_cancel_agreement_500_raises_error(self):
""" Test that an invalid response due to an issue with the API, not the package, raises an Exception """
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('an invalid string')
account.api_access_point = 'http://echosign.com'
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 500
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
with self.assertRaises(ApiError):
agreement.cancel()
def test_delete_agreement_passes(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('an invalid string')
account.api_access_point = 'http://echosign.com'
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 200
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
agreement.cancel()
def test_delete_agreement_401_raises_error(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('an invalid string')
account.api_access_point = 'http://echosign.com'
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 401
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
with self.assertRaises(PermissionDenied):
agreement.cancel()
def test_create_agreement(self):
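        # Simulate the agreements listing endpoint and verify that a single
        # Agreement object is built from the mocked JSON payload.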
json_response = dict(userAgreementList=[dict(displayDate='2017-09-09T09:33:53-07:00', esign=True, displayUserSetInfos=[
{'displayUserSetMemberInfos': [{'email': '[email protected]'}]}], agreementId='123', name='test_agreement',
latestVersionId='v1', status='WAITING_FOR_MY_SIGNATURE')])
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('account')
account.api_access_point = 'http://echosign.com'
mock_response.json.return_value = json_response
mock_response.status_code = 200
mock_agreement_get_patcher = patch('pyEchosign.classes.agreement.requests.get')
mock_agreement_get = mock_agreement_get_patcher.start()
mock_agreement_get.return_value = mock_response
agreements = account.get_agreements()
agreements = list(agreements)
self.assertEqual(len(agreements), 1)
self.assertEqual(agreements[0].name, 'test_agreement')
# Reset the patch for the Account - otherwise exceptions will ensue
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
def test_send_reminder(self):
""" Test that reminders are sent without exceptions """
mock_response = Mock()
account = EchosignAccount('account')
account.api_access_point = 'http://echosign.com'
mock_response.status_code = 200
self.mock_post.return_value = mock_response
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
agreement.send_reminder()
agreement.send_reminder('Test')
agreement.send_reminder(None)
def test_get_form_data(self):
""" Test that form data is retrieved and returned correctly """
mock_response = Mock()
account = EchosignAccount('account')
account.api_access_point = 'http://echosign.com'
mock_response.status_code = 200
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.text = 'Column,Column2,Column3'
mock_response.status_code = 200
mock_get_patcher = patch('pyEchosign.classes.agreement.requests.get')
mock_get = mock_get_patcher.start()
mock_get.return_value = mock_response
form_data = agreement.get_form_data()
self.assertIsInstance(form_data, StringIO)
data = form_data.read()
self.assertEqual(data, mock_response.text)
mock_get_patcher.stop() | mit | -2,151,252,365,031,162,400 | 34.963303 | 127 | 0.636172 | false | 4.001987 | true | false | false |
numb3r33/StumbpleUponChallenge | src/data/make_dataset.py | 1 | 1475 | import pandas as pd
import numpy as np
import json
from unidecode import unidecode
def extract_domain(url):
# extract domains
domain = url.lower().split('/')[2]
domain_parts = domain.split('.')
# e.g. co.uk
if domain_parts[-2] not in ['com', 'co']:
return '.'.join(domain_parts[-2:])
else:
return '.'.join(domain_parts[-3:])
def load_csv(filename):
return pd.read_table(filename)
def parse_data(df):
data = []
columns = df.columns
for key, row in df.iterrows():
item = {}
for column in columns:
item[column] = row[column]
# parse url
item['real_url'] = row['url'].lower()
item['domain'] = extract_domain(row['url'])
item['tld'] = item['domain'].split('.')[-1]
# parse boilerplate
boilerplate = json.loads(row['boilerplate'])
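        # keep title/url/body from the boilerplate JSON, transliterating any
        # non-ASCII text to plain ASCII with unidecode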
for f in ['title', 'url', 'body']:
item[f] = boilerplate[f] if f in boilerplate else u''
item[f] = unidecode(item[f]) if item[f] else ''
if 'label' in row:
item['label'] = row['label']
else:
item['label'] = np.nan
data.append(item)
return data
def get_train():
train = load_csv('../data/raw/train.tsv')
return (parse_data(train))
def get_test():
test = load_csv('../data/raw/test.tsv')
return (parse_data(test))
| mit | 4,679,320,032,139,094,000 | 21.692308 | 65 | 0.51661 | false | 3.734177 | false | false | false |
brunoliveira8/managyment | project/gym_app/migrations/0001_initial.py | 1 | 7312 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Athlete',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(default=b'BG', max_length=2, choices=[(b'BG', b'Beginner'), (b'IN', b'Intermediate'), (b'AD', b'Advanced')])),
('training_period', models.CharField(default=b'MO', max_length=2, choices=[(b'MO', b'Morning'), (b'AF', b'Afternoon'), (b'NI', b'Night')])),
('gender', models.CharField(default=b'M', max_length=2, choices=[(b'M', b'Male'), (b'F', b'Female')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BodyScreening',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('screeningDate', models.DateField(default=datetime.datetime.now)),
('triceps', models.IntegerField(default=0, max_length=3)),
('biceps', models.IntegerField(default=0, max_length=3)),
('subscapular', models.IntegerField(default=0, max_length=3)),
('supraspinale', models.IntegerField(default=0, max_length=3)),
('suprailic', models.IntegerField(default=0, max_length=3)),
('abdominal', models.IntegerField(default=0, max_length=3)),
('chest', models.IntegerField(default=0, max_length=3)),
('thigh', models.IntegerField(default=0, max_length=3)),
('calf', models.IntegerField(default=0, max_length=3)),
('weight', models.IntegerField(default=0, max_length=4)),
('feet', models.IntegerField(default=0, max_length=4)),
('inches', models.IntegerField(default=0, max_length=4)),
('bodyfat', models.DecimalField(default=0, max_digits=6, decimal_places=2)),
('bmi', models.DecimalField(default=0, max_digits=6, decimal_places=1)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Exercise',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('weight', models.IntegerField(default=1, max_length=4)),
('repetition', models.IntegerField(default=1, max_length=4)),
('sets', models.IntegerField(default=1, max_length=4)),
('day', models.IntegerField(default=1, max_length=7, choices=[(1, b'Day 1'), (2, b'Day 2'), (3, b'Day 3'), (4, b'Day 4'), (5, b'Day 5'), (6, b'Day 6'), (7, b'Day 7')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MailBox',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('owner', models.CharField(max_length=50)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sbj', models.CharField(max_length=50)),
('body', models.TextField(max_length=500)),
('src', models.CharField(max_length=50)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PersonalTrainer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('gender', models.CharField(default=b'M', max_length=2, choices=[(b'M', b'Male'), (b'F', b'Female')])),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tracker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('startWeightDate', models.DateField(auto_now_add=True)),
('startWeight', models.IntegerField(default=0, max_length=4)),
('previousWeightDate', models.DateField(auto_now=True)),
('previousWeight', models.IntegerField(default=0, max_length=4)),
('currentWeightDate', models.DateField(auto_now=True)),
('currentWeight', models.IntegerField(default=170, max_length=4)),
('goalWeight', models.IntegerField(default=160, max_length=4)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WorkoutPlan',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('exercises', models.ManyToManyField(to='gym_app.Exercise')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='mailbox',
name='messages',
field=models.ManyToManyField(to='gym_app.Message'),
preserve_default=True,
),
migrations.AddField(
model_name='exercise',
name='task',
field=models.ForeignKey(to='gym_app.Task'),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='screenings',
field=models.ManyToManyField(to='gym_app.BodyScreening'),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='tracker',
field=models.OneToOneField(to='gym_app.Tracker'),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='workout_plan',
field=models.OneToOneField(to='gym_app.WorkoutPlan'),
preserve_default=True,
),
]
| mit | 6,572,833,183,942,440,000 | 42.011765 | 185 | 0.526258 | false | 4.195066 | false | false | false |
cobbler/cobbler | cobbler/cobbler_collections/distros.py | 1 | 4476 | """
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os.path
import glob
from cobbler.cobbler_collections import collection
from cobbler.items import distro
from cobbler import utils
from cobbler.cexceptions import CX
class Distros(collection.Collection):
"""
A distro represents a network bootable matched set of kernels and initrd files.
"""
@staticmethod
def collection_type() -> str:
return "distro"
@staticmethod
def collection_types() -> str:
return "distros"
def factory_produce(self, api, item_dict):
"""
Return a Distro forged from item_dict
"""
new_distro = distro.Distro(api)
new_distro.from_dict(item_dict)
return new_distro
def remove(self, name, with_delete: bool = True, with_sync: bool = True, with_triggers: bool = True,
recursive: bool = False):
"""
Remove element named 'name' from the collection
:raises CX: In case any subitem (profiles or systems) would be orphaned. If the option ``recursive`` is set then
the orphaned items would be removed automatically.
"""
name = name.lower()
        # first see if any profiles use this distro
if not recursive:
for profile in self.api.profiles():
if profile.distro and profile.distro.name.lower() == name:
raise CX("removal would orphan profile: %s" % profile.name)
obj = self.find(name=name)
if obj is not None:
kernel = obj.kernel
if recursive:
kids = obj.get_children()
for k in kids:
self.api.remove_profile(k, recursive=recursive, delete=with_delete, with_triggers=with_triggers)
if with_delete:
if with_triggers:
utils.run_triggers(self.api, obj, "/var/lib/cobbler/triggers/delete/distro/pre/*", [])
if with_sync:
lite_sync = self.api.get_sync()
lite_sync.remove_single_distro(name)
self.lock.acquire()
try:
del self.listing[name]
finally:
self.lock.release()
self.collection_mgr.serialize_delete(self, obj)
if with_delete:
if with_triggers:
utils.run_triggers(self.api, obj, "/var/lib/cobbler/triggers/delete/distro/post/*", [])
utils.run_triggers(self.api, obj, "/var/lib/cobbler/triggers/change/*", [])
# look through all mirrored directories and find if any directory is holding this particular distribution's
# kernel and initrd
settings = self.api.settings()
possible_storage = glob.glob(settings.webdir + "/distro_mirror/*")
path = None
for storage in possible_storage:
if os.path.dirname(obj.kernel).find(storage) != -1:
path = storage
continue
# if we found a mirrored path above, we can delete the mirrored storage /if/ no other object is using the
# same mirrored storage.
if with_delete and path is not None and os.path.exists(path) and kernel.find(settings.webdir) != -1:
# this distro was originally imported so we know we can clean up the associated storage as long as
# nothing else is also using this storage.
found = False
distros = self.api.distros()
for d in distros:
if d.kernel.find(path) != -1:
found = True
if not found:
utils.rmtree(path)
| gpl-2.0 | -4,830,930,041,022,205,000 | 37.586207 | 120 | 0.605004 | false | 4.405512 | false | false | false |
bennylope/django-site-contacts | contact/migrations/0001_initial.py | 1 | 4272 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
USER_MODEL_NAME = USER_MODEL.split('.')[1]
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Recipient'
db.create_table('contact_recipient', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm[USER_MODEL], unique=True)),
))
db.send_create_signal('contact', ['Recipient'])
def backwards(self, orm):
# Deleting model 'Recipient'
db.delete_table('contact_recipient')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
USER_MODEL: {
'Meta': {'object_name': USER_MODEL_NAME},
#'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
#'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
#'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
#'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
#'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
#'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
#'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
#'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
#'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
#'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
#'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
#'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contact.recipient': {
'Meta': {'object_name': 'Recipient'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['{0}']".format(USER_MODEL), 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['contact']
| bsd-3-clause | 1,464,142,631,011,156,500 | 58.333333 | 182 | 0.562968 | false | 3.753954 | false | false | false |
nathantypanski/zombierl | character.py | 1 | 2941 | from libtcod import libtcodpy as libtcod
import object as O
import map_vars as M
import status as S
import random
class Character (O.Object):
def __init__ (self, name, max_health, x, y, char, color, npc=False,
strength=5, to_hit=0.8, view_distance=10):
self.name = name
self.health = max_health
self.max_health = max_health
self._x = x
self._y = y
self.char = char
self.color = color
self.items = []
self.hand = None
self.npc = npc
self.strength = strength
self.to_hit = to_hit
self.view_distance=view_distance
M.gameworld[self.x][self.y].characters.append(self)
def move (self, dx, dy):
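        # If the destination tile is blocked (occupied or not walkable floor),
        # attack any non-NPC character standing there; otherwise step onto it.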
if (M.gameworld[self.x + dx][self.y + dy].characters
or not M.gameworld[self.x + dx][self.y + dy].is_floor()):
characters = M.gameworld[self.x + dx][self.y + dy].characters
if characters:
for character in characters:
if not character.npc:
self.attack(character)
else:
M.gameworld[self.x][self.y].characters.remove(self)
self.x = self.x + dx
self.y = self.y + dy
M.gameworld[self.x][self.y].characters.append(self)
def pick_up(self):
if M.gameworld[self.x][self.y].items:
item = M.gameworld[self.x][self.y].items.pop()
self.items.append(item)
def drop(self):
if self.items:
item = self.items.pop()
M.gameworld[self.x][self.y].items.append(item)
def drop_all(self):
for item in self.items:
self.items.remove(item)
M.gameworld[self.x][self.y].items.append(item)
# Moves toward coordinates. Only moves one step.
def move_to_coordinates (self, dx, dy):
if dx > self.x:
newx = 1
elif dx < self.x:
newx = -1
else:
newx = 0
if dy > self.y:
newy = 1
elif dy < self.y:
newy = -1
else:
newy = 0
self.move(newx, newy)
# Set the character's health.
def set_health (self, health):
self.health = health
def attack (self, character):
damage = self.strength*random.randint(self.strength//2, self.strength*2)
if random.random() <= self.to_hit:
S.add_status("%s hits %s!" % (self.name, character.name))
if damage > (0.5*character.max_health):
S.add_status("It's super effective!")
character.take_damage(damage)
else:
S.add_status("%s swings and misses." % (self.name))
def take_damage (self, damage):
self.health -= damage
if 0 > self.health:
S.add_status("%s is killed!" % (self.name))
self.health = 0
M.gameworld[self.x][self.y].characters.remove(self)
self.drop_all()
def compute_fov(self):
for x in range (M.MAP_WIDTH):
for y in range (M.MAP_HEIGHT):
if M.gameworld[x][y].is_floor():
libtcod.map_set_properties (self.fov, x , y, True, True)
libtcod.map_compute_fov (self.fov, self.x, self.y, self.view_distance,
True,libtcod.FOV_DIAMOND)
| gpl-3.0 | -7,985,737,944,965,132,000 | 28.707071 | 76 | 0.606256 | false | 3.057173 | false | false | false |
matthagy/Jamenson | jamenson/compiler/Attic/constant_reduction.py | 1 | 4970 | '''Evaluate constant expressions in ir
'''
from __future__ import absolute_import
from __future__ import with_statement
import operator as O
from ..runtime.multimethod import MultiMethod, defmethod, around
from .resolution import compile_time_resolve, UnresolvableError
from .walk import propigate_location
from . import ir as I
from . import codegen
constant_reduce = MultiMethod('constant_reduce',
signature='node',
doc='''If possible reduce expression to simpler expression.
Called after children nodes have been reduced to simpler nodes
''')
def reduce_constants(node):
#reduce children first
for child in list(I.iter_children(node)):
r_child = reduce_constants(child)
if r_child is not child:
I.replace_child(child, r_child)
return constant_reduce(node)
class NotConstant(Exception):
pass
no_default = object()
def as_value(op, default=no_default):
if op is None and default is not no_default:
return default
if not isinstance(op, I.constant):
raise NotConstant
return op.value
def catch_notconstant(func):
def inner(node, *args, **kwds):
try:
return func(node, *args, **kwds)
except NotConstant:
return node
return inner
def mkcnst(node, value):
return propigate_location(node, I.make_constant(value))
@catch_notconstant
def reduce_through_function(node, func):
return mkcnst(node, evaluate_catch(node, func, *map(as_value, I.iter_children(node))))
def evaluate_catch(node, func, *args):
try:
return func(*args)
except Exception:
#could insert code to handle errors here
raise
#by default do nothing
@defmethod(constant_reduce, [I.node])
def meth(node):
return node
unary_functions = {
I.neg : O.neg,
I.pos : O.pos,
I.not_ : O.not_,
I.convert : repr,
I.invert : O.invert,
I.get_iter : iter,
}
@defmethod(constant_reduce, [I.unary_base])
def meth(node):
return reduce_through_function(node, unary_functions[type(node)])
binary_functions = {
I.add : O.add,
I.subtract : O.sub,
I.multiply : O.mul,
I.divide : O.div,
I.floor_divide : O.floordiv,
I.true_divide : O.truediv,
I.modulo : O.mod,
I.iadd : O.iadd,
I.isubtract : O.isub,
I.imultiply : O.imul,
I.idivide : O.idiv,
I.ifloor_divide : O.ifloordiv,
I.itrue_divide : O.itruediv,
I.imodulo : O.imod,
I.lshift : O.lshift,
I.rshift : O.rshift,
I.binand : O.and_,
I.binor : O.or_,
I.binxor : O.xor,
I.ibinand : O.iand,
I.ibinor : O.ior,
I.ibinxor : O.ixor,
I.gt : O.gt,
I.ge : O.ge,
I.eq : O.eq,
I.le : O.le,
I.lt : O.lt,
I.in_ : O.contains,
I.notin : lambda x,seq: x not in seq,
I.is_ : O.is_,
I.isnot : O.is_not,
I.exception_match : isinstance,
}
@defmethod(constant_reduce, [I.binary_base])
def meth(node):
return reduce_through_function(node, binary_functions[type(node)])
@defmethod(constant_reduce, [I.attrget])
@catch_notconstant
def meth(node):
return evaluate_catch(node, getattr, as_value(node.obj), node.name)
@defmethod(constant_reduce, [I.getitem])
@catch_notconstant
def meth(node):
return evaluate_catch(node, lambda op, item: op[item], as_value(node.op), as_value(node.item))
@defmethod(constant_reduce, [I.progn])
@catch_notconstant
def meth(node):
if not node.exprs:
return I.copy_loc(I.make_nop(), node)
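    # every subexpression must itself be constant (as_value raises NotConstant
    # otherwise); the progn then reduces to the value of its final expression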
for expr in node.exprs:
value = as_value(expr)
return mkcnst(node, value)
@defmethod(constant_reduce, [I.call])
@catch_notconstant
def meth(node):
callee = as_value(node.callee)
star_args = as_value(node.star_args, [])
star_kwds = as_value(node.star_kwds, {})
args = map(as_value, node.args)
kwds = dict(zip(node.kwd_names, map(as_value, node.kwd_values)))
def perform_call():
if set(kwds) & set(star_kwds):
raise ValueError("multiple values for same keyword")
kwds.update(star_kwds)
return callee(*(args + star_args), **kwds)
return mkcnst(node, evaluate_catch(node, perform_call))
@defmethod(constant_reduce, [I.if_])
@catch_notconstant
def meth(node):
return node.then if as_value(node.condition) else node.else_
@defmethod(constant_reduce, [I.function])
@catch_notconstant
def meth(func):
if codegen.get_function_free_bindings(func):
return func
map(as_value, func.defaults)
#must import here to prevent cyclic imports
from .function import make_function
return mkcnst(func, make_function(func))
| apache-2.0 | 2,729,221,520,801,868,300 | 27.895349 | 98 | 0.602817 | false | 3.319973 | false | false | false |
frappe/frappe | frappe/utils/file_manager.py | 1 | 13378 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
import os, base64, re, json
import hashlib
import mimetypes
import io
from frappe.utils import get_hook_method, get_files_path, random_string, encode, cstr, call_hook_method, cint
from frappe import _
from frappe import conf
from copy import copy
from urllib.parse import unquote
class MaxFileSizeReachedError(frappe.ValidationError):
pass
def get_file_url(file_data_name):
data = frappe.db.get_value("File", file_data_name, ["file_name", "file_url"], as_dict=True)
return data.file_url or data.file_name
def upload():
# get record details
dt = frappe.form_dict.doctype
dn = frappe.form_dict.docname
file_url = frappe.form_dict.file_url
filename = frappe.form_dict.filename
frappe.form_dict.is_private = cint(frappe.form_dict.is_private)
if not filename and not file_url:
frappe.msgprint(_("Please select a file or url"),
raise_exception=True)
file_doc = get_file_doc()
comment = {}
if dt and dn:
comment = frappe.get_doc(dt, dn).add_comment("Attachment",
_("added {0}").format("<a href='{file_url}' target='_blank'>{file_name}</a>{icon}".format(**{
"icon": ' <i class="fa fa-lock text-warning"></i>' \
if file_doc.is_private else "",
"file_url": file_doc.file_url.replace("#", "%23") \
if file_doc.file_name else file_doc.file_url,
"file_name": file_doc.file_name or file_doc.file_url
})))
return {
"name": file_doc.name,
"file_name": file_doc.file_name,
"file_url": file_doc.file_url,
"is_private": file_doc.is_private,
"comment": comment.as_dict() if comment else {}
}
def get_file_doc(dt=None, dn=None, folder=None, is_private=None, df=None):
'''returns File object (Document) from given parameters or form_dict'''
r = frappe.form_dict
if dt is None: dt = r.doctype
if dn is None: dn = r.docname
if df is None: df = r.docfield
if folder is None: folder = r.folder
if is_private is None: is_private = r.is_private
if r.filedata:
file_doc = save_uploaded(dt, dn, folder, is_private, df)
elif r.file_url:
file_doc = save_url(r.file_url, r.filename, dt, dn, folder, is_private, df)
return file_doc
def save_uploaded(dt, dn, folder, is_private, df=None):
fname, content = get_uploaded_content()
if content:
return save_file(fname, content, dt, dn, folder, is_private=is_private, df=df)
else:
raise Exception
def save_url(file_url, filename, dt, dn, folder, is_private, df=None):
# if not (file_url.startswith("http://") or file_url.startswith("https://")):
# frappe.msgprint("URL must start with 'http://' or 'https://'")
# return None, None
file_url = unquote(file_url)
file_size = frappe.form_dict.file_size
f = frappe.get_doc({
"doctype": "File",
"file_url": file_url,
"file_name": filename,
"attached_to_doctype": dt,
"attached_to_name": dn,
"attached_to_field": df,
"folder": folder,
"file_size": file_size,
"is_private": is_private
})
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_uploaded_content():
# should not be unicode when reading a file, hence using frappe.form
if 'filedata' in frappe.form_dict:
if "," in frappe.form_dict.filedata:
frappe.form_dict.filedata = frappe.form_dict.filedata.rsplit(",", 1)[1]
frappe.uploaded_content = base64.b64decode(frappe.form_dict.filedata)
frappe.uploaded_filename = frappe.form_dict.filename
return frappe.uploaded_filename, frappe.uploaded_content
else:
frappe.msgprint(_('No file attached'))
return None, None
def save_file(fname, content, dt, dn, folder=None, decode=False, is_private=0, df=None):
if decode:
if isinstance(content, str):
content = content.encode("utf-8")
if b"," in content:
content = content.split(b",")[1]
content = base64.b64decode(content)
file_size = check_max_file_size(content)
content_hash = get_content_hash(content)
content_type = mimetypes.guess_type(fname)[0]
fname = get_file_name(fname, content_hash[-6:])
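	# Reuse an existing File record with the same content hash and privacy flag
	# if present; otherwise write the content through the configured write_file hook.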
file_data = get_file_data_from_hash(content_hash, is_private=is_private)
if not file_data:
call_hook_method("before_write_file", file_size=file_size)
write_file_method = get_hook_method('write_file', fallback=save_file_on_filesystem)
file_data = write_file_method(fname, content, content_type=content_type, is_private=is_private)
file_data = copy(file_data)
file_data.update({
"doctype": "File",
"attached_to_doctype": dt,
"attached_to_name": dn,
"attached_to_field": df,
"folder": folder,
"file_size": file_size,
"content_hash": content_hash,
"is_private": is_private
})
f = frappe.get_doc(file_data)
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_file_data_from_hash(content_hash, is_private=0):
for name in frappe.db.sql_list("select name from `tabFile` where content_hash=%s and is_private=%s", (content_hash, is_private)):
b = frappe.get_doc('File', name)
return {k: b.get(k) for k in frappe.get_hooks()['write_file_keys']}
return False
def save_file_on_filesystem(fname, content, content_type=None, is_private=0):
fpath = write_file(content, fname, is_private)
if is_private:
file_url = "/private/files/{0}".format(fname)
else:
file_url = "/files/{0}".format(fname)
return {
'file_name': os.path.basename(fpath),
'file_url': file_url
}
def get_max_file_size():
return conf.get('max_file_size') or 10485760
def check_max_file_size(content):
max_file_size = get_max_file_size()
file_size = len(content)
if file_size > max_file_size:
frappe.msgprint(_("File size exceeded the maximum allowed size of {0} MB").format(
max_file_size / 1048576),
raise_exception=MaxFileSizeReachedError)
return file_size
def write_file(content, fname, is_private=0):
"""write file to disk with a random name (to compare)"""
file_path = get_files_path(is_private=is_private)
# create directory (if not exists)
frappe.create_folder(file_path)
# write the file
if isinstance(content, str):
content = content.encode()
with open(os.path.join(file_path.encode('utf-8'), fname.encode('utf-8')), 'wb+') as f:
f.write(content)
return get_files_path(fname, is_private=is_private)
def remove_all(dt, dn, from_delete=False):
"""remove all files in a transaction"""
try:
for fid in frappe.db.sql_list("""select name from `tabFile` where
attached_to_doctype=%s and attached_to_name=%s""", (dt, dn)):
remove_file(fid, dt, dn, from_delete)
except Exception as e:
if e.args[0]!=1054: raise # (temp till for patched)
def remove_file_by_url(file_url, doctype=None, name=None):
if doctype and name:
fid = frappe.db.get_value("File", {"file_url": file_url,
"attached_to_doctype": doctype, "attached_to_name": name})
else:
fid = frappe.db.get_value("File", {"file_url": file_url})
if fid:
return remove_file(fid)
def remove_file(fid, attached_to_doctype=None, attached_to_name=None, from_delete=False):
"""Remove file and File entry"""
file_name = None
if not (attached_to_doctype and attached_to_name):
attached = frappe.db.get_value("File", fid,
["attached_to_doctype", "attached_to_name", "file_name"])
if attached:
attached_to_doctype, attached_to_name, file_name = attached
ignore_permissions, comment = False, None
if attached_to_doctype and attached_to_name and not from_delete:
doc = frappe.get_doc(attached_to_doctype, attached_to_name)
ignore_permissions = doc.has_permission("write") or False
if frappe.flags.in_web_form:
ignore_permissions = True
if not file_name:
file_name = frappe.db.get_value("File", fid, "file_name")
comment = doc.add_comment("Attachment Removed", _("Removed {0}").format(file_name))
frappe.delete_doc("File", fid, ignore_permissions=ignore_permissions)
return comment
def delete_file_data_content(doc, only_thumbnail=False):
method = get_hook_method('delete_file_data_content', fallback=delete_file_from_filesystem)
method(doc, only_thumbnail=only_thumbnail)
def delete_file_from_filesystem(doc, only_thumbnail=False):
"""Delete file, thumbnail from File document"""
if only_thumbnail:
delete_file(doc.thumbnail_url)
else:
delete_file(doc.file_url)
delete_file(doc.thumbnail_url)
def delete_file(path):
"""Delete file from `public folder`"""
if path:
if ".." in path.split("/"):
frappe.msgprint(_("It is risky to delete this file: {0}. Please contact your System Manager.").format(path))
parts = os.path.split(path.strip("/"))
if parts[0]=="files":
path = frappe.utils.get_site_path("public", "files", parts[-1])
else:
path = frappe.utils.get_site_path("private", "files", parts[-1])
path = encode(path)
if os.path.exists(path):
os.remove(path)
def get_file(fname):
"""Returns [`file_name`, `content`] for given file name `fname`"""
file_path = get_file_path(fname)
# read the file
with io.open(encode(file_path), mode='rb') as f:
content = f.read()
try:
# for plain text files
content = content.decode()
except UnicodeDecodeError:
# for .png, .jpg, etc
pass
return [file_path.rsplit("/", 1)[-1], content]
def get_file_path(file_name):
"""Returns file path from given file name"""
f = frappe.db.sql("""select file_url from `tabFile`
where name=%s or file_name=%s""", (file_name, file_name))
if f:
file_name = f[0][0]
file_path = file_name
if "/" not in file_path:
file_path = "/files/" + file_path
if file_path.startswith("/private/files/"):
file_path = get_files_path(*file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
elif file_path.startswith("/files/"):
file_path = get_files_path(*file_path.split("/files/", 1)[1].split("/"))
else:
frappe.throw(_("There is some problem with the file url: {0}").format(file_path))
return file_path
def get_content_hash(content):
if isinstance(content, str):
content = content.encode()
return hashlib.md5(content).hexdigest()
def get_file_name(fname, optional_suffix):
# convert to unicode
fname = cstr(fname)
n_records = frappe.db.sql("select name from `tabFile` where file_name=%s", fname)
if len(n_records) > 0 or os.path.exists(encode(get_files_path(fname))):
f = fname.rsplit('.', 1)
if len(f) == 1:
partial, extn = f[0], ""
else:
partial, extn = f[0], "." + f[1]
return '{partial}{suffix}{extn}'.format(partial=partial, extn=extn, suffix=optional_suffix)
return fname
@frappe.whitelist()
def download_file(file_url):
"""
Download file using token and REST API. Valid session or
token is required to download private files.
Method : GET
Endpoint : frappe.utils.file_manager.download_file
URL Params : file_name = /path/to/file relative to site path
"""
file_doc = frappe.get_doc("File", {"file_url":file_url})
file_doc.check_permission("read")
path = os.path.join(get_files_path(), os.path.basename(file_url))
with open(path, "rb") as fileobj:
filedata = fileobj.read()
frappe.local.response.filename = os.path.basename(file_url)
frappe.local.response.filecontent = filedata
frappe.local.response.type = "download"
def extract_images_from_doc(doc, fieldname):
content = doc.get(fieldname)
content = extract_images_from_html(doc, content)
if frappe.flags.has_dataurl:
doc.set(fieldname, content)
def extract_images_from_html(doc, content):
frappe.flags.has_dataurl = False
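	# Inline base64 data URLs in <img> tags are extracted, saved as File
	# documents, and the src attribute is rewritten to the stored file's URL.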
def _save_file(match):
data = match.group(1)
data = data.split("data:")[1]
headers, content = data.split(",")
if "filename=" in headers:
filename = headers.split("filename=")[-1]
# decode filename
if not isinstance(filename, str):
filename = str(filename, 'utf-8')
else:
mtype = headers.split(";")[0]
filename = get_random_filename(content_type=mtype)
doctype = doc.parenttype if doc.parent else doc.doctype
name = doc.parent or doc.name
if doc.doctype == "Comment":
doctype = doc.reference_doctype
name = doc.reference_name
# TODO fix this
file_url = save_file(filename, content, doctype, name, decode=True).get("file_url")
if not frappe.flags.has_dataurl:
frappe.flags.has_dataurl = True
return '<img src="{file_url}"'.format(file_url=file_url)
if content:
content = re.sub(r'<img[^>]*src\s*=\s*["\'](?=data:)(.*?)["\']', _save_file, content)
return content
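# Rough before/after sketch of what the substitution above does (the random file
# name is hypothetical): an inline image such as
#   <img src="data:image/png;base64,iVBORw0KGgo...">
# is stored through save_file() and the tag is rewritten to point at the saved
# copy, e.g.
#   <img src="/files/a1b2c3d.png">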
def get_random_filename(extn=None, content_type=None):
if extn:
if not extn.startswith("."):
extn = "." + extn
elif content_type:
extn = mimetypes.guess_extension(content_type)
return random_string(7) + (extn or "")
@frappe.whitelist(allow_guest=True)
def validate_filename(filename):
from frappe.utils import now_datetime
timestamp = now_datetime().strftime(" %Y-%m-%d %H:%M:%S")
fname = get_file_name(filename, timestamp)
return fname
@frappe.whitelist()
def add_attachments(doctype, name, attachments):
'''Add attachments to the given DocType'''
if isinstance(attachments, str):
attachments = json.loads(attachments)
# loop through attachments
files =[]
for a in attachments:
if isinstance(a, str):
attach = frappe.db.get_value("File", {"name":a}, ["file_name", "file_url", "is_private"], as_dict=1)
# save attachments to new doc
f = save_url(attach.file_url, attach.file_name, doctype, name, "Home/Attachments", attach.is_private)
files.append(f)
return files
| mit | -1,703,635,252,731,318,000 | 28.662971 | 130 | 0.690537 | false | 2.928634 | false | false | false |
lig/picket_deadend | apps/picket/middleware.py | 1 | 1686 | """
Copyright 2010 Serge Matveenko
This file is part of Picket.
Picket is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Picket is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Picket. If not, see <http://www.gnu.org/licenses/>.
"""
from mongoengine import ValidationError
from documents import Project, Department
class PicketMiddleware(object):
def process_request(self, request):
# set project
if 'set_project' in request.GET:
request.session['current_project'] = request.GET['set_project']
# attach project object to request
current_project_id = request.session.get('current_project')
# current_project_id could be None for all projects
current_project = (current_project_id and
Project.objects.with_id(current_project_id))
# current_project could be None after lookup
request.project = current_project
# get headed departments and managed projects
if request.user.is_authenticated():
request.my_departments = Department.objects(head=request.user)
request.my_projects = Project.objects(manager=request.user)
else:
request.my_departments, request.my_projects = None, None
| gpl-3.0 | 5,305,808,671,908,993,000 | 35.652174 | 75 | 0.708185 | false | 4.345361 | false | false | false |
whatitslike/spiders | zhihu/roundtables.py | 1 | 1046 | from .agent import do_request
from .base import BaseSource
from .types import Types
class RoundTables(BaseSource):
def __init__(self):
super(RoundTables, self).__init__()
self._start_urls = [
'https://api.zhihu.com/roundtables?excerpt_len=75'
]
def _parse(self, json_objs):
urls = []
for obj in json_objs['data']:
t = obj.get('type')
if t != 'roundtable':
continue
urls.append(obj['url'])
questions_url = [u + '/questions?excerpt_len=75' for u in urls]
for url in questions_url:
objs = do_request(url)
while not objs['paging']['is_end']:
for obj in objs['data']:
if obj['type'] != 'question':
continue
self.publish(obj['url'], Types.QUESTION)
self.get_answer_url_by_question_url(obj['url'])
next_url = objs['paging']['next']
objs = do_request(next_url)
| gpl-3.0 | -4,115,169,817,327,442,000 | 28.055556 | 71 | 0.5 | false | 4.023077 | false | false | false |
adelina-t/compute-hyperv | hyperv/nova/livemigrationops.py | 1 | 5536 | # Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for live migration VM operations.
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from hyperv.i18n import _
from hyperv.nova import imagecache
from hyperv.nova import serialconsoleops
from hyperv.nova import utilsfactory
from hyperv.nova import vmops
from hyperv.nova import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
def check_os_version_requirement(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
if not self._livemigrutils:
raise NotImplementedError(_('Live migration is supported '
'starting with Hyper-V Server '
'2012'))
return function(self, *args, **kwds)
return wrapper
class LiveMigrationOps(object):
def __init__(self):
# Live migration is supported starting from Hyper-V Server 2012
if utilsfactory.get_hostutils().check_min_windows_version(6, 2):
self._livemigrutils = utilsfactory.get_livemigrationutils()
else:
self._livemigrutils = None
self._pathutils = utilsfactory.get_pathutils()
self._vmops = vmops.VMOps()
self._volumeops = volumeops.VolumeOps()
self._serial_console_ops = serialconsoleops.SerialConsoleOps()
self._imagecache = imagecache.ImageCache()
@check_os_version_requirement
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
LOG.debug("live_migration called", instance=instance_ref)
instance_name = instance_ref["name"]
try:
self._vmops.copy_vm_dvd_disks(instance_name, dest)
# We must make sure that the console log workers are stopped,
# otherwise we won't be able to delete / move VM log files.
self._serial_console_ops.stop_console_handler(instance_name)
self._pathutils.copy_vm_console_logs(instance_name, dest)
self._livemigrutils.live_migrate_vm(instance_name,
dest)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Calling live migration recover_method "
"for instance: %s", instance_name)
recover_method(context, instance_ref, dest, block_migration)
LOG.debug("Calling live migration post_method for instance: %s",
instance_name)
post_method(context, instance_ref, dest, block_migration)
@check_os_version_requirement
def pre_live_migration(self, context, instance, block_device_info,
network_info):
LOG.debug("pre_live_migration called", instance=instance)
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
boot_from_volume = self._volumeops.ebs_root_in_block_devices(
block_device_info)
if not boot_from_volume and instance.image_ref:
self._imagecache.get_cached_image(context, instance)
self._volumeops.initialize_volumes_connection(block_device_info)
@check_os_version_requirement
def post_live_migration(self, context, instance, block_device_info):
self._volumeops.disconnect_volumes(block_device_info)
self._pathutils.get_instance_dir(instance.name,
create_dir=False,
remove_dir=True)
@check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug("post_live_migration_at_destination called",
instance=instance_ref)
@check_os_version_requirement
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
LOG.debug("check_can_live_migrate_destination called", instance_ref)
return {}
@check_os_version_requirement
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
LOG.debug("check_can_live_migrate_destination_cleanup called")
@check_os_version_requirement
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
LOG.debug("check_can_live_migrate_source called", instance_ref)
return dest_check_data
| apache-2.0 | 8,935,619,094,054,086,000 | 40.62406 | 78 | 0.621568 | false | 4.304821 | false | false | false |
JimDuggan/SDMR | pysd2r/pysd2r_scripts/models/SIR.py | 1 | 4264 | """
Python model "SIR.py"
Translated using PySD version 0.9.0
"""
from __future__ import division
import numpy as np
from pysd import utils
import xarray as xr
from pysd.py_backend.functions import cache
from pysd.py_backend import functions
_subscript_dict = {}
_namespace = {
'TIME': 'time',
'Time': 'time',
'Contact Rate': 'contact_rate',
'Infected': 'infected',
'Infectivity': 'infectivity',
'IR': 'ir',
'Net Flow': 'net_flow',
'R Delay': 'r_delay',
'R0': 'r0',
'Recovered': 'recovered',
'RR': 'rr',
'Susceptible': 'susceptible',
'Total Population': 'total_population',
'FINAL TIME': 'final_time',
'INITIAL TIME': 'initial_time',
'SAVEPER': 'saveper',
'TIME STEP': 'time_step'
}
__pysd_version__ = "0.9.0"
@cache('run')
def contact_rate():
"""
Real Name: b'Contact Rate'
Original Eqn: b'4'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 4
@cache('step')
def infected():
"""
Real Name: b'Infected'
Original Eqn: b'INTEG ( IR-RR, 1)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return integ_infected()
@cache('run')
def infectivity():
"""
Real Name: b'Infectivity'
Original Eqn: b'0.25'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 0.25
@cache('step')
def ir():
"""
Real Name: b'IR'
Original Eqn: b'Contact Rate*Susceptible*(Infected/Total Population)*Infectivity'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return contact_rate() * susceptible() * (infected() / total_population()) * infectivity()
@cache('step')
def net_flow():
"""
Real Name: b'Net Flow'
Original Eqn: b'IR-RR'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return ir() - rr()
@cache('run')
def r_delay():
"""
Real Name: b'R Delay'
Original Eqn: b'2'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 2
@cache('step')
def r0():
"""
Real Name: b'R0'
Original Eqn: b'Contact Rate*Infectivity*R Delay'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return contact_rate() * infectivity() * r_delay()
@cache('step')
def recovered():
"""
Real Name: b'Recovered'
Original Eqn: b'INTEG ( RR, 0)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return integ_recovered()
@cache('step')
def rr():
"""
Real Name: b'RR'
Original Eqn: b'Infected/R Delay'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return infected() / r_delay()
@cache('step')
def susceptible():
"""
Real Name: b'Susceptible'
Original Eqn: b'INTEG ( -IR, 9999)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return integ_susceptible()
@cache('run')
def total_population():
"""
Real Name: b'Total Population'
Original Eqn: b'10000'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 10000
@cache('run')
def final_time():
"""
Real Name: b'FINAL TIME'
Original Eqn: b'100'
Units: b'Month'
Limits: (None, None)
Type: constant
b'The final time for the simulation.'
"""
return 100
@cache('run')
def initial_time():
"""
Real Name: b'INITIAL TIME'
Original Eqn: b'0'
Units: b'Month'
Limits: (None, None)
Type: constant
b'The initial time for the simulation.'
"""
return 0
@cache('step')
def saveper():
"""
Real Name: b'SAVEPER'
Original Eqn: b'TIME STEP'
Units: b'Month'
Limits: (0.0, None)
Type: component
b'The frequency with which output is stored.'
"""
return time_step()
@cache('run')
def time_step():
"""
Real Name: b'TIME STEP'
Original Eqn: b'0.0078125'
Units: b'Month'
Limits: (0.0, None)
Type: constant
b'The time step for the simulation.'
"""
return 0.0078125
integ_infected = functions.Integ(lambda: ir() - rr(), lambda: 1)
integ_recovered = functions.Integ(lambda: rr(), lambda: 0)
integ_susceptible = functions.Integ(lambda: -ir(), lambda: 9999)
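# Minimal usage sketch (not part of the generated module): the translated model
# is normally driven through PySD itself, e.g.
#
#   import pysd
#   model = pysd.load("SIR.py")
#   results = model.run(return_columns=["Susceptible", "Infected", "Recovered"])
#   print(results.head())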
| mit | 5,341,325,515,212,766,000 | 15.920635 | 93 | 0.56379 | false | 3.015559 | false | false | false |
bennylope/sysenv | sysenv/__init__.py | 1 | 1422 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Ben Lopatin'
__email__ = '[email protected]'
__version__ = '0.1.0'
import os
import re
import logging
from .data import EnvDict
logger = logging.getLogger(__name__)
def read_file_values(env_file, fail_silently=True):
"""
Borrowed from Honcho
"""
env_data = {}
try:
with open(env_file) as f:
content = f.read()
except IOError:
if fail_silently:
logging.error("Could not read file '{0}'".format(env_file))
return env_data
raise
for line in content.splitlines():
m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
if m1:
key, val = m1.group(1), m1.group(2)
m2 = re.match(r"\A'(.*)'\Z", val)
if m2:
val = m2.group(1)
m3 = re.match(r'\A"(.*)"\Z', val)
if m3:
val = re.sub(r'\\(.)', r'\1', m3.group(1))
env_data[key] = val
return env_data
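# Example of the .env syntax handled above (values are hypothetical):
#
#   DEBUG=true
#   SECRET_KEY='s3cr3t'
#   GREETING="hello \"world\""
#
# Unquoted, single-quoted and double-quoted values are matched by the three
# regular expressions; only double-quoted values have backslash escapes
# unescaped before being stored.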
def load(env_file=None, fail_silently=True, load_globally=True, **kwargs):
"""
    Returns an instance of EnvDict after reading the system environment and an
    optionally provided file.
"""
data = {}
data.update(os.environ)
if env_file:
data.update(read_file_values(env_file, fail_silently))
if load_globally:
os.environ.update(data)
return EnvDict(data, **kwargs)
| bsd-3-clause | 1,805,632,655,056,492,500 | 22.311475 | 74 | 0.539381 | false | 3.202703 | false | false | false |
FEniCS/dolfin | python/dolfin/function/functionspace.py | 1 | 8437 | # -*- coding: utf-8 -*-
"""Main module for DOLFIN"""
# Copyright (C) 2017 Chris N. Richardson and Garth N. Wells
#
# Distributed under the terms of the GNU Lesser Public License (LGPL),
# either version 3 of the License, or (at your option) any later
# version.
import types
import ffc
import ufl
import dolfin.cpp as cpp
from . import function
class FunctionSpace(ufl.FunctionSpace):
def __init__(self, *args, **kwargs):
"""Create finite element function space."""
if len(args) == 1:
            # Do we really want to do it this way? Can we get the
# sub-element from UFL?
self._init_from_cpp(*args, **kwargs)
else:
if len(args) == 0 or not isinstance(args[0], cpp.mesh.Mesh):
#cpp.dolfin_error("functionspace.py",
# "create function space",
# "Illegal argument, not a mesh: "
# + str(args[0]))
pass
elif len(args) == 2:
self._init_from_ufl(*args, **kwargs)
else:
self._init_convenience(*args, **kwargs)
def _init_from_ufl(self, mesh, element, constrained_domain=None):
# Initialize the ufl.FunctionSpace first to check for good
# meaning
ufl.FunctionSpace.__init__(self, mesh.ufl_domain(), element)
# Compile dofmap and element
ufc_element, ufc_dofmap = ffc.jit(element, parameters=None)
ufc_element = cpp.fem.make_ufc_finite_element(ufc_element)
# Create DOLFIN element and dofmap
dolfin_element = cpp.fem.FiniteElement(ufc_element)
ufc_dofmap = cpp.fem.make_ufc_dofmap(ufc_dofmap)
if constrained_domain is None:
dolfin_dofmap = cpp.fem.DofMap(ufc_dofmap, mesh)
else:
dolfin_dofmap = cpp.fem.DofMap(ufc_dofmap, mesh,
constrained_domain)
# Initialize the cpp.FunctionSpace
self._cpp_object = cpp.function.FunctionSpace(mesh,
dolfin_element,
dolfin_dofmap)
def _init_from_cpp(self, cppV, **kwargs):
"""
if not isinstance(cppV, cpp.FunctionSpace):
cpp.dolfin_error("functionspace.py",
"create function space",
"Illegal argument for C++ function space, "
"not a cpp.FunctionSpace: " + str(cppV))
# We don't want to support copy construction. This would
        # indicate internal deficiency in the library
if isinstance(cppV, FunctionSpace):
cpp.dolfin_error("functionspace.py",
"create function space",
"Illegal argument for C++ function space, "
"should not be functions.functionspace.FunctionSpace: " + str(cppV))
if len(kwargs) > 0:
cpp.dolfin_error("functionspace.py",
"create function space",
"Illegal arguments, did not expect C++ "
"function space and **kwargs: " + str(kwargs))
"""
# Reconstruct UFL element from signature
ufl_element = eval(cppV.element().signature(), ufl.__dict__)
# Get mesh
ufl_domain = cppV.mesh().ufl_domain()
# Initialize the ufl.FunctionSpace (not calling cpp.Function.__init__)
self._cpp_object = cppV
# Initialize the ufl.FunctionSpace
ufl.FunctionSpace.__init__(self, ufl_domain, ufl_element)
def _init_convenience(self, mesh, family, degree, form_degree=None,
constrained_domain=None, restriction=None):
# Create UFL element
element = ufl.FiniteElement(family, mesh.ufl_cell(), degree,
form_degree=form_degree)
self._init_from_ufl(mesh, element, constrained_domain=constrained_domain)
def dolfin_element(self):
"Return the DOLFIN element."
return self._cpp_object.element()
def num_sub_spaces(self):
"Return the number of sub spaces"
return self.dolfin_element().num_sub_elements()
def sub(self, i):
"Return the i-th sub space"
# FIXME: Should we have a more extensive check other than
# whats includeding the cpp code?
if not isinstance(i, int):
raise TypeError("expected an int for 'i'")
if self.num_sub_spaces() == 1:
raise ValueError("no SubSpaces to extract")
if i >= self.num_sub_spaces():
raise ValueError("Can only extract SubSpaces with i = 0 ... %d" % \
(self.num_sub_spaces() - 1))
assert hasattr(self.ufl_element(), "sub_elements")
# Extend with the python layer
return FunctionSpace(cpp.function.FunctionSpace.sub(self._cpp_object, i))
def component(self):
return self._cpp_object.component()
def contains(self, V):
"Check whether a function is in the FunctionSpace"
return self._cpp_object.contains(V._cpp_object)
#if isinstance(u, cpp.function.Function):
# return u._in(self)
#elif isinstance(u, function.Function):
# return u._cpp_object._in(self)
#return False
def __contains__(self, u):
"Check whether a function is in the FunctionSpace"
if isinstance(u, cpp.function.Function):
return u._in(self._cpp_object)
elif isinstance(u, function.Function):
return u._cpp_object._in(self._cpp_object)
return False
def __eq__(self, other):
"Comparison for equality."
return ufl.FunctionSpace.__eq__(self, other) and self._cpp_object == other._cpp_object
def __ne__(self, other):
"Comparison for inequality."
return ufl.FunctionSpace.__ne__(self, other) or self._cpp_object != other._cpp_object
def ufl_cell(self):
return self._cpp_object.mesh().ufl_cell()
def ufl_function_space(self):
return self
def dim(self):
return self._cpp_object.dim()
def id(self):
return self._cpp_object.id()
def element(self):
return self._cpp_object.element()
def dofmap(self):
return self._cpp_object.dofmap()
def mesh(self):
return self._cpp_object.mesh()
def set_x(self, basis, x, component):
return self._cpp_object.set_x(basis, x, component)
def collapse(self, collapsed_dofs=False):
"""Collapse a subspace and return a new function space and a map from
new to old dofs
*Arguments*
collapsed_dofs (bool)
Return the map from new to old dofs
*Returns*
_FunctionSpace_
The new function space.
dict
The map from new to old dofs (optional)
"""
# Get the cpp version of the FunctionSpace
cpp_space, dofs = self._cpp_object.collapse()
# Extend with the python layer
V = FunctionSpace(cpp_space)
if collapsed_dofs:
return V, dofs
else:
return V
def extract_sub_space(self, component):
V = self._cpp_object.extract_sub_space(component)
return FunctionSpace(V)
def tabulate_dof_coordinates(self):
return self._cpp_object.tabulate_dof_coordinates()
def VectorFunctionSpace(mesh, family, degree, dim=None, form_degree=None,
constrained_domain=None, restriction=None):
"""Create finite element function space."""
# Create UFL element
element = ufl.VectorElement(family, mesh.ufl_cell(), degree,
form_degree=form_degree, dim=dim)
# Return (Py)DOLFIN FunctionSpace
return FunctionSpace(mesh, element, constrained_domain=constrained_domain)
def TensorFunctionSpace(mesh, family, degree, shape=None, symmetry=None,
constrained_domain=None, restriction=None):
"""Create finite element function space."""
# Create UFL element
element = ufl.TensorElement(family, mesh.ufl_cell(), degree,
shape, symmetry)
# Return (Py)DOLFIN FunctionSpace
return FunctionSpace(mesh, element, constrained_domain=constrained_domain)
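# Minimal usage sketch (assumes a full FEniCS/DOLFIN installation with built-in
# mesh generation available):
#
#   from dolfin import UnitSquareMesh
#   mesh = UnitSquareMesh(8, 8)
#   V = FunctionSpace(mesh, "Lagrange", 1)        # scalar P1 space
#   W = VectorFunctionSpace(mesh, "Lagrange", 2)  # vector P2 space
#   print(V.dim(), W.dim())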
| lgpl-3.0 | -3,374,285,540,439,778,300 | 34.902128 | 97 | 0.576271 | false | 4.105596 | false | false | false |
peterlharding/PDQ | examples/ppa_1998/chap3/multi_class.py | 1 | 2445 | #!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2009, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
import pdq
# Based on closed_center.c
#
# Illustrate import of PDQ solver for multiclass workload.
#---- Model specific variables -------------------------------------------------
think = 0.0
#---- Initialize the model -----------------------------------------------------
tech = pdq.APPROX
if (tech == pdq.EXACT):
technique = "EXACT"
else:
technique = "APPROX"
print "**** %s Solution ****:\n" % technique
print " N R (w1) R (w2)"
for pop in range(1, 10):
pdq.Init("Test_Exact_calc")
#---- Define the workload and circuit type ----------------------------------
pdq.streams = pdq.CreateClosed("w1", pdq.TERM, 1.0 * pop, think)
pdq.streams = pdq.CreateClosed("w2", pdq.TERM, 1.0 * pop, think)
#---- Define the queueing center --------------------------------------------
pdq.nodes = pdq.CreateNode("node", pdq.CEN, pdq.FCFS)
#---- service demand --------------------------------------------------------
pdq.SetDemand("node", "w1", 1.0)
pdq.SetDemand("node", "w2", 0.5)
#---- Solve the model -------------------------------------------------------
pdq.Solve(tech)
print "%3.0f %8.4f %8.4f" % (pop,
pdq.GetResponse(pdq.TERM, "w1"),
pdq.GetResponse(pdq.TERM, "w2"));
| mit | -8,317,115,764,989,448,000 | 34.716418 | 80 | 0.42454 | false | 4.130068 | false | false | false |
kleinfeld/medpy | medpy/graphcut/generate.py | 1 | 16523 | """
@package medpy.graphcut.generate
Provides functionality to generate graphs efficiently from nD label-images and image voxels.
Functions:
- def graph_from_labels(label_image,
fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False): Creates a Graph object from a nD label image.
- def graph_from_voxels(fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False): Creates a Graph object from the voxels of an image.
@author Oskar Maier
@version r0.3.0
@since 2012-01-18
@status Release
"""
# build-in modules
import inspect
# third-party modules
import scipy
# own modules
from ..core import Logger
from ..graphcut import GCGraph
def graph_from_voxels(fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False):
"""
Create a graphcut.maxflow.GraphDouble object for all voxels of an image with a
ndim * 2 neighbourhood.
Every voxel of the image is regarded as a node. They are connected to their immediate
    neighbours via arcs. Whether two voxels are neighbours is determined using
    ndim*2-connectedness (e.g. 3*2=6 for 3D). In the next step the arc weights
(n-weights) are computed using the supplied boundary_term function.
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All voxels that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered voxels receive a maximum
(graphcut.graph.GCGraph.MAX) t-weight for their arc towards the sink.
@note If a voxel is marked as both, foreground and background, the background marker
is given higher priority.
@note all arcs whose weight is not explicitly set are assumed to carry a weight of
zero.
@param fg_markers The foreground markers as binary array of the same shape as the original image.
@type fg_markers ndarray
@param bg_markers The background markers as binary array of the same shape as the original image.
@type bg_markers ndarray
@param regional_term This can be either
False - all t-weights are set to 0, except for the nodes that are
directly connected to the source or sink.
, or a function -
The supplied function is used to compute the t_edges. It has to
have the following signature
regional_term(graph, regional_term_args),
and is supposed to compute (source_t_weight, sink_t_weight) for
all voxels of the image and add these to the passed graph.GCGraph
object. The weights have only to be computed for nodes where
they do not equal zero. Additional parameters can be passed via
the regional_term_args argument.
@type regional_term function
@param boundary_term This can be either
False -
In which case the weight of all n_edges i.e. between all nodes
that are not source or sink, are set to 0.
, or a function -
In which case it is used to compute the edges weights. The
supplied function has to have the following signature
fun(graph, boundary_term_args), and is supposed to compute the
edges between the graphs node and to add them to the supplied
graph.GCGraph object. Additional parameters
can be passed via the boundary_term_args argument.
@type boundary_term function
@param regional_term_args Use this to pass some additional parameters to the
regional_term function.
@param boundary_term_args Use this to pass some additional parameters to the
boundary_term function.
@return the created graph
@rtype graphcut.maxflow.GraphDouble
    @raise AttributeError If an argument is malformed.
@raise FunctionError If one of the supplied functions returns unexpected results.
"""
# prepare logger
logger = Logger.getInstance()
# prepare result graph
logger.debug('Assuming {} nodes and {} edges for image of shape {}'.format(fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape))
graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape))
logger.info('Performing attribute tests...')
# check, set and convert all supplied parameters
fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
# set dummy functions if not supplied
if not regional_term: regional_term = __regional_term_voxel
if not boundary_term: boundary_term = __boundary_term_voxel
# check supplied functions and their signature
if not hasattr(regional_term, '__call__') or not 2 == len(inspect.getargspec(regional_term)[0]):
raise AttributeError('regional_term has to be a callable object which takes two parameter.')
if not hasattr(boundary_term, '__call__') or not 2 == len(inspect.getargspec(boundary_term)[0]):
raise AttributeError('boundary_term has to be a callable object which takes two parameters.')
logger.debug('#nodes={}, #hardwired-nodes source/sink={}/{}'.format(fg_markers.size,
len(fg_markers.ravel().nonzero()[0]),
len(bg_markers.ravel().nonzero()[0])))
# compute the weights of all edges from the source and to the sink i.e.
# compute the weights of the t_edges Wt
logger.info('Computing and adding terminal edge weights...')
regional_term(graph, regional_term_args)
# compute the weights of the edges between the neighbouring nodes i.e.
# compute the weights of the n_edges Wr
logger.info('Computing and adding inter-node edge weights...')
boundary_term(graph, boundary_term_args)
# collect all voxels that are under the foreground resp. background markers i.e.
# collect all nodes that are connected to the source resp. sink
logger.info('Setting terminal weights for the markers...')
if not 0 == scipy.count_nonzero(fg_markers):
graph.set_source_nodes(fg_markers.ravel().nonzero()[0])
if not 0 == scipy.count_nonzero(bg_markers):
graph.set_sink_nodes(bg_markers.ravel().nonzero()[0])
return graph.get_graph()
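# Illustrative sketch (not part of the original module): wiring a custom
# boundary_term with the two-argument signature documented above. The
# intensity-difference weighting and the set_nweight call are assumptions made
# for the example, not the library's own term.
#
#   def my_boundary_term(graph, image):
#       # for every pair of 2*ndim-connected voxels (i, j) derive a weight from
#       # the intensity difference and attach it to the graph, e.g. via
#       # graph.set_nweight(i, j, weight, weight)
#       ...
#
#   gcgraph = graph_from_voxels(fg_markers, bg_markers,
#                               boundary_term=my_boundary_term,
#                               boundary_term_args=original_image)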
def graph_from_labels(label_image,
fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False):
"""
Create a graphcut.maxflow.GraphDouble object from a nD label image.
Every region of the label image is regarded as a node. They are connected to their
    immediate neighbours by arcs. Whether two regions are neighbours is determined using
ndim*2-connectedness (e.g. 3*2=6 for 3D).
    In the next step the arc weights (n-weights) are computed using the supplied
boundary_term function.
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All regions that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered regions receive a
maximum (graphcut.graph.GCGraph.MAX) t-weight for their arc towards the sink.
@note If a region is marked as both, foreground and background, the background marker
is given higher priority.
@note all arcs whose weight is not explicitly set are assumed to carry a weight of
zero.
@param label_image The label image as an array containing uint values. Note that the
region labels have to start from 1 and be continuous (filter.label.relabel()).
@type label_image numpy.ndarray
@param fg_markers The foreground markers as binary array of the same shape as the label image.
@type fg_markers ndarray
@param bg_markers The background markers as binary array of the same shape as the label image.
@type bg_markers ndarray
@param regional_term This can be either
False - all t-weights are set to 0, except for the nodes that are
directly connected to the source or sink.
, or a function -
The supplied function is used to compute the t_edges. It has to
have the following signature
regional_term(graph, label_image, regional_term_args), and is
supposed to compute the weights between the regions of the
label_image and the sink resp. source. The computed values it
should add directly to the supplied graph.GCGraph object.
Additional parameters can be passed via the regional_term_args
argument.
@type regional_term function
@param boundary_term This can be either
False -
In which case the weight of all n_edges i.e. between all nodes
that are not source or sink, are set to 0.
, or a function -
In which case it is used to compute the edges weights. The
supplied function has to have the following signature
fun(graph, label_image, boundary_term_args), and is supposed to
compute the (directed or undirected) edges between any two
adjunct regions of the label image. These computed weights it
adds directly to the supplied graph.GCGraph object. Additional
parameters can be passed via the boundary_term_args argument.
@type boundary_term function
@param regional_term_args Use this to pass some additional parameters to the
regional_term function.
@param boundary_term_args Use this to pass some additional parameters to the
boundary_term function.
@return the created graph
@rtype graphcut.maxflow.GraphDouble
    @raise AttributeError If an argument is malformed.
@raise FunctionError If one of the supplied functions returns unexpected results.
"""
# prepare logger
logger = Logger.getInstance()
logger.info('Performing attribute tests...')
# check, set and convert all supplied parameters
label_image = scipy.asarray(label_image)
fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
# check supplied labels image
if not 1 == min(label_image.flat):
        raise AttributeError('The supplied label image either does not contain any regions or they are not labeled consecutively starting from 1.')
# set dummy functions if not supplied
if not regional_term: regional_term = __regional_term_label
if not boundary_term: boundary_term = __boundary_term_label
# check supplied functions and their signature
if not hasattr(regional_term, '__call__') or not 3 == len(inspect.getargspec(regional_term)[0]):
raise AttributeError('regional_term has to be a callable object which takes three parameters.')
if not hasattr(boundary_term, '__call__') or not 3 == len(inspect.getargspec(boundary_term)[0]):
raise AttributeError('boundary_term has to be a callable object which takes three parameters.')
logger.info('Determining number of nodes and edges.')
# compute number of nodes and edges
nodes = len(scipy.unique(label_image))
    # POSSIBILITY 1: guess the number of edges (in the best case this is faster but requires a little more memory; in the worst case it is slower)
edges = 10 * nodes
logger.debug('guessed: #nodes={} nodes / #edges={}'.format(nodes, edges))
# POSSIBILITY 2: compute the edges (slow)
#edges = len(__compute_edges(label_image))
#logger.debug('computed: #nodes={} nodes / #edges={}'.format(nodes, edges))
# prepare result graph
graph = GCGraph(nodes, edges)
logger.debug('#hardwired-nodes source/sink={}/{}'.format(len(scipy.unique(label_image[fg_markers])),
len(scipy.unique(label_image[bg_markers]))))
#logger.info('Extracting the regions bounding boxes...')
# extract the bounding boxes
#bounding_boxes = find_objects(label_image)
# compute the weights of all edges from the source and to the sink i.e.
# compute the weights of the t_edges Wt
logger.info('Computing and adding terminal edge weights...')
#regions = set(graph.get_nodes()) - set(graph.get_source_nodes()) - set(graph.get_sink_nodes())
regional_term(graph, label_image, regional_term_args) # bounding boxes indexed from 0 # old version: regional_term(graph, label_image, regions, bounding_boxes, regional_term_args)
# compute the weights of the edges between the neighbouring nodes i.e.
# compute the weights of the n_edges Wr
logger.info('Computing and adding inter-node edge weights...')
boundary_term(graph, label_image, boundary_term_args)
# collect all regions that are under the foreground resp. background markers i.e.
# collect all nodes that are connected to the source resp. sink
logger.info('Setting terminal weights for the markers...')
graph.set_source_nodes(scipy.unique(label_image[fg_markers] - 1)) # requires -1 to adapt to node id system
graph.set_sink_nodes(scipy.unique(label_image[bg_markers] - 1))
return graph.get_graph()
def __regional_term_voxel(graph, regional_term_args):
"""Fake regional_term function with the appropriate signature."""
return {}
def __regional_term_label(graph, label_image, regional_term_args):
"""Fake regional_term function with the appropriate signature."""
return {}
def __boundary_term_voxel(graph, boundary_term_args):
"""Fake regional_term function with the appropriate signature."""
# supplying no boundary term contradicts the whole graph cut idea.
return {}
def __boundary_term_label(graph, label_image, boundary_term_args):
"""Fake regional_term function with the appropriate signature."""
# supplying no boundary term contradicts the whole graph cut idea.
return {}
def __voxel_4conectedness(shape):
"""
Returns the number of edges for the supplied image shape assuming 4-connectedness.
The name of the function has historical reasons. Essentially it returns the number
of edges assuming 4-connectedness only for 2D. For 3D it assumes 6-connectedness,
etc.
@param shape the shape of the image
@type shape sequence
@return the number of edges
@rtype int
"""
shape = list(shape)
while 1 in shape: shape.remove(1) # empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array)
return int(round(sum([(dim - 1)/float(dim) for dim in shape]) * scipy.prod(shape)))
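# Worked example for the formula above: for shape (3, 4) it gives
# ((3 - 1)/3 + (4 - 1)/4) * 12 = 8 + 9 = 17, matching a direct count on a 3x4
# grid (3 rows * 3 horizontal edges + 4 columns * 2 vertical edges = 17).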
| gpl-3.0 | 8,061,059,661,752,991,000 | 50.313665 | 183 | 0.638988 | false | 4.531816 | false | false | false |
twisted/mantissa | xmantissa/_recordattr.py | 1 | 6833 | # -*- test-case-name: xmantissa.test.test_recordattr -*-
"""
Utility support for attributes on items which compose multiple Axiom attributes
into a single epsilon.structlike.record attribute. This can be handy when
composing a simple, common set of columns that several tables share into a
recognizable object that is not an item itself. For example, the pair of
'localpart', 'domain' into a user object, or the triple of 'realname',
'nickname', 'hostmask', 'network' into an IRC nickname. This functionality is
currently used to make L{sharing.Identifier} objects.
This is a handy utility that should really be moved to L{axiom.attributes} and
made public as soon as a few conditions are met:
* L{WithRecordAttributes} needs to be integrated into L{Item}, or
otherwise made obsolete such that normal item instantiation works and
users don't need to call a bogus classmethod.
* L{RecordAttribute} needs to implement the full set of comparison
operators required by the informal axiom constraint language (__gt__,
__lt__, __ge__, __le__, probably some other stuff). It would also be
great if that informal language got documented somewhere.
"""
from axiom.attributes import AND
class RecordAttribute(object):
"""
A descriptor which maps a group of axiom attributes into a single attribute
which returns a record composing them all.
Use this within an Item class definition, like so::
class Address(record('localpart domain')):
'An email address.'
class Email(Item, WithRecordAttributes):
senderLocalpart = text()
senderDomain = text()
receipientLocalpart = text()
recipientDomain = text()
body = text()
sender = RecordAttribute(Address, senderLocalpart, senderDomain)
recipient = RecordAttribute(Address, recipientLocalpart,
recipientDomain)
# ...
        myEmail = Email.create(
            sender=Address(localpart=u'hello', domain=u'example.com'),
            recipient=Address(localpart=u'goodbye', domain=u'example.com'))
print myEmail.sender.localpart
    Note: the ugly C{create} classmethod is required to create items which use
this feature due to some problems with Axiom's initialization order. See
L{WithRecordAttributes} for details.
"""
def __init__(self, recordType, attrs):
"""
Create a L{RecordAttribute} for a certain record type and set of Axiom
attributes.
@param recordType: the result, or a subclass of the result, of
L{axiom.structlike.record}.
@param attrs: a tuple of L{axiom.attributes.SQLAttribute} instances
that were defined as part of the schema on the same item type.
"""
self.recordType = recordType
self.attrs = attrs
def __get__(self, oself, type=None):
"""
Retrieve this compound attribute from the given item.
@param oself: an L{axiom.item.Item} instance, of a type which has this
L{RecordAttribute}'s L{attrs} defined in its schema.
"""
if oself is None:
return self
constructData = {}
for n, attr in zip(self.recordType.__names__, self.attrs):
constructData[n] = attr.__get__(oself, type)
return self.recordType(**constructData)
def _decompose(self, value):
"""
Decompose an instance of our record type into a dictionary mapping
attribute names to values.
@param value: an instance of self.recordType
@return: L{dict} containing the keys declared on L{record}.
"""
data = {}
for n, attr in zip(self.recordType.__names__, self.attrs):
data[attr.attrname] = getattr(value, n)
return data
def __set__(self, oself, value):
"""
Set each component attribute of this L{RecordAttribute} in turn.
@param oself: an instance of the type where this attribute is defined.
@param value: an instance of self.recordType whose values should be
used.
"""
for n, attr in zip(self.recordType.__names__, self.attrs):
attr.__set__(oself, getattr(value, n))
def __eq__(self, other):
"""
@return: a comparison object resulting in all of the component
attributes of this attribute being equal to all of the attribute values
on the other object.
@rtype: L{IComparison}
"""
return AND(*[attr == getattr(other, name)
for attr, name
in zip(self.attrs, self.recordType.__names__)])
def __ne__(self, other):
"""
@return: a comparison object resulting in all of the component
attributes of this attribute being unequal to all of the attribute
values on the other object.
@rtype: L{IComparison}
"""
return AND(*[attr != getattr(other, name)
for attr, name
in zip(self.attrs, self.recordType.__names__)])
class WithRecordAttributes(object):
"""
Axiom has an unfortunate behavior, which is a rather deep-seated bug in the
way Item objects are initialized. Default parameters are processed before
the attributes in the constructor's dictionary are actually set. In other
words, if you have a custom descriptor like L{RecordAttribute}, it can't be
passed in the constructor; if the public way to fill in a required
attribute's value is via such an API, it becomes impossible to properly
construct an object.
This mixin implements a temporary workaround, by adding a classmethod for
creating instances of classes that use L{RecordAttribute} by explicitly
decomposing the structured record instances into their constitutent values
before actually passing them on to L{Item.__init__}.
This workaround needs to be promoted to a proper resolution before this can
be a public API; users should be able to create their own descriptors that
modify underlying database state and have them behave in the expected way
during item creation.
"""
def create(cls, **kw):
"""
Create an instance of this class, first cleaning up the keyword
arguments so they will fill in any required values.
@return: an instance of C{cls}
"""
for k, v in kw.items():
attr = getattr(cls, k, None)
if isinstance(attr, RecordAttribute):
kw.pop(k)
kw.update(attr._decompose(v))
return cls(**kw)
create = classmethod(create)
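# Illustrative sketch (not part of the original module), reusing the Address and
# Email classes from the RecordAttribute docstring above; `someStore` is a
# hypothetical axiom.store.Store instance:
#
#   msg = Email.create(store=someStore,
#                      sender=Address(localpart=u'hello', domain=u'example.com'),
#                      recipient=Address(localpart=u'goodbye',
#                                        domain=u'example.com'),
#                      body=u'hi there')
#   assert msg.sender.localpart == u'hello'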
| mit | 748,060,930,660,187,400 | 36.961111 | 79 | 0.635299 | false | 4.549268 | false | false | false |
TheHonestGene/thehonestgene-pipeline | thehonestgenepipeline/riskprediction.py | 1 | 1383 | from celery.utils.log import get_task_logger
from celery.signals import after_setup_task_logger
from thehonestgenepipeline.celery import celery
from riskpredictor.core import predictor as pred
from os import path
from . import GENOTYPE_FOLDER,DATA_FOLDER
from . import get_platform_from_genotype
from .progress_logger import CeleryProgressLogHandler
import h5py
import logging
logger = get_task_logger(pred.__name__)
# pass through environment
@after_setup_task_logger.connect
def setup_task_logger(**kwargs):
progress_handler = CeleryProgressLogHandler(celery,'riskprediction')
logger.addHandler(progress_handler)
@celery.task(serialiazer='json')
def run(id,trait):
try:
log_extra={'id':id,'progress':0,'data':trait}
logger.info('Starting Risk Prediction',extra=log_extra)
genotype_file= '%s/IMPUTED/%s.hdf5' % (GENOTYPE_FOLDER,id)
platform = get_platform_from_genotype(genotype_file)
trait_folder = '%s/PRED_DATA/%s/%s/' % (DATA_FOLDER,trait,platform)
risk = pred.predict(genotype_file,trait_folder,log_extra=log_extra)
result = {'trait':trait,'risk':risk}
logger.info('Finished Risk Prediction',extra={'id':id,'progress':100,'state':'FINISHED','data':trait})
except Exception as err:
logger.error('Error calculating risk prediction',extra=log_extra)
raise err
return result | mit | -3,424,874,194,230,446,000 | 37.444444 | 110 | 0.723066 | false | 3.537084 | false | false | false |
kaushik94/sympy | sympy/core/function.py | 1 | 112629 | """
There are three types of functions implemented in SymPy:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined function which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous function (or lambda function) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
The fourth type of functions are composites, like (sin + cos)(x); these work in
SymPy core, but are not yet part of SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print(sympy.srepr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import print_function, division
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic, _atomic
from .cache import cacheit
from .compatibility import iterable, is_sequence, as_int, ordered, Iterable
from .decorators import _sympifyit
from .expr import Expr, AtomicExpr
from .numbers import Rational, Float
from .operations import LatticeOp
from .rules import Transform
from .singleton import S
from .sympify import sympify
from sympy.core.compatibility import string_types, with_metaclass, PY3, range
from sympy.core.containers import Tuple, Dict
from sympy.core.evaluate import global_evaluate
from sympy.core.logic import fuzzy_and
from sympy.utilities import default_sort_key
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import has_dups, sift
from sympy.utilities.misc import filldedent
import mpmath
import mpmath.libmp as mlib
import inspect
from collections import Counter
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
For matrix expressions:
>>> from sympy import MatrixSymbol, sqrt
>>> A = MatrixSymbol("A", 3, 3)
>>> _coeff_isneg(-sqrt(2)*A)
True
>>> _coeff_isneg(sqrt(2)*A)
False
"""
if a.is_MatMul:
a = a.args[0]
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_extended_negative
class PoleError(Exception):
pass
class ArgumentIndexError(ValueError):
def __str__(self):
return ("Invalid operation with argument number %s for Function %s" %
(self.args[1], self.args[0]))
class BadSignatureError(TypeError):
'''Raised when a Lambda is created with an invalid signature'''
pass
class BadArgumentsError(TypeError):
'''Raised when a Lambda is called with an incorrect number of arguments'''
pass
# Python 2/3 version that does not raise a Deprecation warning
def arity(cls):
"""Return the arity of the function if it is known, else None.
When default values are specified for some arguments, they are
optional and the arity is reported as a tuple of possible values.
Examples
========
>>> from sympy.core.function import arity
>>> from sympy import log
>>> arity(lambda x: x)
1
>>> arity(log)
(1, 2)
>>> arity(lambda *x: sum(x)) is None
True
"""
eval_ = getattr(cls, 'eval', cls)
if PY3:
parameters = inspect.signature(eval_).parameters.items()
if [p for _, p in parameters if p.kind == p.VAR_POSITIONAL]:
return
p_or_k = [p for _, p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD]
# how many have no default and how many have a default value
no, yes = map(len, sift(p_or_k,
lambda p:p.default == p.empty, binary=True))
return no if not yes else tuple(range(no, no + yes + 1))
else:
cls_ = int(hasattr(cls, 'eval')) # correction for cls arguments
evalargspec = inspect.getargspec(eval_)
if evalargspec.varargs:
return
else:
evalargs = len(evalargspec.args) - cls_
if evalargspec.defaults:
# if there are default args then they are optional; the
# fewest args will occur when all defaults are used and
# the most when none are used (i.e. all args are given)
fewest = evalargs - len(evalargspec.defaults)
return tuple(range(fewest, evalargs + 1))
return evalargs
class FunctionClass(ManagedProperties):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
_new = type.__new__
def __init__(cls, *args, **kwargs):
# honor kwarg value or class-defined value before using
# the number of arguments in the eval function (if present)
nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', arity(cls)))
# Canonicalize nargs here; change to set in nargs.
if is_sequence(nargs):
if not nargs:
raise ValueError(filldedent('''
Incorrectly specified nargs as %s:
if there are no arguments, it should be
`nargs = 0`;
if there are any number of arguments,
it should be
`nargs = None`''' % str(nargs)))
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
cls._nargs = nargs
super(FunctionClass, cls).__init__(*args, **kwargs)
@property
def __signature__(self):
"""
Allow Python 3's inspect.signature to give a useful signature for
Function subclasses.
"""
# Python 3 only, but backports (like the one in IPython) still might
# call this.
try:
from inspect import signature
except ImportError:
return None
# TODO: Look at nargs
return signature(self.eval)
@property
def free_symbols(self):
return set()
@property
def xreplace(self):
# Function needs args so we define a property that returns
# a function that takes args...and then use that function
# to return the right value
return lambda rule, **_: rule.get(self, self)
@property
def nargs(self):
"""Return a set of the allowed number of arguments for the function.
Examples
========
>>> from sympy.core.function import Function
>>> from sympy.abc import x, y
>>> f = Function('f')
If the function can take any number of arguments, the set of whole
numbers is returned:
>>> Function('f').nargs
Naturals0
If the function was initialized to accept one or more arguments, a
corresponding set will be returned:
>>> Function('f', nargs=1).nargs
FiniteSet(1)
>>> Function('f', nargs=(2, 1)).nargs
FiniteSet(1, 2)
The undefined function, after application, also has the nargs
attribute; the actual number of arguments is always available by
checking the ``args`` attribute:
>>> f = Function('f')
>>> f(1).nargs
Naturals0
>>> len(f(1).args)
1
"""
from sympy.sets.sets import FiniteSet
# XXX it would be nice to handle this in __init__ but there are import
# problems with trying to import FiniteSet there
return FiniteSet(*self._nargs) if self._nargs else S.Naturals0
def __repr__(cls):
return cls.__name__
class Application(with_metaclass(FunctionClass, Basic)):
"""
Base class for applied functions.
Instances of Application represent the result of applying an application of
any type to any object.
"""
is_Function = True
@cacheit
def __new__(cls, *args, **options):
from sympy.sets.fancysets import Naturals0
from sympy.sets.sets import FiniteSet
args = list(map(sympify, args))
evaluate = options.pop('evaluate', global_evaluate[0])
# WildFunction (and anything else like it) may have nargs defined
# and we throw that value away here
options.pop('nargs', None)
if options:
raise ValueError("Unknown options: %s" % options)
if evaluate:
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
obj = super(Application, cls).__new__(cls, *args, **options)
# make nargs uniform here
sentinel = object()
objnargs = getattr(obj, "nargs", sentinel)
if objnargs is not sentinel:
# things passing through here:
# - functions subclassed from Function (e.g. myfunc(1).nargs)
# - functions like cos(1).nargs
# - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
# Canonicalize nargs here
if is_sequence(objnargs):
nargs = tuple(ordered(set(objnargs)))
elif objnargs is not None:
nargs = (as_int(objnargs),)
else:
nargs = None
else:
# things passing through here:
# - WildFunction('f').nargs
# - AppliedUndef with no nargs like Function('f')(1).nargs
nargs = obj._nargs # note the underscore here
# convert to FiniteSet
obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
return obj
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
(possible of some other class), or if the class cls should be
unmodified, return None.
Examples of eval() for the function "sign"
---------------------------------------------
.. code-block:: python
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg.is_zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, Mul):
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One:
return cls(coeff) * cls(terms)
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if (old.is_Function and new.is_Function and
callable(old) and callable(new) and
old == self.func and len(self.args) in new.nargs):
return new(*[i._subs(old, new) for i in self.args])
class Function(Application, Expr):
"""
Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
Examples
========
First example shows how to use Function as a constructor for undefined
function classes:
>>> from sympy import Function, Symbol
>>> x = Symbol('x')
>>> f = Function('f')
>>> g = Function('g')(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
Assumptions can be passed to Function, and if function is initialized with a
Symbol, the function inherits the name and assumptions associated with the Symbol:
>>> f_real = Function('f', real=True)
>>> f_real(x).is_real
True
>>> f_real_inherit = Function(Symbol('f', real=True))
>>> f_real_inherit(x).is_real
True
Note that assumptions on a function are unrelated to the assumptions on
the variable it is called on. If you want to add a relationship, subclass
Function and define the appropriate ``_eval_is_assumption`` methods.
In the following example Function is used as a base class for
``my_func`` that represents a mathematical function *my_func*. Suppose
that it is well known, that *my_func(0)* is *1* and *my_func* at infinity
goes to *0*, so we want those two simplifications to occur automatically.
Suppose also that *my_func(x)* is real exactly when *x* is real. Here is
an implementation that honours those requirements:
>>> from sympy import Function, S, oo, I, sin
>>> class my_func(Function):
...
... @classmethod
... def eval(cls, x):
... if x.is_Number:
... if x.is_zero:
... return S.One
... elif x is S.Infinity:
... return S.Zero
...
... def _eval_is_real(self):
... return self.args[0].is_real
...
>>> x = S('x')
>>> my_func(0) + sin(0)
1
>>> my_func(oo)
0
>>> my_func(3.54).n() # Not yet implemented for my_func.
my_func(3.54)
>>> my_func(I).is_real
False
In order for ``my_func`` to become useful, several other methods would
need to be implemented. See source code of some of the already
implemented functions for more complete examples.
Also, if the function can take more than one argument, then ``nargs``
must be defined, e.g. if ``my_func`` can take one or two arguments
then,
>>> class my_func(Function):
... nargs = (1, 2)
...
>>>
"""
@property
def _diff_wrt(self):
return False
@cacheit
def __new__(cls, *args, **options):
# Handle calls like Function('f')
if cls is Function:
return UndefinedFunction(*args, **options)
n = len(args)
if n not in cls.nargs:
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
temp = ('%(name)s takes %(qual)s %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': cls,
'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
'args': min(cls.nargs),
'plural': 's'*(min(cls.nargs) != 1),
'given': n})
evaluate = options.get('evaluate', global_evaluate[0])
result = super(Function, cls).__new__(cls, *args, **options)
if evaluate and isinstance(result, cls) and result.args:
pr2 = min(cls._should_evalf(a) for a in result.args)
if pr2 > 0:
pr = max(cls._should_evalf(a) for a in result.args)
result = result.evalf(mlib.libmpf.prec_to_dps(pr))
return result
@classmethod
def _should_evalf(cls, arg):
"""
Decide if the function should automatically evalf().
By default (in this implementation), this happens if (and only if) the
ARG is a floating point number.
This function is used by __new__.
Returns the precision to evalf to, or -1 if it shouldn't evalf.
"""
from sympy.core.evalf import pure_complex
if arg.is_Float:
return arg._prec
if not arg.is_Add:
return -1
m = pure_complex(arg)
if m is None or not (m[0].is_Float or m[1].is_Float):
return -1
l = [i._prec for i in m if i.is_Float]
l.append(-1)
return max(l)
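    # A concrete instance of the rule above (comment only, not a doctest):
    # cos(Integer(1)) stays symbolic as cos(1) because _should_evalf returns -1,
    # while cos(Float('1.0')) is evaluated numerically because the Float
    # argument reports its binary precision.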
@classmethod
def class_key(cls):
from sympy.sets.fancysets import Naturals0
funcs = {
'exp': 10,
'log': 11,
'sin': 20,
'cos': 21,
'tan': 22,
'cot': 23,
'sinh': 30,
'cosh': 31,
'tanh': 32,
'coth': 33,
'conjugate': 40,
're': 41,
'im': 42,
'arg': 43,
}
name = cls.__name__
try:
i = funcs[name]
except KeyError:
i = 0 if isinstance(cls.nargs, Naturals0) else 10000
return 4, i, name
@property
def is_commutative(self):
"""
Returns whether the function is commutative.
"""
if all(getattr(t, 'is_commutative') for t in self.args):
return True
else:
return False
def _eval_evalf(self, prec):
def _get_mpmath_func(fname):
"""Lookup mpmath function based on name"""
if isinstance(self, AppliedUndef):
# Shouldn't lookup in mpmath but might have ._imp_
return None
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS.get(fname, None)
if fname is None:
return None
return getattr(mpmath, fname)
func = _get_mpmath_func(self.func.__name__)
# Fall-back evaluation
if func is None:
imp = getattr(self, '_imp_', None)
if imp is None:
return None
try:
return Float(imp(*[i.evalf(prec) for i in self.args]), prec)
except (TypeError, ValueError):
return None
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in self.args]
def bad(m):
from mpmath import mpf, mpc
# the precision of an mpf value is the last element
# if that is 1 (and m[1] is not 1 which would indicate a
# power of 2), then the eval failed; so check that none of
# the arguments failed to compute to a finite precision.
# Note: An mpc value has two parts, the re and imag tuple;
# check each of those parts, too. Anything else is allowed to
# pass
if isinstance(m, mpf):
m = m._mpf_
return m[1] != 1 and m[-1] == 1
elif isinstance(m, mpc):
m, n = m._mpc_
return m[1] != 1 and m[-1] == 1 and \
n[1] != 1 and n[-1] == 1
else:
return False
if any(bad(a) for a in args):
raise ValueError # one or more args failed to compute with significance
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
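# Illustrative note (not part of the original source): when the function has no
# mpmath counterpart, the ._imp_ fallback above is what lets objects created by
# sympy.utilities.lambdify.implemented_function evaluate numerically, e.g.
#     g = implemented_function('g', lambda x: x + 1)
#     g(2).evalf()   # -> 3.0 (a Float), via the _imp_ branch rather than mpmath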
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da.is_zero:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = Function.fdiff(self, i)
l.append(df * da)
return Add(*l)
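# Illustrative note (not part of the original source): the loop above is the
# generic chain rule.  For a concrete subclass such as sin, fdiff(1) returns
# cos of the argument, so sin(x**2).diff(x) is assembled here as
#     df * da  ==  cos(x**2) * Derivative(x**2, x)  ->  2*x*cos(x**2)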
def _eval_is_commutative(self):
return fuzzy_and(a.is_commutative for a in self.args)
def as_base_exp(self):
"""
Returns the function as the 2-tuple (base, exponent).
"""
return self, S.One
def _eval_aseries(self, n, args0, x, logx):
"""
Compute an asymptotic expansion around args0, in terms of self.args.
This function is only used internally by _eval_nseries and should not
be called directly; derived classes can overwrite this to implement
asymptotic expansions.
"""
from sympy.utilities.misc import filldedent
raise PoleError(filldedent('''
Asymptotic expansion of %s around %s is
not implemented.''' % (type(self), args0)))
def _eval_nseries(self, x, n, logx):
"""
This function does compute series for multivariate functions,
but the expansion is always in terms of *one* variable.
Examples
========
>>> from sympy import atan2
>>> from sympy.abc import x, y
>>> atan2(x, y).series(x, n=2)
atan2(0, y) + x/y + O(x**2)
>>> atan2(x, y).series(y, n=2)
-y/x + atan2(x, 0) + O(y**2)
This function also computes asymptotic expansions, if necessary
and possible:
>>> from sympy import loggamma
>>> loggamma(1/x)._eval_nseries(x,0,None)
-1/x - log(x)/x + log(x)/2 + O(1)
"""
from sympy import Order
from sympy.sets.sets import FiniteSet
args = self.args
args0 = [t.limit(x, 0) for t in args]
if any(t.is_finite is False for t in args0):
from sympy import oo, zoo, nan
# XXX could use t.as_leading_term(x) here but it's a little
# slower
a = [t.compute_leading_term(x, logx=logx) for t in args]
a0 = [t.limit(x, 0) for t in a]
if any([t.has(oo, -oo, zoo, nan) for t in a0]):
return self._eval_aseries(n, args0, x, logx)
# Careful: the argument goes to oo, but only logarithmically so. We
# are supposed to do a power series expansion "around the
# logarithmic term". e.g.
# f(1+x+log(x))
# -> f(1+logx) + x*f'(1+logx) + O(x**2)
# where 'logx' is given in the argument
a = [t._eval_nseries(x, n, logx) for t in args]
z = [r - r0 for (r, r0) in zip(a, a0)]
p = [Dummy() for _ in z]
q = []
v = None
for ai, zi, pi in zip(a0, z, p):
if zi.has(x):
if v is not None:
raise NotImplementedError
q.append(ai + pi)
v = pi
else:
q.append(ai)
e1 = self.func(*q)
if v is None:
return e1
s = e1._eval_nseries(v, n, logx)
o = s.getO()
s = s.removeO()
s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
return s
if (self.func.nargs is S.Naturals0
or (self.func.nargs == FiniteSet(1) and args0[0])
or any(c > 1 for c in self.func.nargs)):
e = self
e1 = e.expand()
if e == e1:
#for example when e = sin(x+1) or e = sin(cos(x))
#let's try the general algorithm
term = e.subs(x, S.Zero)
if term.is_finite is False or term is S.NaN:
raise PoleError("Cannot expand %s around 0" % (self))
series = term
fact = S.One
_x = Dummy('x')
e = e.subs(x, _x)
for i in range(n - 1):
i += 1
fact *= Rational(i)
e = e.diff(_x)
subs = e.subs(_x, S.Zero)
if subs is S.NaN:
# try to evaluate a limit if we have to
subs = e.limit(_x, S.Zero)
if subs.is_finite is False:
raise PoleError("Cannot expand %s around 0" % (self))
term = subs*(x**i)/fact
term = term.expand()
series += term
return series + Order(x**n, x)
return e1.nseries(x, n=n, logx=logx)
arg = self.args[0]
l = []
g = None
# try to predict a number of terms needed
nterms = n + 2
cf = Order(arg.as_leading_term(x), x).getn()
if cf != 0:
nterms = int(nterms / cf)
for i in range(nterms):
g = self.taylor_term(i, arg, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
return Add(*l) + Order(x**n, x)
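# Illustrative note (not part of the original source): the taylor_term loop
# just above is the path taken for ordinary univariate expansions, e.g.
#     sin(x).series(x, n=4)  ->  x - x**3/6 + O(x**4)
# while the earlier branches handle singular points (via _eval_aseries) and
# compound arguments such as sin(cos(x)) or sin(x + 1).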
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if not (1 <= argindex <= len(self.args)):
raise ArgumentIndexError(self, argindex)
ix = argindex - 1
A = self.args[ix]
if A._diff_wrt:
if len(self.args) == 1:
return Derivative(self, A)
if A.is_Symbol:
for i, v in enumerate(self.args):
if i != ix and A in v.free_symbols:
# it can't be in any other argument's free symbols
# issue 8510
break
else:
return Derivative(self, A)
else:
free = A.free_symbols
for i, a in enumerate(self.args):
if ix != i and a.free_symbols & free:
break
else:
# there is no possible interaction between args
return Derivative(self, A)
# See issue 4624 and issue 4719, 5600 and 8510
D = Dummy('xi_%i' % argindex, dummy_index=hash(A))
args = self.args[:ix] + (D,) + self.args[ix + 1:]
return Subs(Derivative(self.func(*args), D), D, A)
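# Illustrative note (not part of the original source): for an undefined
# function applied to plain symbols, fdiff returns a Derivative directly, while
# a non-symbol argument is routed through the Dummy/Subs branch above, e.g.
#     f = Function('f')
#     f(x, y).fdiff(1)  -> Derivative(f(x, y), x)
#     f(2*x).fdiff(1)   -> Subs(Derivative(f(xi_1), xi_1), xi_1, 2*x)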
def _eval_as_leading_term(self, x):
"""Stub that should be overridden by new Functions to return
the first non-zero term in a series if ever an x-dependent
argument whose leading term vanishes as x -> 0 might be encountered.
See, for example, cos._eval_as_leading_term.
"""
from sympy import Order
args = [a.as_leading_term(x) for a in self.args]
o = Order(1, x)
if any(x in a.free_symbols and o.contains(a) for a in args):
# Whereas x and any finite number are contained in O(1, x),
# expressions like 1/x are not. If any arg simplified to a
# vanishing expression as x -> 0 (like x or x**2, but not
# 3, 1/x, etc...) then the _eval_as_leading_term is needed
# to supply the first non-zero term of the series,
#
# e.g. expression leading term
# ---------- ------------
# cos(1/x) cos(1/x)
# cos(cos(x)) cos(1)
# cos(x) 1 <- _eval_as_leading_term needed
# sin(x) x <- _eval_as_leading_term needed
#
raise NotImplementedError(
'%s has no _eval_as_leading_term routine' % self.func)
else:
return self.func(*args)
def _sage_(self):
import sage.all as sage
fname = self.func.__name__
func = getattr(sage, fname, None)
args = [arg._sage_() for arg in self.args]
# In the case the function is not known in sage:
if func is None:
import sympy
if getattr(sympy, fname, None) is None:
# abstract function
return sage.function(fname)(*args)
else:
# the function defined in sympy is not known in sage
# this exception is caught in sage
raise AttributeError
return func(*args)
class AppliedUndef(Function):
"""
Base class for expressions resulting from the application of an undefined
function.
"""
is_number = False
def __new__(cls, *args, **options):
args = list(map(sympify, args))
u = [a.name for a in args if isinstance(a, UndefinedFunction)]
if u:
raise TypeError('Invalid argument: expecting an expression, not UndefinedFunction%s: %s' % (
's'*(len(u) > 1), ', '.join(u)))
obj = super(AppliedUndef, cls).__new__(cls, *args, **options)
return obj
def _eval_as_leading_term(self, x):
return self
def _sage_(self):
import sage.all as sage
fname = str(self.func)
args = [arg._sage_() for arg in self.args]
func = sage.function(fname)(*args)
return func
@property
def _diff_wrt(self):
"""
Allow derivatives wrt to undefined functions.
Examples
========
>>> from sympy import Function, Symbol
>>> f = Function('f')
>>> x = Symbol('x')
>>> f(x)._diff_wrt
True
>>> f(x).diff(x)
Derivative(f(x), x)
"""
return True
class UndefSageHelper(object):
"""
Helper to facilitate Sage conversion.
"""
def __get__(self, ins, typ):
import sage.all as sage
if ins is None:
return lambda: sage.function(typ.__name__)
else:
args = [arg._sage_() for arg in ins.args]
return lambda : sage.function(ins.__class__.__name__)(*args)
_undef_sage_helper = UndefSageHelper()
class UndefinedFunction(FunctionClass):
"""
The (meta)class of undefined functions.
"""
def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs):
from .symbol import _filter_assumptions
# Allow Function('f', real=True)
# and/or Function(Symbol('f', real=True))
assumptions, kwargs = _filter_assumptions(kwargs)
if isinstance(name, Symbol):
assumptions = name._merge(assumptions)
name = name.name
elif not isinstance(name, string_types):
raise TypeError('expecting string or Symbol for name')
else:
commutative = assumptions.get('commutative', None)
assumptions = Symbol(name, **assumptions).assumptions0
if commutative is None:
assumptions.pop('commutative')
__dict__ = __dict__ or {}
# put the `is_*` assumptions into __dict__
__dict__.update({'is_%s' % k: v for k, v in assumptions.items()})
# You can add other attributes, although they do have to be hashable
# (but seriously, if you want to add anything other than assumptions,
# just subclass Function)
__dict__.update(kwargs)
# add back the sanitized assumptions without the is_ prefix
kwargs.update(assumptions)
# Save these for __eq__
__dict__.update({'_kwargs': kwargs})
# do this for pickling
__dict__['__module__'] = None
obj = super(UndefinedFunction, mcl).__new__(mcl, name, bases, __dict__)
obj.name = name
obj._sage_ = _undef_sage_helper
return obj
def __instancecheck__(cls, instance):
return cls in type(instance).__mro__
_kwargs = {}
def __hash__(self):
return hash((self.class_key(), frozenset(self._kwargs.items())))
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.class_key() == other.class_key() and
self._kwargs == other._kwargs)
def __ne__(self, other):
return not self == other
@property
def _diff_wrt(self):
return False
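# Illustrative note (not part of the original source): because __eq__ above
# compares the saved assumption kwargs as well as the name, undefined functions
# with the same name but different assumptions are distinct, e.g.
#     Function('f') == Function('f')              # True
#     Function('f') == Function('f', real=True)   # False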
class WildFunction(Function, AtomicExpr):
"""
A WildFunction function matches any function (with its arguments).
Examples
========
>>> from sympy import WildFunction, Function, cos
>>> from sympy.abc import x, y
>>> F = WildFunction('F')
>>> f = Function('f')
>>> F.nargs
Naturals0
>>> x.match(F)
>>> F.match(F)
{F_: F_}
>>> f(x).match(F)
{F_: f(x)}
>>> cos(x).match(F)
{F_: cos(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a given number of arguments, set ``nargs`` to the
desired value at instantiation:
>>> F = WildFunction('F', nargs=2)
>>> F.nargs
FiniteSet(2)
>>> f(x).match(F)
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a range of arguments, set ``nargs`` to a tuple
containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
then functions with 1 or 2 arguments will be matched.
>>> F = WildFunction('F', nargs=(1, 2))
>>> F.nargs
FiniteSet(1, 2)
>>> f(x).match(F)
{F_: f(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
>>> f(x, y, 1).match(F)
"""
include = set()
def __init__(cls, name, **assumptions):
from sympy.sets.sets import Set, FiniteSet
cls.name = name
nargs = assumptions.pop('nargs', S.Naturals0)
if not isinstance(nargs, Set):
# Canonicalize nargs here. See also FunctionClass.
if is_sequence(nargs):
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
nargs = FiniteSet(*nargs)
cls.nargs = nargs
def matches(self, expr, repl_dict={}, old=False):
if not isinstance(expr, (AppliedUndef, Function)):
return None
if len(expr.args) not in self.nargs:
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
Examples
========
>>> from sympy import Derivative, Function, symbols, Subs
>>> from sympy.abc import x, y
>>> f, g = symbols('f g', cls=Function)
>>> Derivative(x**2, x, evaluate=True)
2*x
Denesting of derivatives retains the ordering of variables:
>>> Derivative(Derivative(f(x, y), y), x)
Derivative(f(x, y), y, x)
Contiguously identical symbols are merged into a tuple giving
the symbol and the count:
>>> Derivative(f(x), x, x, y, x)
Derivative(f(x), (x, 2), y, x)
If the derivative cannot be performed, and evaluate is True, the
order of the variables of differentiation will be made canonical:
>>> Derivative(f(x, y), y, x, evaluate=True)
Derivative(f(x, y), x, y)
Derivatives with respect to undefined functions can be calculated:
>>> Derivative(f(x)**2, f(x), evaluate=True)
2*f(x)
Such derivatives will show up when the chain rule is used to
evaluate a derivative:
>>> f(g(x)).diff(x)
Derivative(f(g(x)), g(x))*Derivative(g(x), x)
Substitution is used to represent derivatives of functions with
arguments that are not symbols or functions:
>>> f(2*x + 3).diff(x) == 2*Subs(f(y).diff(y), y, 2*x + 3)
True
Notes
=====
Simplification of high-order derivatives:
Because there can be a significant amount of simplification that can be
done when multiple differentiations are performed, results will be
automatically simplified in a fairly conservative fashion unless the
keyword ``simplify`` is set to False.
>>> from sympy import cos, sin, sqrt, diff, Function, symbols
>>> from sympy.abc import x, y, z
>>> f, g = symbols('f,g', cls=Function)
>>> e = sqrt((x + 1)**2 + x)
>>> diff(e, (x, 5), simplify=False).count_ops()
136
>>> diff(e, (x, 5)).count_ops()
30
Ordering of variables:
If evaluate is set to True and the expression cannot be evaluated, the
list of differentiation symbols will be sorted, that is, the expression is
assumed to have continuous derivatives up to the order asked.
Derivative wrt non-Symbols:
For the most part, one may not differentiate wrt non-symbols.
For example, we do not allow differentiation wrt `x*y` because
there are multiple ways of structurally defining where x*y appears
in an expression: a very strict definition would make
(x*y*z).diff(x*y) == 0. Derivatives wrt defined functions (like
cos(x)) are not allowed, either:
>>> (x*y*z).diff(x*y)
Traceback (most recent call last):
...
ValueError: Can't calculate derivative wrt x*y.
To make it easier to work with variational calculus, however,
derivatives wrt AppliedUndef and Derivatives are allowed.
For example, in the Euler-Lagrange method one may write
F(t, u, v) where u = f(t) and v = f'(t). These variables can be
written explicitly as functions of time::
>>> from sympy.abc import t
>>> F = Function('F')
>>> U = f(t)
>>> V = U.diff(t)
The derivative wrt f(t) can be obtained directly:
>>> direct = F(t, U, V).diff(U)
When differentiation wrt a non-Symbol is attempted, the non-Symbol
is temporarily converted to a Symbol while the differentiation
is performed and the same answer is obtained:
>>> indirect = F(t, U, V).subs(U, x).diff(x).subs(x, U)
>>> assert direct == indirect
The implication of this non-symbol replacement is that all
functions are treated as independent of other functions and the
symbols are independent of the functions that contain them::
>>> x.diff(f(x))
0
>>> g(x).diff(f(x))
0
It also means that derivatives are assumed to depend only
on the variables of differentiation, not on anything contained
within the expression being differentiated::
>>> F = f(x)
>>> Fx = F.diff(x)
>>> Fx.diff(F) # derivative depends on x, not F
0
>>> Fxx = Fx.diff(x)
>>> Fxx.diff(Fx) # derivative depends on x, not Fx
0
The last example can be made explicit by showing the replacement
of Fx in Fxx with y:
>>> Fxx.subs(Fx, y)
Derivative(y, x)
Since that in itself will evaluate to zero, differentiating
wrt Fx will also be zero:
>>> _.doit()
0
Replacing undefined functions with concrete expressions
One must be careful to replace undefined functions with expressions
that contain variables consistent with the function definition and
the variables of differentiation or else inconsistent results will
be obtained. Consider the following example:
>>> eq = f(x)*g(y)
>>> eq.subs(f(x), x*y).diff(x, y).doit()
y*Derivative(g(y), y) + g(y)
>>> eq.diff(x, y).subs(f(x), x*y).doit()
y*Derivative(g(y), y)
The results differ because `f(x)` was replaced with an expression
that involved both variables of differentiation. In the abstract
case, differentiation of `f(x)` by `y` is 0; in the concrete case,
the presence of `y` made that derivative nonvanishing and produced
the extra `g(y)` term.
Defining differentiation for an object
An object must define a ``._eval_derivative(symbol)`` method that returns
the differentiation result. This function only needs to consider the
non-trivial case where expr contains symbol and it should call the diff()
method internally (not _eval_derivative); Derivative should be the only
one to call _eval_derivative.
Any class can allow derivatives to be taken with respect to
itself (while indicating its scalar nature). See the
docstring of Expr._diff_wrt.
See Also
========
_sort_variable_count
"""
is_Derivative = True
@property
def _diff_wrt(self):
"""An expression may be differentiated wrt a Derivative if
it is in elementary form.
Examples
========
>>> from sympy import Function, Derivative, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> Derivative(f(x), x)._diff_wrt
True
>>> Derivative(cos(x), x)._diff_wrt
False
>>> Derivative(x + 1, x)._diff_wrt
False
A Derivative might be an unevaluated form of what will not be
a valid variable of differentiation if evaluated. For example,
>>> Derivative(f(f(x)), x).doit()
Derivative(f(x), x)*Derivative(f(f(x)), f(x))
Such an expression will present the same ambiguities as arise
when dealing with any other product, like ``2*x``, so ``_diff_wrt``
is False:
>>> Derivative(f(f(x)), x)._diff_wrt
False
"""
return self.expr._diff_wrt and isinstance(self.doit(), Derivative)
def __new__(cls, expr, *variables, **kwargs):
from sympy.matrices.common import MatrixCommon
from sympy import Integer, MatrixExpr
from sympy.tensor.array import Array, NDimArray
from sympy.utilities.misc import filldedent
expr = sympify(expr)
symbols_or_none = getattr(expr, "free_symbols", None)
has_symbol_set = isinstance(symbols_or_none, set)
if not has_symbol_set:
raise ValueError(filldedent('''
Since there are no variables in the expression %s,
it cannot be differentiated.''' % expr))
# determine value for variables if it wasn't given
if not variables:
variables = expr.free_symbols
if len(variables) != 1:
if expr.is_number:
return S.Zero
if len(variables) == 0:
raise ValueError(filldedent('''
Since there are no variables in the expression,
the variable(s) of differentiation must be supplied
to differentiate %s''' % expr))
else:
raise ValueError(filldedent('''
Since there is more than one variable in the
expression, the variable(s) of differentiation
must be supplied to differentiate %s''' % expr))
# Standardize the variables by sympifying them:
variables = list(sympify(variables))
# Split the list of variables into a list of the variables we are diff
# wrt, where each element of the list has the form (s, count) where
# s is the entity to diff wrt and count is the order of the
# derivative.
variable_count = []
array_likes = (tuple, list, Tuple)
for i, v in enumerate(variables):
if isinstance(v, Integer):
if i == 0:
raise ValueError("First variable cannot be a number: %i" % v)
count = v
prev, prevcount = variable_count[-1]
if prevcount != 1:
raise TypeError("tuple {0} followed by number {1}".format((prev, prevcount), v))
if count == 0:
variable_count.pop()
else:
variable_count[-1] = Tuple(prev, count)
else:
if isinstance(v, array_likes):
if len(v) == 0:
# Ignore empty tuples: Derivative(expr, ... , (), ... )
continue
if isinstance(v[0], array_likes):
# Derive by array: Derivative(expr, ... , [[x, y, z]], ... )
if len(v) == 1:
v = Array(v[0])
count = 1
else:
v, count = v
v = Array(v)
else:
v, count = v
if count == 0:
continue
elif isinstance(v, UndefinedFunction):
raise TypeError(
"cannot differentiate wrt "
"UndefinedFunction: %s" % v)
else:
count = 1
variable_count.append(Tuple(v, count))
# light evaluation of contiguous, identical
# items: (x, 1), (x, 1) -> (x, 2)
merged = []
for t in variable_count:
v, c = t
if c.is_negative:
raise ValueError(
'order of differentiation must be nonnegative')
if merged and merged[-1][0] == v:
c += merged[-1][1]
if not c:
merged.pop()
else:
merged[-1] = Tuple(v, c)
else:
merged.append(t)
variable_count = merged
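# Illustrative note (not part of the original source): at this point a call
# such as Derivative(f(x, y), x, x, y) or Derivative(f(x, y), (x, 2), y) has
# been normalized to variable_count == [(x, 2), (y, 1)]; the checks below
# validate each entry and look for quick zero results.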
# sanity check of variables of differentation; we waited
# until the counts were computed since some variables may
# have been removed because the count was 0
for v, c in variable_count:
# v must have _diff_wrt True
if not v._diff_wrt:
__ = '' # filler to make error message neater
raise ValueError(filldedent('''
Can't calculate derivative wrt %s.%s''' % (v,
__)))
# We make a special case for 0th derivative, because there is no
# good way to unambiguously print this.
if len(variable_count) == 0:
return expr
evaluate = kwargs.get('evaluate', False)
if evaluate:
if isinstance(expr, Derivative):
expr = expr.canonical
variable_count = [
(v.canonical if isinstance(v, Derivative) else v, c)
for v, c in variable_count]
# Look for a quick exit if there are symbols that don't appear in
# expression at all. Note, this cannot check non-symbols like
# Derivatives as those can be created by intermediate
# derivatives.
zero = False
free = expr.free_symbols
for v, c in variable_count:
vfree = v.free_symbols
if c.is_positive and vfree:
if isinstance(v, AppliedUndef):
# these match exactly since
# x.diff(f(x)) == g(x).diff(f(x)) == 0
# and are not created by differentiation
D = Dummy()
if not expr.xreplace({v: D}).has(D):
zero = True
break
elif isinstance(v, MatrixExpr):
zero = False
break
elif isinstance(v, Symbol) and v not in free:
zero = True
break
else:
if not free & vfree:
# e.g. v is IndexedBase or Matrix
zero = True
break
if zero:
if isinstance(expr, (MatrixCommon, NDimArray)):
return expr.zeros(*expr.shape)
elif isinstance(expr, MatrixExpr):
from sympy import ZeroMatrix
return ZeroMatrix(*expr.shape)
elif expr.is_scalar:
return S.Zero
# make the order of symbols canonical
#TODO: check if assumption of discontinuous derivatives exist
variable_count = cls._sort_variable_count(variable_count)
# denest
if isinstance(expr, Derivative):
variable_count = list(expr.variable_count) + variable_count
expr = expr.expr
return Derivative(expr, *variable_count, **kwargs)
# we return here if evaluate is False or if there is no
# _eval_derivative method
if not evaluate or not hasattr(expr, '_eval_derivative'):
# return an unevaluated Derivative
if evaluate and variable_count == [(expr, 1)] and expr.is_scalar:
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
return S.One
return Expr.__new__(cls, expr, *variable_count)
# evaluate the derivative by calling _eval_derivative method
# of expr for each variable
# -------------------------------------------------------------
nderivs = 0 # how many derivatives were performed
unhandled = []
for i, (v, count) in enumerate(variable_count):
old_expr = expr
old_v = None
is_symbol = v.is_symbol or isinstance(v,
(Iterable, Tuple, MatrixCommon, NDimArray))
if not is_symbol:
old_v = v
v = Dummy('xi')
expr = expr.xreplace({old_v: v})
# Derivatives and UndefinedFunctions are independent
# of all others
clashing = not (isinstance(old_v, Derivative) or \
isinstance(old_v, AppliedUndef))
if v not in expr.free_symbols and not clashing:
return expr.diff(v) # expr's version of 0
if not old_v.is_scalar and not hasattr(
old_v, '_eval_derivative'):
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
expr *= old_v.diff(old_v)
# Evaluate the derivative `n` times. If
# `_eval_derivative_n_times` is not overridden by the current
# object, the default in `Basic` will call a loop over
# `_eval_derivative`:
obj = expr._eval_derivative_n_times(v, count)
if obj is not None and obj.is_zero:
return obj
nderivs += count
if old_v is not None:
if obj is not None:
# remove the dummy that was used
obj = obj.subs(v, old_v)
# restore expr
expr = old_expr
if obj is None:
# we've already checked for quick-exit conditions
# that give 0 so the remaining variables
# are contained in the expression but the expression
# did not compute a derivative so we stop taking
# derivatives
unhandled = variable_count[i:]
break
expr = obj
# what we have so far can be made canonical
expr = expr.replace(
lambda x: isinstance(x, Derivative),
lambda x: x.canonical)
if unhandled:
if isinstance(expr, Derivative):
unhandled = list(expr.variable_count) + unhandled
expr = expr.expr
expr = Expr.__new__(cls, expr, *unhandled)
if (nderivs > 1) == True and kwargs.get('simplify', True):
from sympy.core.exprtools import factor_terms
from sympy.simplify.simplify import signsimp
expr = factor_terms(signsimp(expr))
return expr
@property
def canonical(cls):
return cls.func(cls.expr,
*Derivative._sort_variable_count(cls.variable_count))
@classmethod
def _sort_variable_count(cls, vc):
"""
Sort (variable, count) pairs into canonical order while
retaining order of variables that do not commute during
differentiation:
* symbols and functions commute with each other
* derivatives commute with each other
* a derivative doesn't commute with anything it contains
* any other object is not allowed to commute if it has
free symbols in common with another object
Examples
========
>>> from sympy import Derivative, Function, symbols, cos
>>> vsort = Derivative._sort_variable_count
>>> x, y, z = symbols('x y z')
>>> f, g, h = symbols('f g h', cls=Function)
Contiguous items are collapsed into one pair:
>>> vsort([(x, 1), (x, 1)])
[(x, 2)]
>>> vsort([(y, 1), (f(x), 1), (y, 1), (f(x), 1)])
[(y, 2), (f(x), 2)]
Ordering is canonical.
>>> def vsort0(*v):
... # docstring helper to
... # change vi -> (vi, 0), sort, and return vi vals
... return [i[0] for i in vsort([(i, 0) for i in v])]
>>> vsort0(y, x)
[x, y]
>>> vsort0(g(y), g(x), f(y))
[f(y), g(x), g(y)]
Symbols are sorted as far to the left as possible but never
move to the left of a derivative having the same symbol in
its variables; the same applies to AppliedUndef which are
always sorted after Symbols:
>>> dfx = f(x).diff(x)
>>> assert vsort0(dfx, y) == [y, dfx]
>>> assert vsort0(dfx, x) == [dfx, x]
"""
from sympy.utilities.iterables import uniq, topological_sort
if not vc:
return []
vc = list(vc)
if len(vc) == 1:
return [Tuple(*vc[0])]
V = list(range(len(vc)))
E = []
v = lambda i: vc[i][0]
D = Dummy()
def _block(d, v, wrt=False):
# return True if v should not come before d else False
if d == v:
return wrt
if d.is_Symbol:
return False
if isinstance(d, Derivative):
# a derivative blocks if any of its variables contain
# v; the wrt flag will return True for an exact match
# and will cause an AppliedUndef to block if v is in
# the arguments
if any(_block(k, v, wrt=True)
for k in d._wrt_variables):
return True
return False
if not wrt and isinstance(d, AppliedUndef):
return False
if v.is_Symbol:
return v in d.free_symbols
if isinstance(v, AppliedUndef):
return _block(d.xreplace({v: D}), D)
return d.free_symbols & v.free_symbols
for i in range(len(vc)):
for j in range(i):
if _block(v(j), v(i)):
E.append((j,i))
# this is the default ordering to use in case of ties
O = dict(zip(ordered(uniq([i for i, c in vc])), range(len(vc))))
ix = topological_sort((V, E), key=lambda i: O[v(i)])
# merge counts of contiguously identical items
merged = []
for v, c in [vc[i] for i in ix]:
if merged and merged[-1][0] == v:
merged[-1][1] += c
else:
merged.append([v, c])
return [Tuple(*i) for i in merged]
def _eval_is_commutative(self):
return self.expr.is_commutative
def _eval_derivative(self, v):
# If v (the variable of differentiation) is not in
# self.variables, we might be able to take the derivative.
if v not in self._wrt_variables:
dedv = self.expr.diff(v)
if isinstance(dedv, Derivative):
return dedv.func(dedv.expr, *(self.variable_count + dedv.variable_count))
# dedv (d(self.expr)/dv) could have simplified things such that the
# derivative wrt things in self.variables can now be done. Thus,
# we set evaluate=True to see if there are any other derivatives
# that can be done. The most common case is when dedv is a simple
# number so that the derivative wrt anything else will vanish.
return self.func(dedv, *self.variables, evaluate=True)
# In this case v was in self.variables so the derivative wrt v has
# already been attempted and was not computed, either because it
# couldn't be or evaluate=False originally.
variable_count = list(self.variable_count)
variable_count.append((v, 1))
return self.func(self.expr, *variable_count, evaluate=False)
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
rv = self.func(expr, *self.variable_count, **hints)
if rv != self and rv.has(Derivative):
rv = rv.doit(**hints)
return rv
@_sympifyit('z0', NotImplementedError)
def doit_numerically(self, z0):
"""
Evaluate the derivative at z numerically.
When we can represent derivatives at a point, this should be folded
into the normal evalf. For now, we need a special method.
"""
if len(self.free_symbols) != 1 or len(self.variables) != 1:
raise NotImplementedError('partials and higher order derivatives')
z = list(self.free_symbols)[0]
def eval(x):
f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
return f0._to_mpmath(mpmath.mp.prec)
return Expr._from_mpmath(mpmath.diff(eval,
z0._to_mpmath(mpmath.mp.prec)),
mpmath.mp.prec)
@property
def expr(self):
return self._args[0]
@property
def _wrt_variables(self):
# return the variables of differentiation without
# respect to the type of count (int or symbolic)
return [i[0] for i in self.variable_count]
@property
def variables(self):
# TODO: deprecate? YES, make this 'enumerated_variables' and
# name _wrt_variables as variables
# TODO: support for `d^n`?
rv = []
for v, count in self.variable_count:
if not count.is_Integer:
raise TypeError(filldedent('''
Cannot give expansion for symbolic count. If you just
want a list of all variables of differentiation, use
_wrt_variables.'''))
rv.extend([v]*count)
return tuple(rv)
@property
def variable_count(self):
return self._args[1:]
@property
def derivative_count(self):
return sum([count for var, count in self.variable_count], 0)
@property
def free_symbols(self):
ret = self.expr.free_symbols
# Add symbolic counts to free_symbols
for var, count in self.variable_count:
ret.update(count.free_symbols)
return ret
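# Illustrative note (not part of the original source): symbolic orders of
# differentiation contribute their own free symbols, which is why the counts
# are scanned in addition to self.expr, e.g. with n = Symbol('n'):
#     Derivative(f(x), (x, n)).free_symbols  -> {x, n}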
def _eval_subs(self, old, new):
# The substitution (old, new) cannot be done inside
# Derivative(expr, vars) for a variety of reasons
# as handled below.
if old in self._wrt_variables:
# first handle the counts
expr = self.func(self.expr, *[(v, c.subs(old, new))
for v, c in self.variable_count])
if expr != self:
return expr._eval_subs(old, new)
# quick exit case
if not getattr(new, '_diff_wrt', False):
# case (0): new is not a valid variable of
# differentiation
if isinstance(old, Symbol):
# don't introduce a new symbol if the old will do
return Subs(self, old, new)
else:
xi = Dummy('xi')
return Subs(self.xreplace({old: xi}), xi, new)
# If both are Derivatives with the same expr, check if old is
# equivalent to self or if old is a subderivative of self.
if old.is_Derivative and old.expr == self.expr:
if self.canonical == old.canonical:
return new
# collections.Counter doesn't have __le__
def _subset(a, b):
return all((a[i] <= b[i]) == True for i in a)
old_vars = Counter(dict(reversed(old.variable_count)))
self_vars = Counter(dict(reversed(self.variable_count)))
if _subset(old_vars, self_vars):
return Derivative(new, *(self_vars - old_vars).items()).canonical
args = list(self.args)
newargs = list(x._subs(old, new) for x in args)
if args[0] == old:
# complete replacement of self.expr
# we already checked that the new is valid so we know
# it won't be a problem should it appear in variables
return Derivative(*newargs)
if newargs[0] != args[0]:
# case (1) can't change expr by introducing something that is in
# the _wrt_variables if it was already in the expr
# e.g.
# for Derivative(f(x, g(y)), y), x cannot be replaced with
# anything that has y in it; for f(g(x), g(y)).diff(g(y))
# g(x) cannot be replaced with anything that has g(y)
syms = {vi: Dummy() for vi in self._wrt_variables
if not vi.is_Symbol}
wrt = set(syms.get(vi, vi) for vi in self._wrt_variables)
forbidden = args[0].xreplace(syms).free_symbols & wrt
nfree = new.xreplace(syms).free_symbols
ofree = old.xreplace(syms).free_symbols
if (nfree - ofree) & forbidden:
return Subs(self, old, new)
viter = ((i, j) for ((i, _), (j, _)) in zip(newargs[1:], args[1:]))
if any(i != j for i, j in viter): # a wrt-variable change
# case (2) can't change vars by introducing a variable
# that is contained in expr, e.g.
# for Derivative(f(z, g(h(x), y)), y), y cannot be changed to
# x, h(x), or g(h(x), y)
for a in _atomic(self.expr, recursive=True):
for i in range(1, len(newargs)):
vi, _ = newargs[i]
if a == vi and vi != args[i][0]:
return Subs(self, old, new)
# more arg-wise checks
vc = newargs[1:]
oldv = self._wrt_variables
newe = self.expr
subs = []
for i, (vi, ci) in enumerate(vc):
if not vi._diff_wrt:
# case (3) invalid differentiation expression so
# create a replacement dummy
xi = Dummy('xi_%i' % i)
# replace the old valid variable with the dummy
# in the expression
newe = newe.xreplace({oldv[i]: xi})
# and replace the bad variable with the dummy
vc[i] = (xi, ci)
# and record the dummy with the new (invalid)
# differentiation expression
subs.append((xi, vi))
if subs:
# handle any residual substitution in the expression
newe = newe._subs(old, new)
# return the Subs-wrapped derivative
return Subs(Derivative(newe, *vc), *zip(*subs))
# everything was ok
return Derivative(*newargs)
def _eval_lseries(self, x, logx):
dx = self.variables
for term in self.expr.lseries(x, logx=logx):
yield self.func(term, *dx)
def _eval_nseries(self, x, n, logx):
arg = self.expr.nseries(x, n=n, logx=logx)
o = arg.getO()
dx = self.variables
rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
if o:
rv.append(o/x)
return Add(*rv)
def _eval_as_leading_term(self, x):
series_gen = self.expr.lseries(x)
d = S.Zero
for leading_term in series_gen:
d = diff(leading_term, *self.variables)
if d != 0:
break
return d
def _sage_(self):
import sage.all as sage
args = [arg._sage_() for arg in self.args]
return sage.derivative(*args)
def as_finite_difference(self, points=1, x0=None, wrt=None):
""" Expresses a Derivative instance as a finite difference.
Parameters
==========
points : sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
centered around ``x0``. Default: 1 (step-size 1)
x0 : number or Symbol, optional
the value of the independent variable (``wrt``) at which the
derivative is to be approximated. Default: same as ``wrt``.
wrt : Symbol, optional
"with respect to" the variable for which the (partial)
derivative is to be approximated. If not provided it
is required that the derivative is ordinary. Default: ``None``.
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> f(x).diff(x).as_finite_difference()
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and
``order + 1`` respectively. We can change the step size by
passing a symbol as a parameter:
>>> f(x).diff(x).as_finite_difference(h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a
sequence:
>>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around ``x0``, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2) # doctest: +ELLIPSIS
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/...
To approximate ``Derivative`` around ``x0`` using a non-equidistant
spacing step, the algorithm supports assignment of undefined
functions to ``points``:
>>> dx = Function('dx')
>>> f(x).diff(x).as_finite_difference(points=dx(x), x0=x-h)
-f(-h + x - dx(-h + x)/2)/dx(-h + x) + f(-h + x + dx(-h + x)/2)/dx(-h + x)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> d2fdxdy.as_finite_difference(wrt=x)
-Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
We can apply ``as_finite_difference`` to ``Derivative`` instances in
compound expressions using ``replace``:
>>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative,
... lambda arg: arg.as_finite_difference())
42**(-f(x - 1/2) + f(x + 1/2)) + 1
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.differentiate_finite
sympy.calculus.finite_diff.finite_diff_weights
"""
from ..calculus.finite_diff import _as_finite_diff
return _as_finite_diff(self, points, x0, wrt)
class Lambda(Expr):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
It is also possible to unpack tuple arguments:
>>> f = Lambda( ((x, y), z) , x + y + z)
>>> f((1, 2), 3)
6
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
is_Function = True
def __new__(cls, signature, expr):
if iterable(signature) and not isinstance(signature, (tuple, Tuple)):
SymPyDeprecationWarning(
feature="non tuple iterable of argument symbols to Lambda",
useinstead="tuple of argument symbols",
issue=17474,
deprecated_since_version="1.5").warn()
signature = tuple(signature)
sig = signature if iterable(signature) else (signature,)
sig = sympify(sig)
cls._check_signature(sig)
if len(sig) == 1 and sig[0] == expr:
return S.IdentityFunction
return Expr.__new__(cls, sig, sympify(expr))
@classmethod
def _check_signature(cls, sig):
syms = set()
def rcheck(args):
for a in args:
if a.is_symbol:
if a in syms:
raise BadSignatureError("Duplicate symbol %s" % a)
syms.add(a)
elif isinstance(a, Tuple):
rcheck(a)
else:
raise BadSignatureError("Lambda signature should be only tuples"
" and symbols, not %s" % a)
if not isinstance(sig, Tuple):
raise BadSignatureError("Lambda signature should be a tuple not %s" % sig)
# Recurse through the signature:
rcheck(sig)
@property
def signature(self):
"""The expected form of the arguments to be unpacked into variables"""
return self._args[0]
@property
def expr(self):
"""The return value of the function"""
return self._args[1]
@property
def variables(self):
"""The variables used in the internal representation of the function"""
def _variables(args):
if isinstance(args, Tuple):
for arg in args:
for a in _variables(arg):
yield a
else:
yield args
return tuple(_variables(self.signature))
@property
def nargs(self):
from sympy.sets.sets import FiniteSet
return FiniteSet(len(self.signature))
bound_symbols = variables
@property
def free_symbols(self):
return self.expr.free_symbols - set(self.variables)
def __call__(self, *args):
n = len(args)
if n not in self.nargs: # Lambda only ever has 1 value in nargs
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
## XXX does this apply to Lambda? If not, remove this comment.
temp = ('%(name)s takes exactly %(args)s '
'argument%(plural)s (%(given)s given)')
raise BadArgumentsError(temp % {
'name': self,
'args': list(self.nargs)[0],
'plural': 's'*(list(self.nargs)[0] != 1),
'given': n})
d = self._match_signature(self.signature, args)
return self.expr.xreplace(d)
def _match_signature(self, sig, args):
symargmap = {}
def rmatch(pars, args):
for par, arg in zip(pars, args):
if par.is_symbol:
symargmap[par] = arg
elif isinstance(par, Tuple):
if not isinstance(arg, (tuple, Tuple)) or len(arg) != len(par):
raise BadArgumentsError("Can't match %s and %s" % (args, pars))
rmatch(par, arg)
rmatch(sig, args)
return symargmap
def __eq__(self, other):
if not isinstance(other, Lambda):
return False
if self.nargs != other.nargs:
return False
try:
d = self._match_signature(other.signature, self.signature)
except BadArgumentsError:
return False
return self.args == other.xreplace(d).args
def __hash__(self):
return super(Lambda, self).__hash__()
def _hashable_content(self):
return (self.expr.xreplace(self.canonical_variables),)
@property
def is_identity(self):
"""Return ``True`` if this ``Lambda`` is an identity function. """
return self.signature == self.expr
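# Illustrative note (not part of the original source): because __eq__ and
# _hashable_content above work with canonically renamed bound variables,
# lambdas differing only in the names of their arguments compare equal, e.g.
#     Lambda(x, x**2) == Lambda(y, y**2)   # True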
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
list of distinct variables and a point or list of evaluation points
corresponding to those variables.
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
Examples
========
>>> from sympy import Subs, Function, sin, cos
>>> from sympy.abc import x, y, z
>>> f = Function('f')
Subs are created when a particular substitution cannot be made. The
x in the derivative cannot be replaced with 0 because 0 is not a
valid variable of differentiation:
>>> f(x).diff(x).subs(x, 0)
Subs(Derivative(f(x), x), x, 0)
Once f is known, the derivative and evaluation at 0 can be done:
>>> _.subs(f, sin).doit() == sin(x).diff(x).subs(x, 0) == cos(0)
True
Subs can also be created directly with one or more variables:
>>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
Subs(z + f(x)*sin(y), (x, y), (0, 1))
>>> _.doit()
z + f(0)*sin(1)
Notes
=====
In order to allow expressions to combine before doit is done, a
representation of the Subs expression is used internally to make
expressions that are superficially different compare the same:
>>> a, b = Subs(x, x, 0), Subs(y, y, 0)
>>> a + b
2*Subs(x, x, 0)
This can lead to unexpected consequences when using methods
like `has` that are cached:
>>> s = Subs(x, x, 0)
>>> s.has(x), s.has(y)
(True, False)
>>> ss = s.subs(x, y)
>>> ss.has(x), ss.has(y)
(True, False)
>>> s, ss
(Subs(x, x, 0), Subs(y, y, 0))
"""
def __new__(cls, expr, variables, point, **assumptions):
from sympy import Symbol
if not is_sequence(variables, Tuple):
variables = [variables]
variables = Tuple(*variables)
if has_dups(variables):
repeated = [str(v) for v, i in Counter(variables).items() if i > 1]
__ = ', '.join(repeated)
raise ValueError(filldedent('''
The following expressions appear more than once: %s
''' % __))
point = Tuple(*(point if is_sequence(point, Tuple) else [point]))
if len(point) != len(variables):
raise ValueError('Number of point values must be the same as '
'the number of variables.')
if not point:
return sympify(expr)
# denest
if isinstance(expr, Subs):
variables = expr.variables + variables
point = expr.point + point
expr = expr.expr
else:
expr = sympify(expr)
# use symbols with names equal to the point value (with prepended _)
# to give a variable-independent expression
pre = "_"
pts = sorted(set(point), key=default_sort_key)
from sympy.printing import StrPrinter
class CustomStrPrinter(StrPrinter):
def _print_Dummy(self, expr):
return str(expr) + str(expr.dummy_index)
def mystr(expr, **settings):
p = CustomStrPrinter(settings)
return p.doprint(expr)
while 1:
s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
reps = [(v, s_pts[p])
for v, p in zip(variables, point)]
# if any underscore-prepended symbol is already a free symbol
# and is a variable with a different point value, then there
# is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
# because the new symbol that would be created is _1 but _1
# is already mapped to 0 so __0 and __1 are used for the new
# symbols
if any(r in expr.free_symbols and
r in variables and
Symbol(pre + mystr(point[variables.index(r)])) != r
for _, r in reps):
pre += "_"
continue
break
obj = Expr.__new__(cls, expr, Tuple(*variables), point)
obj._expr = expr.xreplace(dict(reps))
return obj
def _eval_is_commutative(self):
return self.expr.is_commutative
def doit(self, **hints):
e, v, p = self.args
# remove self mappings
for i, (vi, pi) in enumerate(zip(v, p)):
if vi == pi:
v = v[:i] + v[i + 1:]
p = p[:i] + p[i + 1:]
if not v:
return self.expr
if isinstance(e, Derivative):
# apply functions first, e.g. f -> cos
undone = []
for i, vi in enumerate(v):
if isinstance(vi, FunctionClass):
e = e.subs(vi, p[i])
else:
undone.append((vi, p[i]))
if not isinstance(e, Derivative):
e = e.doit()
if isinstance(e, Derivative):
# do Subs that aren't related to differentiation
undone2 = []
D = Dummy()
for vi, pi in undone:
if D not in e.xreplace({vi: D}).free_symbols:
e = e.subs(vi, pi)
else:
undone2.append((vi, pi))
undone = undone2
# differentiate wrt variables that are present
wrt = []
D = Dummy()
expr = e.expr
free = expr.free_symbols
for vi, ci in e.variable_count:
if isinstance(vi, Symbol) and vi in free:
expr = expr.diff((vi, ci))
elif D in expr.subs(vi, D).free_symbols:
expr = expr.diff((vi, ci))
else:
wrt.append((vi, ci))
# inject remaining subs
rv = expr.subs(undone)
# do remaining differentiation *in order given*
for vc in wrt:
rv = rv.diff(vc)
else:
# inject remaining subs
rv = e.subs(undone)
else:
rv = e.doit(**hints).subs(list(zip(v, p)))
if hints.get('deep', True) and rv != self:
rv = rv.doit(**hints)
return rv
def evalf(self, prec=None, **options):
return self.doit().evalf(prec, **options)
n = evalf
@property
def variables(self):
"""The variables to be evaluated"""
return self._args[1]
bound_symbols = variables
@property
def expr(self):
"""The expression on which the substitution operates"""
return self._args[0]
@property
def point(self):
"""The values for which the variables are to be substituted"""
return self._args[2]
@property
def free_symbols(self):
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
@property
def expr_free_symbols(self):
return (self.expr.expr_free_symbols - set(self.variables) |
set(self.point.expr_free_symbols))
def __eq__(self, other):
if not isinstance(other, Subs):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return super(Subs, self).__hash__()
def _hashable_content(self):
return (self._expr.xreplace(self.canonical_variables),
) + tuple(ordered([(v, p) for v, p in
zip(self.variables, self.point) if not self.expr.has(v)]))
def _eval_subs(self, old, new):
# Subs doit will do the variables in order; the semantics
# of subs for Subs is have the following invariant for
# Subs object foo:
# foo.doit().subs(reps) == foo.subs(reps).doit()
pt = list(self.point)
if old in self.variables:
if _atomic(new) == set([new]) and not any(
i.has(new) for i in self.args):
# the substitution is neutral
return self.xreplace({old: new})
# any occurrence of old before this point will get
# handled by replacements from here on
i = self.variables.index(old)
for j in range(i, len(self.variables)):
pt[j] = pt[j]._subs(old, new)
return self.func(self.expr, self.variables, pt)
v = [i._subs(old, new) for i in self.variables]
if v != list(self.variables):
return self.func(self.expr, self.variables + (old,), pt + [new])
expr = self.expr._subs(old, new)
pt = [i._subs(old, new) for i in self.point]
return self.func(expr, v, pt)
def _eval_derivative(self, s):
# Apply the chain rule of the derivative on the substitution variables:
val = Add.fromiter(p.diff(s) * Subs(self.expr.diff(v), self.variables, self.point).doit() for v, p in zip(self.variables, self.point))
# Check if there are free symbols in `self.expr`:
# First get the `expr_free_symbols`, which returns the free symbols
# that are directly contained in an expression node (i.e. stop
# searching if the node isn't an expression). At this point turn the
# expressions into `free_symbols` and check if there are common free
# symbols in `self.expr` and the deriving factor.
fs1 = {j for i in self.expr_free_symbols for j in i.free_symbols}
if len(fs1 & s.free_symbols) > 0:
val += Subs(self.expr.diff(s), self.variables, self.point).doit()
return val
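# Illustrative note (not part of the original source): differentiating wrt a
# symbol that appears only in the point applies the chain rule above, e.g.
#     Subs(f(x), x, y**2).diff(y)  ->  2*y*Subs(Derivative(f(x), x), x, y**2)
# where the 2*y factor is p.diff(s) for the single (variable, point) pair.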
def _eval_nseries(self, x, n, logx):
if x in self.point:
# x is the variable being substituted into
apos = self.point.index(x)
other = self.variables[apos]
else:
other = x
arg = self.expr.nseries(other, n=n, logx=logx)
o = arg.getO()
terms = Add.make_args(arg.removeO())
rv = Add(*[self.func(a, *self.args[1:]) for a in terms])
if o:
rv += o.subs(other, x)
return rv
def _eval_as_leading_term(self, x):
if x in self.point:
ipos = self.point.index(x)
xvar = self.variables[ipos]
return self.expr.as_leading_term(xvar)
if x in self.variables:
# if `x` is a dummy variable, it means it won't exist after the
# substitution has been performed:
return self
# The variable is independent of the substitution:
return self.expr.as_leading_term(x)
def diff(f, *symbols, **kwargs):
"""
Differentiate f with respect to symbols.
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
that if there are 0 symbols (such as ``diff(f(x), x, 0)``), then the result will
be the function (the zeroth derivative), even if evaluate=False.
Examples
========
>>> from sympy import sin, cos, Function, diff
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
Derivative(f(x), (x, 3))
>>> diff(f(x), x, 3)
Derivative(f(x), (x, 3))
>>> diff(sin(x)*cos(y), x, 2, y, 2)
sin(x)*cos(y)
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'sympy.core.function.Derivative'>
>>> type(diff(sin(x), x, 0))
sin
>>> type(diff(sin(x), x, 0, evaluate=False))
sin
>>> diff(sin(x))
cos(x)
>>> diff(sin(x*y))
Traceback (most recent call last):
...
ValueError: specify differentiation variables to differentiate sin(x*y)
Note that ``diff(sin(x))`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
References
==========
http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html
See Also
========
Derivative
idiff: computes the derivative implicitly
"""
if hasattr(f, 'diff'):
return f.diff(*symbols, **kwargs)
kwargs.setdefault('evaluate', True)
return Derivative(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
r"""
Expand an expression using methods given as hints.
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
``multinomial``, ``mul``, ``power_base``, and ``power_exp``. The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
The ``basic`` hint is used for any special rewriting of an object that
should be done automatically (along with the other hints like ``mul``)
when expand is called. This is a catch-all hint to handle any sort of
expansion that may not be described by the existing hint names. To use
this hint an object should override the ``_eval_expand_basic`` method.
Objects may also define their own expand methods, which are not run by
default. See the API section below.
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Hints
=====
These hints are run by default
mul
---
Distributes multiplication over addition:
>>> from sympy import cos, exp, sin
>>> from sympy.abc import x, y, z
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial
-----------
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp
---------
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base
----------
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, SymPy performs
it automatically:
>>> (x*y)**2
x**2*y**2
log
---
Pull out power of an argument as a coefficient and split logs products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x,y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
basic
-----
This hint is intended primarily as a way for custom subclasses to enable
expansion by default.
These hints are not run by default:
complex
-------
Split an expression into real and imaginary parts.
>>> x, y = symbols('x,y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func
----
Expand other functions.
>>> from sympy import gamma
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig
----
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change. See `this MathWorld article
<http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
information.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x + y)*(x + y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x + exp(x + y)).expand(deep=False)
exp(x)*exp(exp(x + y))
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
applied before ``multinomial``, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to pass ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> from sympy import expand, expand_mul, expand_power_base
>>> x, y, z = symbols('x,y,z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
Multiplication is distributed over addition even when a denominator
is present::
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either the ``expand()`` function or the ``.expand()`` method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
API
===
Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> from sympy import Expr, sympify
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, **hints):
... '''
... Doubles the args of MyClass.
...
... If there are more than four args, doubling is not performed,
... unless force=True is also used (False by default).
... '''
... force = hints.pop('force', False)
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func, sympy.simplify.hyperexpand.hyperexpand
"""
# don't modify this; modify the Expr.expand method
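# Passing the standard hints through the hints dict ensures that
# Expr.expand receives an explicit True/False value for each of them.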
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_issue_8247_8354 in test_arit)
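# For example, a single pass turns (x + 1)*(x + 2) into x**2 + 3*x + 2;
# with recursive=True the expand_multinomial/expand_mul cycle repeats
# until the expression stops changing.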
if expr is None:
return
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_mul, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
power_base=False, basic=False, multinomial=False, log=False)
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_multinomial, exp
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False)
def expand_log(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_log, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
(x + y)*(log(x) + 2*log(y))*exp(x + y)
"""
return sympify(expr).expand(deep=deep, log=True, mul=False,
power_exp=False, power_base=False, multinomial=False,
basic=False, force=force)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_func, gamma
>>> from sympy.abc import x
>>> expand_func(gamma(x + 2))
x*(x + 1)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_trig, sin
>>> from sympy.abc import x, y
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_complex, exp, sqrt, I
>>> from sympy.abc import z
>>> expand_complex(exp(z))
I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
>>> expand_complex(sqrt(I))
sqrt(2)/2 + sqrt(2)*I/2
See Also
========
sympy.core.expr.Expr.as_real_imag
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_power_base(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the power_base hint.
See the expand docstring for more information.
A wrapper to expand(power_base=True) which separates a power with a base
that is a Mul into a product of powers, without performing any other
expansions, provided that assumptions about the power's base and exponent
allow.
deep=False (default is True) applies the expansion only to the top-level
expression.
force=True (default is False) will cause the expansion to ignore
assumptions about the base and exponent. When False, the expansion will
only happen if the base is non-negative or the exponent is an integer.
>>> from sympy.abc import x, y, z
>>> from sympy import expand_power_base, sin, cos, exp
>>> (x*y)**2
x**2*y**2
>>> (2*x)**y
(2*x)**y
>>> expand_power_base(_)
2**y*x**y
>>> expand_power_base((x*y)**z)
(x*y)**z
>>> expand_power_base((x*y)**z, force=True)
x**z*y**z
>>> expand_power_base(sin((x*y)**z), deep=False)
sin((x*y)**z)
>>> expand_power_base(sin((x*y)**z), force=True)
sin(x**z*y**z)
>>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
2**y*sin(x)**y + 2**y*cos(x)**y
>>> expand_power_base((2*exp(y))**x)
2**x*exp(y)**x
>>> expand_power_base((2*cos(x))**y)
2**y*cos(x)**y
Notice that sums are left untouched. If this is not the desired behavior,
apply full ``expand()`` to the expression:
>>> expand_power_base(((x+y)*z)**2)
z**2*(x + y)**2
>>> (((x+y)*z)**2).expand()
x**2*z**2 + 2*x*y*z**2 + y**2*z**2
>>> expand_power_base((2*y)**(1+z))
2**(z + 1)*y**(z + 1)
>>> ((2*y)**(1+z)).expand()
2*2**z*y*y**z
"""
return sympify(expr).expand(deep=deep, log=False, mul=False,
power_exp=False, power_base=True, multinomial=False,
basic=False, force=force)
def expand_power_exp(expr, deep=True):
"""
Wrapper around expand that only uses the power_exp hint.
See the expand docstring for more information.
Examples
========
>>> from sympy import expand_power_exp
>>> from sympy.abc import x, y
>>> expand_power_exp(x**(y + 2))
x**2*x**y
"""
return sympify(expr).expand(deep=deep, complex=False, basic=False,
log=False, mul=False, power_exp=True, power_base=False, multinomial=False)
def count_ops(expr, visual=False):
"""
Return a representation (integer or expression) of the operations in expr.
If ``visual`` is ``False`` (default) then the sum of the coefficients of the
visual expression will be returned.
If ``visual`` is ``True`` then the number of each type of operation is shown
with the core class types (or their virtual equivalent) multiplied by the
number of times they occur.
If expr is an iterable, the sum of the op counts of the
items will be returned.
Examples
========
>>> from sympy.abc import a, b, x, y
>>> from sympy import sin, count_ops
Although there isn't a SUB object, minus signs are interpreted as
either negations or subtractions:
>>> (x - y).count_ops(visual=True)
SUB
>>> (-x).count_ops(visual=True)
NEG
Here, there are two Adds and a Pow:
>>> (1 + a + b**2).count_ops(visual=True)
2*ADD + POW
In the following, an Add, Mul, Pow and two functions:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
ADD + MUL + POW + 2*SIN
for a total of 5:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
5
Note that "what you type" is not always what you get. The expression
1/x/y is translated by SymPy into 1/(x*y), so it gives a DIV and a MUL rather
than two DIVs:
>>> (1/x/y).count_ops(visual=True)
DIV + MUL
The visual option can be used to demonstrate the difference in
operations for expressions in different forms. Here, the Horner
representation is compared with the expanded form of a polynomial:
>>> eq = x*(1 + x*(2 + x*(3 + x)))
>>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
-MUL + 3*POW
The count_ops function also handles iterables:
>>> count_ops([x, sin(x), None, True, x + 2], visual=False)
2
>>> count_ops([x, sin(x), None, True, x + 2], visual=True)
ADD + SIN
>>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
2*ADD + SIN
"""
from sympy import Integral, Symbol
from sympy.core.relational import Relational
from sympy.simplify.radsimp import fraction
from sympy.logic.boolalg import BooleanFunction
from sympy.utilities.misc import func_name
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
ops = []
args = [expr]
NEG = Symbol('NEG')
DIV = Symbol('DIV')
SUB = Symbol('SUB')
ADD = Symbol('ADD')
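# Walk the expression tree with an explicit stack (no recursion),
# appending a symbolic token (NEG, DIV, SUB, ADD, ...) for each
# operation encountered.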
while args:
a = args.pop()
if a.is_Rational:
#-1/3 = NEG + DIV
if a is not S.One:
if a.p < 0:
ops.append(NEG)
if a.q != 1:
ops.append(DIV)
continue
elif a.is_Mul or a.is_MatMul:
if _coeff_isneg(a):
ops.append(NEG)
if a.args[0] is S.NegativeOne:
a = a.as_two_terms()[1]
else:
a = -a
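# A Mul with a denominator counts as a DIV; the remaining numerator
# and denominator parts are pushed back on the stack so their own
# operations are counted as well.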
n, d = fraction(a)
if n.is_Integer:
ops.append(DIV)
if n < 0:
ops.append(NEG)
args.append(d)
continue # won't be -Mul but could be Add
elif d is not S.One:
if not d.is_Integer:
args.append(d)
ops.append(DIV)
args.append(n)
continue # could be -Mul
elif a.is_Add or a.is_MatAdd:
aargs = list(a.args)
negs = 0
for i, ai in enumerate(aargs):
if _coeff_isneg(ai):
negs += 1
args.append(-ai)
if i > 0:
ops.append(SUB)
else:
args.append(ai)
if i > 0:
ops.append(ADD)
if negs == len(aargs): # -x - y = NEG + SUB
ops.append(NEG)
elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
ops.append(SUB - ADD)
continue
if a.is_Pow and a.exp is S.NegativeOne:
ops.append(DIV)
args.append(a.base) # won't be -Mul but could be Add
continue
if (a.is_Mul or
a.is_Pow or
a.is_Function or
isinstance(a, Derivative) or
isinstance(a, Integral)):
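# Every other compound node contributes a token named after its class
# (MUL, POW, SIN, ...); Muls and lattice ops count one token per pair
# of adjacent arguments.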
o = Symbol(a.func.__name__.upper())
# count the args
if (a.is_Mul or isinstance(a, LatticeOp)):
ops.append(o*(len(a.args) - 1))
else:
ops.append(o)
if not a.is_Symbol:
args.extend(a.args)
elif isinstance(expr, Dict):
ops = [count_ops(k, visual=visual) +
count_ops(v, visual=visual) for k, v in expr.items()]
elif iterable(expr):
ops = [count_ops(i, visual=visual) for i in expr]
elif isinstance(expr, (Relational, BooleanFunction)):
ops = []
for arg in expr.args:
ops.append(count_ops(arg, visual=True))
o = Symbol(func_name(expr, short=True).upper())
ops.append(o)
elif not isinstance(expr, Basic):
ops = []
else:  # it's a Basic object, but not an Expr
if not isinstance(expr, Basic):
raise TypeError("Invalid type of expr")
else:
ops = []
args = [expr]
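# Fallback for Basic objects that are not Expr (sets, for example):
# each node with args contributes a token named after its class;
# Boolean nodes are weighted by len(args) - 1, all others count once.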
while args:
a = args.pop()
if a.args:
o = Symbol(a.func.__name__.upper())
if a.is_Boolean:
ops.append(o*(len(a.args)-1))
else:
ops.append(o)
args.extend(a.args)
if not ops:
if visual:
return S.Zero
return 0
ops = Add(*ops)
if visual:
return ops
if ops.is_Number:
return int(ops)
return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False, dkeys=False):
"""Make all Rationals in expr Floats except those in exponents
(unless the ``exponent`` flag is set to True). When processing
dictionaries, don't modify the keys unless ``dkeys=True``.
Examples
========
>>> from sympy.core.function import nfloat
>>> from sympy.abc import x, y
>>> from sympy import cos, pi, sqrt
>>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
x**4 + 0.5*x + sqrt(y) + 1.5
>>> nfloat(x**4 + sqrt(y), exponent=True)
x**4.0 + y**0.5
Container types are not modified:
>>> type(nfloat((1, 2))) is tuple
True
"""
from sympy.core.power import Pow
from sympy.polys.rootoftools import RootOf
from sympy import MatrixBase
kw = dict(n=n, exponent=exponent, dkeys=dkeys)
if isinstance(expr, MatrixBase):
return expr.applyfunc(lambda e: nfloat(e, **kw))
# handling of iterable containers
if iterable(expr, exclude=string_types):
if isinstance(expr, (dict, Dict)):
if dkeys:
args = [tuple(map(lambda i: nfloat(i, **kw), a))
for a in expr.items()]
else:
args = [(k, nfloat(v, **kw)) for k, v in expr.items()]
if isinstance(expr, dict):
return type(expr)(args)
else:
return expr.func(*args)
elif isinstance(expr, Basic):
return expr.func(*[nfloat(a, **kw) for a in expr.args])
return type(expr)([nfloat(a, **kw) for a in expr])
rv = sympify(expr)
if rv.is_Number:
return Float(rv, n)
elif rv.is_number:
# evalf doesn't always set the precision
rv = rv.n(n)
if rv.is_Number:
rv = Float(rv.n(n), n)
else:
pass # pure_complex(rv) is likely True
return rv
elif rv.is_Atom:
return rv
# watch out for RootOf instances that don't like to have
# their exponents replaced with Dummies and also sometimes have
# problems with evaluating at low precision (issue 6393)
rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)})
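# To keep exponents exact, temporarily replace each Pow exponent with a
# Dummy, evaluate numerically, then restore the original exponents.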
if not exponent:
reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
rv = rv.xreplace(dict(reps))
rv = rv.n(n)
if not exponent:
rv = rv.xreplace({d.exp: p.exp for p, d in reps})
else:
# Pow._eval_evalf special-cases Integer exponents, so if the
# exponent is supposed to be handled we have to do it here
rv = rv.xreplace(Transform(
lambda x: Pow(x.base, Float(x.exp, n)),
lambda x: x.is_Pow and x.exp.is_Integer))
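# Finally, recurse into the arguments of unevaluated Function nodes so
# that Rationals nested inside them are converted as well.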
return rv.xreplace(Transform(
lambda x: x.func(*nfloat(x.args, n, exponent)),
lambda x: isinstance(x, Function)))
from sympy.core.symbol import Dummy, Symbol