code | repo_name | path | language | license | size
---|---|---|---|---|---
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Zomboided
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module is a bunch of functions that are called from the settings
# menu to manage various file groups.
import xbmcaddon
import xbmcgui
import xbmcvfs
import datetime
import os
import sys
import xbmc
from libs.vpnproviders import removeGeneratedFiles, cleanPassFiles, providers, usesUserKeys, usesMultipleKeys, getUserKeys
from libs.vpnproviders import getUserCerts, getVPNDisplay, getVPNLocation, removeDownloadedFiles, isAlternative, resetAlternative
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID, getName
from libs.vpnplatform import getLogPath, getUserDataPath, writeVPNLog, copySystemdFiles, addSystemd, removeSystemd, generateVPNs
from libs.common import resetVPNConnections, isVPNConnected, disconnectVPN, suspendConfigUpdate, resumeConfigUpdate, dnsFix, getVPNRequestedProfile
from libs.common import resetVPNProvider, setAPICommand
from libs.ipinfo import resetIPServices
try:
from libs.generation import generateAll
except:
pass
action = sys.argv[1]
debugTrace("-- Entered managefiles.py with parameter " + action + " --")
if not getID() == "":
addon = xbmcaddon.Addon(getID())
addon_name = getName()
# Reset the ovpn files
if action == "ovpn":
if getVPNRequestedProfile() == "":
if xbmcgui.Dialog().yesno(addon_name, "Resetting the VPN provider will disconnect and reset all VPN connections, and then remove any files that have been created. Continue?"):
suspendConfigUpdate()
# Disconnect so that live files are not being modified
resetVPNConnections(addon)
infoTrace("managefiles.py", "Resetting the VPN provider")
# Delete the generated files, and reset the locations so it can be selected again
removeGeneratedFiles()
# Delete any values that have previously been validated
vpn_provider = getVPNLocation(addon.getSetting("vpn_provider"))
if isAlternative(vpn_provider): resetAlternative(vpn_provider)
# Reset the IP service error counts, etc
resetIPServices()
addon = xbmcaddon.Addon(getID())
resetVPNProvider(addon)
addon = xbmcaddon.Addon(getID())
resumeConfigUpdate()
xbmcgui.Dialog().ok(addon_name, "Reset the VPN provider. Validate a connection to start using a VPN again.")
else:
xbmcgui.Dialog().ok(addon_name, "Connection to VPN being attempted and has been aborted. Try again in a few seconds.")
setAPICommand("Disconnect")
# Generate the VPN provider files
if action == "generate":
# Only used during development to create location files
generateAll()
xbmcgui.Dialog().ok(addon_name, "Regenerated some or all of the VPN location files.")
# Delete all of the downloaded VPN files
if action == "downloads":
debugTrace("Deleting all downloaded VPN files")
removeDownloadedFiles()
xbmcgui.Dialog().ok(addon_name, "Deleted all of the downloaded VPN files. They'll be downloaded again if required.")
# Copy the log file
elif action == "log":
log_path = ""
dest_path = ""
try:
log_path = getLogPath()
start_dir = ""
dest_folder = xbmcgui.Dialog().browse(0, "Select folder to copy log file into", "files", "", False, False, start_dir, False)
dest_path = "kodi " + datetime.datetime.now().strftime("%y-%m-%d %H-%M-%S") + ".log"
dest_path = dest_folder + dest_path.replace(" ", "_")
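# A worked example of the name built above: a copy made at
# 2016-05-04 14:30:59 yields "kodi_16-05-04_14-30-59.log",
# appended to the folder chosen in the browse dialog.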
# Write VPN log to log before copying
writeVPNLog()
debugTrace("Copying " + log_path + " to " + dest_path)
addon = xbmcaddon.Addon(getID())
infoTrace("managefiles.py", "Copying log file to " + dest_path + ". Using version " + addon.getSetting("version_number"))
xbmcvfs.copy(log_path, dest_path)
if not xbmcvfs.exists(dest_path): raise IOError('Failed to copy log ' + log_path + " to " + dest_path)
dialog_message = "Copied log file to: " + dest_path
except:
errorTrace("managefiles.py", "Failed to copy log from " + log_path + " to " + dest_path)
if xbmcvfs.exists(log_path):
dialog_message = "Error copying log, try copying it to a different location."
else:
dialog_message = "Could not find the kodi.log file."
errorTrace("managefiles.py", dialog_message + " " + log_path + ", " + dest_path)
xbmcgui.Dialog().ok("Log Copy", dialog_message)
# Delete the user key and cert files
elif action == "user":
if addon.getSetting("1_vpn_validated") == "" or xbmcgui.Dialog().yesno(addon_name, "Deleting key and certificate files will disconnect and reset all VPN connections. Connections must be re-validated before use. Continue?"):
# Disconnect so that live files are not being modified
if isVPNConnected(): resetVPNConnections(addon)
# Select the provider
provider_list = []
for provider in providers:
if usesUserKeys(provider):
provider_list.append(getVPNDisplay(provider))
provider_list.sort()
index = xbmcgui.Dialog().select("Select VPN provider", provider_list)
provider_display = provider_list[index]
provider = getVPNLocation(provider_display)
# Get the key/cert pairs for that provider and offer up for deletion
user_keys = getUserKeys(provider)
user_certs = getUserCerts(provider)
if len(user_keys) > 0 or len(user_certs) > 0:
still_deleting = True
while still_deleting:
if len(user_keys) > 0 or len(user_certs) > 0:
# Build a list of things to display. We should always have pairs, but if
# something didn't copy or the user has messed with the dir this will cope
all_user = []
single_pair = "user [I](Same key and certificate used for all connections)[/I]"
for key in user_keys:
list_item = os.path.basename(key)
list_item = list_item.replace(".key", "")
if list_item == "user": list_item = single_pair
all_user.append(list_item)
for cert in user_certs:
list_item = os.path.basename(cert)
list_item = list_item.replace(".crt", "")
if list_item == "user": list_item = single_pair
if not list_item in all_user: all_user.append(list_item)
all_user.sort()
# Offer a delete all option if there are multiple keys
all_item = "[I]Delete all key and certificate files[/I]"
if usesMultipleKeys(provider):
all_user.append(all_item)
# Add in a finished option
finished_item = "[I]Finished[/I]"
all_user.append(finished_item)
# Get the pair to delete
index = xbmcgui.Dialog().select("Select key and certificate to delete, or [I]Finished[/I]", all_user)
if all_user[index] == finished_item:
still_deleting = False
else:
if all_user[index] == single_pair: all_user[index] = "user"
if all_user[index] == all_item:
if xbmcgui.Dialog().yesno(addon_name, "Are you sure you want to delete all key and certificate files for " + provider_display + "?"):
for item in all_user:
if not item == all_item and not item == finished_item:
path = getUserDataPath(provider + "/" + item)
try:
if xbmcvfs.exists(path + ".key"):
xbmcvfs.delete(path + ".key")
if xbmcvfs.exists(path + ".txt"):
xbmcvfs.delete(path + ".txt")
if xbmcvfs.exists(path + ".crt"):
xbmcvfs.delete(path + ".crt")
except:
xbmcgui.Dialog().ok(addon_name, "Couldn't delete one of the key or certificate files: " + path)
else:
path = getUserDataPath(provider + "/" + all_user[index])
try:
if xbmcvfs.exists(path+".key"):
xbmcvfs.delete(path + ".key")
if xbmcvfs.exists(path + ".txt"):
xbmcvfs.delete(path + ".txt")
if xbmcvfs.exists(path + ".crt"):
xbmcvfs.delete(path + ".crt")
except:
xbmcgui.Dialog().ok(addon_name, "Couldn't delete one of the key or certificate files: " + path)
# Fetch the directory list again
user_keys = getUserKeys(provider)
user_certs = getUserCerts(provider)
if len(user_keys) == 0 and len(user_certs) == 0:
xbmcgui.Dialog().ok(addon_name, "All key and certificate files for " + provider_display + " have been deleted.")
else:
still_deleting = False
else:
xbmcgui.Dialog().ok(addon_name, "No key and certificate files exist for " + provider_display + ".")
# Fix the user defined files with DNS goodness
if action == "dns":
dnsFix()
command = "Addon.OpenSettings(" + getID() + ")"
xbmc.executebuiltin(command)
else:
errorTrace("managefiles.py", "VPN service is not ready")
debugTrace("-- Exit managefiles.py --")
| Zomboided/VPN-Manager | managefiles.py | Python | gpl-2.0 | 11,679 |
# -*- coding: utf-8 -*-
"""
uds.warnings
~~~~~~~~~~~~
:copyright: Copyright (c) 2015, National Institute of Information and Communications Technology. All rights reserved.
:license: GPL2, see LICENSE for more details.
"""
import warnings
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
:param func:
:return: new_func
"""
def new_func(*args, **kwargs):
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning)
return func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
# Examples of use
@deprecated
def some_old_function(x, y):
return x + y
class SomeClass:
@deprecated
def some_old_method(self, x, y):
return x + y | nict-isp/uds-sdk | uds/warnings.py | Python | gpl-2.0 | 979 |
__author__ = 'dako'
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_css_selector('input[type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
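# The [1:-1] slice presumably strips a pair of wrapping characters
# (e.g. parentheses) that the page renders around the user name.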
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password) | EvilDako/PyTraining | fixture/session.py | Python | gpl-2.0 | 1,421 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import gmtime, strftime
import ephem
import wx.calendar
# Test
# here = ephem.Observer()
# here.lat = '-17.576166667'
# here.lon = '-149.618575000'
class App(wx.App):
def OnInit(self):
self.frame = MyFrame("Lunacy", (50, 60), (640, 220))
self.frame.Show()
self.SetTopWindow(self.frame)
return True
###########################################################################
## Class MyFrame
###########################################################################
class MyFrame(wx.Frame):
def __init__(self, title, pos, size):
wx.Frame.__init__(self, None, -1, title, pos, size)
path = "/usr/share/pixmaps/pidgin/emotes/default/moon.png"
icon = wx.Icon(path, wx.BITMAP_TYPE_PNG)
self.SetIcon(icon)
self.SetSizeHintsSz(wx.Size(640, 220), wx.DefaultSize)
gSizer1 = wx.GridSizer(1, 2, 0, 0)
fgSizer1 = wx.FlexGridSizer(1, 1, 0, 0)
fgSizer1.SetFlexibleDirection(wx.BOTH)
fgSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
cal = wx.calendar.CalendarCtrl(self, wx.ID_ANY, wx.DefaultDateTime, wx.DefaultPosition, wx.DefaultSize,
wx.calendar.CAL_SHOW_HOLIDAYS |
wx.calendar.CAL_SHOW_SURROUNDING_WEEKS |
wx.calendar.CAL_SUNDAY_FIRST |
wx.SUNKEN_BORDER, u"Date of Lunacy")
self.cal = cal
self.cal.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
self.cal.SetToolTipString(u"Date for Next Event")
self.cal.SetHelpText(u"Renders Lunar/Solar events for the date.")
self.Bind(wx.calendar.EVT_CALENDAR_SEL_CHANGED, self.OnDateSelect, id=cal.GetId())
fgSizer1.Add(self.cal, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.ALL, 5)
fgSizer1.AddSpacer(( 0, 5), 1, wx.EXPAND, 5)
gSizer1.Add(fgSizer1, 1, 0, 0)
fgSizer2 = wx.FlexGridSizer(8, 3, 3, 0)
fgSizer2.SetFlexibleDirection(wx.HORIZONTAL)
fgSizer2.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
fgSizer2.SetMinSize(wx.Size(-1, 220))
self.staticText_Moonrise = wx.StaticText(self, wx.ID_ANY, u"Moonrise", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticText_Moonrise.Wrap(-1)
self.staticText_Moonrise.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_Moonrise, 0, 0, 5)
self.mrtime = wx.StaticText(self, wx.ID_ANY, u"next rise", wx.DefaultPosition, wx.DefaultSize, 0)
self.mrtime.Wrap(-1)
fgSizer2.Add(self.mrtime, 0, 0, 5)
self.mraz = wx.StaticText(self, wx.ID_ANY, u"azimuth", wx.DefaultPosition, wx.DefaultSize, 0)
self.mraz.Wrap(-1)
fgSizer2.Add(self.mraz, 0, 0, 5)
self.staticText_Moonset = wx.StaticText(self, wx.ID_ANY, u"Moonset", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticText_Moonset.Wrap(-1)
self.staticText_Moonset.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_Moonset, 0, 0, 10)
self.mstime = wx.StaticText(self, wx.ID_ANY, u"next set", wx.DefaultPosition, wx.DefaultSize, 0)
self.mstime.Wrap(-1)
fgSizer2.Add(self.mstime, 0, 0, 5)
self.msaz = wx.StaticText(self, wx.ID_ANY, u"azimuth", wx.DefaultPosition, wx.DefaultSize, 0)
self.msaz.Wrap(-1)
fgSizer2.Add(self.msaz, 0, 0, 5)
self.staticText_Phase = wx.StaticText(self, wx.ID_ANY, u"Phase", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticText_Phase.Wrap(-1)
self.staticText_Phase.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_Phase, 0, 0, 10)
self.moonphase = wx.StaticText(self, wx.ID_ANY, u"moonphase", wx.DefaultPosition, wx.DefaultSize, 0)
self.moonphase.Wrap(-1)
fgSizer2.Add(self.moonphase, 0, 0, 5)
self.phasepercent = wx.StaticText(self, wx.ID_ANY, u"% illuminated", wx.DefaultPosition, wx.DefaultSize, 0)
self.phasepercent.Wrap(-1)
fgSizer2.Add(self.phasepercent, 0, 0, 5)
self.staticText_NewMoon = wx.StaticText(self, wx.ID_ANY, u"New Moon ", wx.DefaultPosition, wx.DefaultSize,
wx.ST_NO_AUTORESIZE)
self.staticText_NewMoon.Wrap(-1)
self.staticText_NewMoon.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_NewMoon, 0, 0, 10)
self.newmoondate = wx.StaticText(self, wx.ID_ANY, u"next new moon", wx.DefaultPosition, wx.DefaultSize, 0)
self.newmoondate.Wrap(-1)
fgSizer2.Add(self.newmoondate, 0, 0, 10)
self.newmoonhour = wx.StaticText(self, wx.ID_ANY, u"hour", wx.DefaultPosition, wx.DefaultSize, 0)
self.newmoonhour.Wrap(-1)
fgSizer2.Add(self.newmoonhour, 0, 0, 10)
self.staticText_FullMoon = wx.StaticText(self, wx.ID_ANY, u"Full Moon", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticText_FullMoon.Wrap(-1)
self.staticText_FullMoon.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_FullMoon, 0, 0, 10)
self.fullmoondate = wx.StaticText(self, wx.ID_ANY, u"next full moon", wx.DefaultPosition, wx.DefaultSize, 0)
self.fullmoondate.Wrap(-1)
fgSizer2.Add(self.fullmoondate, 0, 0, 5)
self.fullmoonhour = wx.StaticText(self, wx.ID_ANY, u"hour", wx.DefaultPosition, wx.DefaultSize, 0)
self.fullmoonhour.Wrap(-1)
fgSizer2.Add(self.fullmoonhour, 0, 0, 5)
self.staticText_Sunrise = wx.StaticText(self, wx.ID_ANY, u"Sunrise", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticText_Sunrise.Wrap(-1)
self.staticText_Sunrise.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_Sunrise, 0, 0, 10)
self.srtime = wx.StaticText(self, wx.ID_ANY, u"next rise", wx.DefaultPosition, wx.DefaultSize, 0)
self.srtime.Wrap(-1)
fgSizer2.Add(self.srtime, 0, 0, 5)
self.sraz = wx.StaticText(self, wx.ID_ANY, u"azimuth", wx.DefaultPosition, wx.DefaultSize, 0)
self.sraz.Wrap(-1)
fgSizer2.Add(self.sraz, 0, 0, 5)
self.staticText_SolarNoon = wx.StaticText(self, wx.ID_ANY, u"High Noon", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticText_SolarNoon.Wrap(-1)
self.staticText_SolarNoon.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_SolarNoon, 0, 0, 10)
self.sntime = wx.StaticText(self, wx.ID_ANY, u"solar noon", wx.DefaultPosition, wx.DefaultSize, 0)
self.sntime.Wrap(-1)
fgSizer2.Add(self.sntime, 0, 0, 5)
self.snaltitude = wx.StaticText(self, wx.ID_ANY, u"altitude", wx.DefaultPosition, wx.DefaultSize, 0)
self.snaltitude.Wrap(-1)
fgSizer2.Add(self.snaltitude, 0, 0, 5)
self.staticText_Sunset = wx.StaticText(self, wx.ID_ANY, u"Sunset", wx.DefaultPosition, wx.DefaultSize, 0)
self.staticText_Sunset.Wrap(-1)
self.staticText_Sunset.SetFont(wx.Font(12, 74, 90, 90, False, "Sans"))
fgSizer2.Add(self.staticText_Sunset, 0, 0, 10)
self.sstime = wx.StaticText(self, wx.ID_ANY, u"next set", wx.DefaultPosition, wx.DefaultSize, 0)
self.sstime.Wrap(-1)
fgSizer2.Add(self.sstime, 0, 0, 5)
self.ssaz = wx.StaticText(self, wx.ID_ANY, u"azimuth", wx.DefaultPosition, wx.DefaultSize, 0)
self.ssaz.Wrap(-1)
fgSizer2.Add(self.ssaz, 0, 0, 5)
gSizer1.Add(fgSizer2, 1, wx.TOP, 5)
self.SetSizer(gSizer1)
self.Layout()
self.Centre(wx.BOTH)
def __del__(self):
pass
def OnDateSelect(self, evt):
f = open(r'/etc/nx.lat') # Lat/lon files for Navigatrix
lat = f.readline(12)
f.close()
f = open(r'/etc/nx.lon')
lon = f.readline(12)
f.close()
lat = float(lat)
lon = float(lon)
degrees = int(lat)
mnn = (lat - degrees) * 60
minutes = int(mnn)
seconds = round(((mnn - minutes) * 60), 3)
lat = str(degrees) + ':' + str(minutes) + ':' + str(seconds)
degrees = int(lon)
mnn = (lon - degrees) * 60
minutes = int(mnn)
seconds = round(((mnn - minutes) * 60), 3)
lon = str(degrees) + ':' + str(minutes) + ':' + str(seconds)
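# A sketch of the conversion above: 149.618575 degrees becomes
# '149:37:6.87', the 'deg:min:sec' string form that ephem accepts.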
here = ephem.Observer()
here.lat = lat
here.lon = lon
here.pressure = 0 # barometric pressure not factored
here.horizon = '-0:34' # fudge factor from the US Naval Observatory
here.elevation = 2.0 # 2 Meters elevation
here.temp = 25.0 # and a balmy 25 degrees
cal = evt.GetEventObject()
year = (str(self.cal.GetDate().GetYear()))
month = (str(self.cal.GetDate().GetMonth() + 1))
day = (str(self.cal.GetDate().GetDay()))
hour = strftime("%H:%M:%S", gmtime())
datefig = year + '/' + month + '/' + day + ' ' + hour
here.date = datefig
sun = ephem.Sun(here)
moon = ephem.Moon(here)
moon.compute(here)
#
# Moon Rise
#
# mrtime = str(here.next_rising(moon))
mrtime = here.next_rising(moon)
lt = ephem.localtime(mrtime)
mrtime = str(lt).split()
mrtime = mrtime[1].split(".")
self.mrtime.SetLabel(str(mrtime[0]))
mraz = str(moon.az).partition(':')
self.mraz.SetLabel(str(mraz[0]) + u'\u00B0 from North')
#
# Moon Set
#
mstime = here.next_setting(moon)
lt = ephem.localtime(mstime)
mstime = str(lt).split()
mstime = mstime[1].split(".")
self.mstime.SetLabel(mstime[0])
msaz = str(moon.az).partition(':')
self.msaz.SetLabel(str(msaz[0]) + u'\u00B0 from North')
#
# Moon Phase
# TODO Clearly these numbers are pulled out of a hat.
# They are a very rough approximation of the phases and
# do not account for waxing and waning.
phasepercent = int(moon.moon_phase * 100)
self.phasepercent.SetLabel(str(phasepercent) + " %")
if phasepercent <= 2.0:
moonphase = "New Moon"
if 2.1 < phasepercent <= 20.0:
moonphase = "Crescent"
if 20.1 < phasepercent <= 60.0:
moonphase = "Quarter Moon"
if 60.1 < phasepercent <= 95.0:
moonphase = "Gibbous"
if phasepercent > 95.1:
moonphase = "Full Moon"
self.moonphase.SetLabel(moonphase)
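# A possible refinement (a sketch, not part of the original logic):
# the moon is waxing whenever the next full moon comes before the
# next new moon, which would disambiguate Crescent/Gibbous, e.g.
# waxing = ephem.next_full_moon(datefig) < ephem.next_new_moon(datefig)
# if moonphase in ("Crescent", "Gibbous"):
#     moonphase = ("Waxing " if waxing else "Waning ") + moonphase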
#
# New Moon Date
#
newmoondate = ephem.next_new_moon(datefig)
lt = ephem.localtime(newmoondate)
newmoondate = str(lt).split()
newmoonhour = newmoondate[1].split(".")
self.newmoondate.SetLabel(str(newmoondate[0]))
self.newmoonhour.SetLabel(str(newmoonhour[0]))
#
# Full Moon Date
#
fullmoondate = ephem.next_full_moon(datefig)
lt = ephem.localtime(fullmoondate)
fullmoondate = str(lt).split()
fullmoonhour = fullmoondate[1].split(".")
self.fullmoondate.SetLabel(str(fullmoondate[0]))
self.fullmoonhour.SetLabel(str(fullmoonhour[0]))
#
# Sun Rise
#
sun.compute(here)
srtime = here.next_rising(sun)
lt = ephem.localtime(srtime)
srtime = str(lt).split()
srtime = srtime[1].split(".")
self.srtime.SetLabel(srtime[0])
sraz = str(sun.az).partition(':')
self.sraz.SetLabel(str(sraz[0]) + u'\u00B0 from North')
#
# High Noon
#
sntime = here.next_transit(sun)
lt = ephem.localtime(sntime)
sntime = str(lt).split()
sntime = sntime[1].split(".")
self.sntime.SetLabel(sntime[0])
snaltitude = str(sun.alt).partition(':')
self.snaltitude.SetLabel(str(snaltitude[0]) + u'\u00B0 above Horizon')
#
# Sun Set
#
sstime = here.next_setting(sun)
lt = ephem.localtime(sstime)
sstime = str(lt).split()
sstime = sstime[1].split(".")
self.sstime.SetLabel(sstime[0])
ssaz = str(sun.az).partition(':')
self.ssaz.SetLabel(str(ssaz[0]) + u'\u00B0 from North')
if __name__ == '__main__':
app = App()
app.MainLoop()
| wadda/Lunacy | lunacy.py | Python | gpl-2.0 | 10,999 |
#--------------------------------------------------
# Revision = $Rev: 20 $
# Date = $Date: 2011-08-05 20:42:24 +0200 (Fri, 05 Aug 2011) $
# Author = $Author: stefan $
#--------------------------------------------------
from pluginInterfaces import PluginFit, Parameter,leastsqFit
import numpy as np
class PluginFitThreeBodyBeta(PluginFit):
def __init__(self):
pass
def fit(self, array, errarray, param, xmin=0, xmax=0, fitAxes=[]):
"""return the data that is needed for plotting the fitting result"""
"""Parameter order: 0...a, 1...xc, 2...dx, 3...y0"""
self.params = [Parameter(v) for v in param]
def f(x): return self.params[0]()/(1+np.exp(-(x-self.params[1]())/self.params[2]()))+self.params[3]()
self.simpleFitAllAxes(f, array, errarray, xmin, xmax, fitAxes)
return self.generateDataFromParameters(f, [np.amin(array[0,:]), np.amax(array[0,:])], np.size(fitAxes)+1, xmin, xmax, fitAxes)
def getInitialParameters(self,data):
"""find the best initial values and return them"""
dx = np.abs(data[0,0] - data[0,-1])
mi = np.amin(data[1,:])
ma = np.amax(data[1,:])
xc = (np.amax(data[0,:])-np.amin(data[0,:]))/2+np.amin(data[0,:])
return [ma-mi,xc,dx*2,mi]
def getParameters(self):
"""return the fit parameters"""
return np.array(["a","xc","dx","y0"])
def getFitModelStr(self):
"""return a string of the implemented fitting model, i.e. 'linear fit (y=A*x +B)'"""
return "Sigmoidal"
def getResultStr(self):
"""return a special result, i.e. 'Frequency = blabla'"""
return "nothing fitted"
| wakalixes/sqldataplot | plugins/pluginFitSigmoidal.py | Python | gpl-2.0 | 1,665 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
""" sha1Hash_test.py
Unit tests for sha1.py
"""
from crypto.hash.sha1Hash import SHA1
import unittest
import struct
assert struct.calcsize('!IIIII') == 20, '5 integers should be 20 bytes'
class SHA1_FIPS180_TestCases(unittest.TestCase):
""" SHA-1 tests from FIPS180-1 Appendix A, B and C """
def testFIPS180_1_Appendix_A(self):
""" APPENDIX A. A SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abc'
message_digest = 0xA9993E36L, 0x4706816AL, 0xBA3E2571L, 0x7850C26CL, 0x9CD0D89DL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix A test Failed'
def testFIPS180_1_Appendix_B(self):
""" APPENDIX B. A SECOND SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'
message_digest = 0x84983E44L, 0x1C3BD26EL, 0xBAAE4AA1L, 0xF95129E5L, 0xE54670F1L
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix B test Failed'
def testFIPS180_1_Appendix_C(self):
""" APPENDIX C. A THIRD SAMPLE MESSAGE AND ITS MESSAGE DIGEST
Let the message be the binary-coded form of the ASCII string which consists
of 1,000,000 repetitions of "a". """
hashAlg = SHA1()
message = 1000000*'a'
message_digest = 0x34AA973CL, 0xD4C4DAA4L, 0xF61EEB2BL, 0xDBAD2731L, 0x6534016FL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix C test Failed'
def _toBlock(binaryString):
""" Convert binary string to blocks of 5 words of uint32() """
return [uint32(word) for word in struct.unpack('!IIIII', binaryString)]
def _toBString(block):
""" Convert block (5 words of 32 bits to binary string """
return ''.join([struct.pack('!I',word) for word in block])
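# Example: struct.pack('!I', 0x61626364) is 'abcd', so
# _toBString((0x61626364, 0, 0, 0, 0)) starts with 'abcd'.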
if __name__ == '__main__':
# Run the tests from the command line
unittest.main()
| dknlght/dkodi | src/script.module.cryptopy/lib/crypto/hash/sha1Hash_test.py | Python | gpl-2.0 | 2,199 |
#!C:\Users\SeanSaito\Dev\aviato\flask\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'kartograph.py==0.6.8','console_scripts','kartograph'
__requires__ = 'kartograph.py==0.6.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('kartograph.py==0.6.8', 'console_scripts', 'kartograph')()
)
| hrishioa/Aviato | flask/Scripts/kartograph-script.py | Python | gpl-2.0 | 364 |
# Copyright 2006 John Duda
# This file is part of Infoshopkeeper.
# Infoshopkeeper is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or any later version.
# Infoshopkeeper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Infoshopkeeper; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
from wxPython.wx import *
import os
import datetime
from objects.emprunt import Emprunt
from popups.members import AddMemberPanel, ShowMembersPanel
class CheckoutPopup(wxDialog):
def __init__(self, parent):
self.parent=parent
wxDialog.__init__(self, parent,-1,"Check out items")
self.mastersizer = wxBoxSizer(wxVERTICAL)
self.static1 = wxStaticText(self, -1, "Check out to :")
self.mastersizer.Add(self.static1)
self.notebook = wxNotebook(self, -1, style=wxNB_TOP)
self.new_member_panel = AddMemberPanel(parent=self.notebook, main_window=parent,
on_successful_add=self.Borrow, cancel=self.Close)
self.notebook.AddPage(self.new_member_panel, "New member")
self.show_member_panel = ShowMembersPanel(parent=self.notebook, main_window=parent, motherDialog=self, on_select=self.Borrow)
self.notebook.AddPage(self.show_member_panel, "Existing member")
self.mastersizer.Add(self.notebook)
self.SetSizer(self.mastersizer)
for i in self.parent.orderbox.items:
print i.database_id, "... ", i.id
#self.b = wxButton(self, -1, "Checkout", (15, 80))
#EVT_BUTTON(self, self.b.GetId(), self.Checkout)
#self.b.SetDefault()
self.mastersizer.SetSizeHints(self)
def Borrow(self, id):
borrower = self.parent.membersList.get(id)
print borrower
for i in self.parent.orderbox.items:
# Check if this works on SQLObject 0.7... I got
# lots of problems on 0.6.1, and itemID __isn't__
# defined in Emprunt, which is plain weirdness
e = Emprunt(borrower = id, itemID=i.database_id)
print i.database_id
self.parent.orderbox.setBorrowed()
self.parent.orderbox.void()
self.Close()
def OnCancel(self,event):
self.EndModal(1)
def Checkout(self,event):
borrower=self.borrower.GetValue()
if len(borrower)>0:
today="%s" % datetime.date.today()
self.parent.orderbox.change_status(today+"-"+borrower)
self.parent.orderbox.void()
self.Close()
| johm/infoshopkeeper | popups/checkout.py | Python | gpl-2.0 | 2,958 |
from datetime import *
from Tweetstream import *
from UserAnalyser import *
from TimeAnalyser import *
import math
import sys
import pickle
# Frequency over the common
def load_list(filein):
d = dict()
for l in filein:
l = eval(l)
d[l[0]] = l[1]
return d
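# Each input line is assumed to be a Python literal pair such as
# ('user_id', ['follower_1', 'follower_2']); eval() parses it and the
# first element keys the second.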
if __name__ == "__main__":
follow = load_list(open(sys.argv[5], 'r'))
keywords = open(sys.argv[2], 'r').readline().strip("\n").split(",")
userstream = Tweetstream(jsonfilee=sys.argv[3], jsonformat=False, keywords=keywords)
topicstream = Tweetstream(jsonfilee=sys.argv[1], jsonformat=False, keywords=keywords)
ua = UserAnalyser (sys.argv[4], keywords = keywords)
ua.load_usersVectors()
ua.load_idf()
ua.load_usersScore()
rank = dict()
# normalize by the number of keywords in the topic vector
c = 0
for t in userstream:
rank[t['id']] = 0
n = 0
if t['user_id'] in follow:
c += 1
for fuser in follow[t['user_id']]:
if fuser in ua.usersScore:
rank[t['id']] += ua.usersScore[fuser]
n += 1
if n > 0: rank[t['id']] /= n
print c
# print score, nwindow
pickle.dump(rank, open(sys.argv[4]+"_rank_USER_followers.pick", 'w'), pickle.HIGHEST_PROTOCOL)
| agdavis/contextual_features | gen_user_followers.py | Python | gpl-2.0 | 1,139 |
class Solution(object):
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
width, height = len(matrix[0]), len(matrix)
for i in xrange(height):
foundzero = False
for j in xrange(width):
if matrix[i][j] == 0:
foundzero = True
matrix[i][j] = float("inf")
if not foundzero:
continue
for j in xrange(width):
if matrix[i][j] != float("inf"):
matrix[i][j] = 0
for i in xrange(width):
foundtarget = False
for j in xrange(height):
if matrix[j][i] == float("inf"):
foundtarget = True
break
if not foundtarget:
continue
for j in xrange(height):
matrix[j][i] = 0
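# A quick worked example: [[1, 1, 1], [1, 0, 1], [1, 1, 1]] becomes
# [[1, 0, 1], [0, 0, 0], [1, 0, 1]]. The float("inf") marker assumes
# the input matrix never legitimately contains infinities.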
| hufeiya/leetcode | python/73_Set_Matrix_Zeroes.py | Python | gpl-2.0 | 1,004 |
"""
core/api/serializers.py is the module for core model api data serializers
"""
#import core django module
from django.contrib.auth.models import User, Permission
#import external modules
from rest_framework import serializers
#import project modules
from core.models import (Product, ProductCategory, UnitOfMeasurement, UOMCategory, CompanyCategory, Company,
Currency, Rate, Contact, Address, EmployeeCategory, Employee, ProductPresentation,
ModeOfAdministration, ProductItem, ProductFormulation)
class UserSerializer(serializers.ModelSerializer):
"""
REST API serializer for User model
"""
class Meta:
model = User
class BaseModelSerializer(serializers.ModelSerializer):
"""
Base Model Serializer for models
"""
created_by = UserSerializer(required=False, read_only=True)
modified_by = UserSerializer(required=False, read_only=True)
class ProductCategorySerializer(BaseModelSerializer):
"""
REST API Serializer for ProductCategory model
"""
class Meta:
model = ProductCategory
class ProductSerializer(BaseModelSerializer):
"""
REST API Serializer for Product models
"""
class Meta:
model = Product
class UOMCategorySerializer(BaseModelSerializer):
"""
REST API Serializer for UOMCategory model
"""
class Meta:
model = UOMCategory
class UnitOfMeasurementSerializer(BaseModelSerializer):
"""
REST API Serializer for UnitOfMeasurement model
"""
class Meta:
model = UnitOfMeasurement
class CompanyCategorySerializer(BaseModelSerializer):
"""
REST API serializer for CompanyCategory model
"""
class Meta:
model = CompanyCategory
class CompanySerializer(BaseModelSerializer):
"""
REST API serializer for Company model
"""
class Meta:
model = Company
class CurrencySerializer(BaseModelSerializer):
"""
REST API serializer for Currency model
"""
class Meta:
model = Currency
fields = ('code', 'name', 'symbol', 'symbol_position', 'rates',)
class RateSerializer(BaseModelSerializer):
"""
REST API serializer for Rate model
"""
class Meta:
model = Rate
class ContactSerializer(BaseModelSerializer):
"""
REST API serializer for Contact model
"""
class Meta:
model = Contact
class AddressSerializer(BaseModelSerializer):
"""
REST API serializer for Address model
"""
class Meta:
model = Address
class EmployeeCategorySerializer(BaseModelSerializer):
"""
REST API serializer for EmployeeCategory
"""
class Meta:
model = EmployeeCategory
class EmployeeSerializer(BaseModelSerializer):
"""
REST API serializer for Employee
"""
class Meta:
model = Employee
class PermissionSerializer(BaseModelSerializer):
"""
REST API serializer for Permission model
"""
class Meta:
model = Permission
class ProductPresentationSerializer(BaseModelSerializer):
"""
REST API serializer for ProductPresentation model
"""
class Meta:
model = ProductPresentation
class ModeOfAdministrationSerializer(BaseModelSerializer):
"""
REST API serializer for ModeOfAdministration model
"""
class Meta:
model = ModeOfAdministration
class ProductItemSerializer(BaseModelSerializer):
"""
REST API serializer for ProductItem model
"""
class Meta:
model = ProductItem
class ProductFormulationSerializer(BaseModelSerializer):
"""
REST API serializer for ProductFormulation model, it can be Lyophilized, Liquid or Not Applicable
"""
class Meta:
model = ProductFormulation
| eHealthAfrica/LMIS | LMIS/core/api/serializers.py | Python | gpl-2.0 | 3,856 |
def hamming(s, t):
dist = 0
for x in range(len(s)):
if s[x] != t[x]:
dist += 1
return dist
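# Example: hamming("GAGCCTACTAACGGGAT", "CATCGTAATGACGGCCT") == 7
# (the Rosalind sample pair). An equivalent one-liner would be
# sum(1 for a, b in zip(s, t) if a != b).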
| adijo/rosalind | old/hamming_distance.py | Python | gpl-2.0 | 100 |
# -------------------------------------------------------------------------------------------------
# Rick, a Rust intercal compiler. Save your souls!
#
# Copyright (c) 2015-2021 Georg Brandl
#
# This program is free software; you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# -------------------------------------------------------------------------------------------------
import os
import sys
import time
import difflib
from os import path
from subprocess import Popen, PIPE, STDOUT
already_compiled = set()
def run_test(testname, testcode, compiled):
stdin = b''
if path.isfile(testname + '.tst'):
with open(testname + '.tst', 'rb') as stdinfile:
stdin = stdinfile.read()
with open(testname + '.chk', 'r') as stdoutfile:
stdout = stdoutfile.read()
def check(proc, remove_cargo):
real_stdout, _ = proc.communicate(stdin)
real_stdout = real_stdout.decode()
# remove cargo's "Running" line
if remove_cargo:
errindex = real_stdout.find('An unknown error occurred')
if errindex == -1:
errindex = real_stdout.find('error: Process didn\'t exit successfully')
if errindex > -1:
real_stdout = real_stdout[:errindex]
if real_stdout != stdout:
print('*** ERROR: standard output does not match check file')
print(''.join(difflib.unified_diff(stdout.splitlines(True),
real_stdout.splitlines(True))))
raise RuntimeError
print('')
print('>>> Test: ' + testname)
print(' > Step 1: interpreted')
check(Popen(['cargo', 'run', '--release', '-q', '--', '-Rbi', testcode],
stdin=PIPE, stdout=PIPE, stderr=STDOUT), True)
print(' > Step 2: interpreted + optimized')
check(Popen(['cargo', 'run', '--release', '-q', '--', '-Rbio', testcode],
stdin=PIPE, stdout=PIPE, stderr=STDOUT), True)
if compiled:
print(' > Step 3: compiled + optimized')
if testcode not in already_compiled:
if os.system('cargo run --release -q -- -RFbo %s > /dev/null' % testcode) != 0:
print('*** ERROR: compilation failed')
raise RuntimeError
already_compiled.add(testcode)
check(Popen([testcode[:-2]], stdin=PIPE, stdout=PIPE, stderr=STDOUT),
False)
def main():
start = time.time()
compile_flag = '--nocompile' not in sys.argv
skip_flag = '--all' not in sys.argv
tests = [path.splitext(test.replace('/', os.sep))[0]
for test in sys.argv[1:] if not test.startswith('-')]
print('Building...')
if os.system('cargo build --release') != 0:
return 2
print('Running tests, please wait...')
passed = 0
total = 0
failed = []
for root, dirs, files in os.walk('code'):
dirs.sort()
for fn in sorted(files):
if not fn.endswith('.chk'):
continue
if skip_flag and fn.startswith(('fft-', 'flonck', 'unlambda')):
continue
testname = path.join(root, fn)[:-4]
if tests and testname not in tests:
continue
testcode = testname + '.i'
# special case
if fn.startswith('fft-'):
testcode = path.join(root, 'fft.i')
elif fn.startswith('life-'):
testcode = path.join(root, 'life2.i')
if not path.isfile(testcode):
print('')
print('*** WARNING: found %s.chk, but not %s' % (testname, testcode))
continue
total += 1
try:
t1 = time.time()
run_test(testname, testcode, compile_flag)
t2 = time.time()
passed += 1
print('--- passed (%5.2f sec)' % (t2 - t1))
except RuntimeError:
failed.append(testname)
end = time.time()
print('')
print('RESULT: %d/%d tests passed (%6.2f sec)' % (passed, total, end - start))
if failed:
print('Failed:')
for testname in failed:
print(' ' + testname)
return 0 if passed == total else 1
if __name__ == '__main__':
sys.exit(main())
| birkenfeld/rick | test.py | Python | gpl-2.0 | 4,893 |
"""
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <[email protected]>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
def logonUser(loginString):
"""
Login as specified user and return handle.
loginString: 'Domain\nUser\nPassword'; for local
login use . or empty string as domain
e.g. '.\nadministrator\nsecret_password'
"""
domain, user, passwd = loginString.split('\n')
return win32security.LogonUser(
user,
domain,
passwd,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT
)
class Process:
"""
A Windows process.
"""
def __init__(self, cmd, login=None,
hStdin=None, hStdout=None, hStderr=None,
show=1, xy=None, xySize=None,
desktop=None):
"""
Create a Windows process.
cmd: command to run
login: run as user 'Domain\nUser\nPassword'
hStdin, hStdout, hStderr:
handles for process I/O; default is caller's stdin,
stdout & stderr
show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
xy: window offset (x, y) of upper left corner in pixels
xySize: window size (width, height) in pixels
desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
None = inherit current desktop
'' = create new desktop if necessary
User calling login requires additional privileges:
Act as part of the operating system [not needed on Windows XP]
Increase quotas
Replace a process level token
Login string must EITHER be an administrator's account
(ordinary user can't access current desktop - see Microsoft
Q165194) OR use desktop='' to run another desktop invisibly
(may be very slow to startup & finalize).
"""
si = win32process.STARTUPINFO()
si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
win32con.STARTF_USESHOWWINDOW)
if hStdin is None:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
si.hStdInput = hStdin
if hStdout is None:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdout
if hStderr is None:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStderr
si.wShowWindow = show
if xy is not None:
si.dwX, si.dwY = xy
si.dwFlags ^= win32con.STARTF_USEPOSITION
if xySize is not None:
si.dwXSize, si.dwYSize = xySize
si.dwFlags ^= win32con.STARTF_USESIZE
if desktop is not None:
si.lpDesktop = desktop
procArgs = (None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32process.CREATE_NEW_CONSOLE, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si) # startupinfo
if login is not None:
hUser = logonUser(login)
win32security.ImpersonateLoggedOnUser(hUser)
procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
win32security.RevertToSelf()
else:
procHandles = win32process.CreateProcess(*procArgs)
self.hProcess, self.hThread, self.PId, self.TId = procHandles
def wait(self, mSec=None):
"""
Wait for process to finish or for specified number of
milliseconds to elapse.
"""
if mSec is None:
mSec = win32event.INFINITE
return win32event.WaitForSingleObject(self.hProcess, mSec)
def kill(self, gracePeriod=5000):
"""
Kill process. Try for an orderly shutdown via WM_CLOSE. If
still running after gracePeriod (5 sec. default), terminate.
"""
win32gui.EnumWindows(self.__close__, 0)
if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
win32process.TerminateProcess(self.hProcess, 0)
win32api.Sleep(100) # wait for resources to be released
def __close__(self, hwnd, dummy):
"""
EnumWindows callback - sends WM_CLOSE to any window
owned by this process.
"""
TId, PId = win32process.GetWindowThreadProcessId(hwnd)
if PId == self.PId:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
def exitCode(self):
"""
Return process exit code.
"""
return win32process.GetExitCodeProcess(self.hProcess)
def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
"""
Run cmd as a child process and return exit code.
mSec: terminate cmd after specified number of milliseconds
stdin, stdout, stderr:
file objects for child I/O (use hStdin etc. to attach
handles instead of files); default is caller's stdin,
stdout & stderr;
kw: see Process.__init__ for more keyword options
"""
if stdin is not None:
kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
if stdout is not None:
kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
if stderr is not None:
kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
child = Process(cmd, **kw)
if child.wait(mSec) != win32event.WAIT_OBJECT_0:
child.kill()
raise WindowsError, 'process timeout exceeded'
return child.exitCode()
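# A usage sketch (command and values are illustrative): run a hidden
# shell command with a 10 second limit and the caller's standard I/O:
# rc = run('cmd.exe /c dir', mSec=10000, show=0)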
if __name__ == '__main__':
# Pipe commands to a shell and display the output in notepad
print 'Testing winprocess.py...'
import tempfile
timeoutSeconds = 15
cmdString = """\
REM Test of winprocess.py piping commands to a shell.\r
REM This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds
cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
cmd.write(cmdString)
cmd.seek(0)
print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
stdout=out, stderr=out)
cmd.close()
print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
show=win32con.SW_MAXIMIZE,
mSec=timeoutSeconds*1000)
out.close()
| alexei-matveev/ccp1gui | jobmanager/winprocess.py | Python | gpl-2.0 | 7,039 |
# pylint: disable = C0301
from bs4 import BeautifulSoup
from urllib2 import urlopen
import pandas as pd
pos_idx_map = {
'qb': 2,
'rb': 3,
'wr': 4,
'te': 5,
}
def make_url(pos, wk):
ii = pos_idx_map[pos]
fstr = "http://fantasydata.com/nfl-stats/nfl-fantasy-football-stats.aspx?fs=1&stype=0&sn=1&w=%s&s=&t=0&p=%s&st=FantasyPointsPPR&d=1&ls=&live=false" \
% (wk, ii)
return fstr
def html2df(soup):
table = soup.find('table')
headers = [header.text.lower() for header in table.find_all('th')]
rows = []
for row in table.find_all('tr'):
rows.append([val.text.encode('utf8') for val in row.find_all('td')])
rows = [rr for rr in rows if len(rr) > 0]
df = pd.DataFrame.from_records(rows)
df.columns = headers
return df
def position_html_local(posn):
dflist = []
for ii in range(1, 17):
fname = '%s%s.html' % (posn, ii)
with open(fname) as f:
df = html2df(BeautifulSoup(f))
df['wk'] = ii
df.columns = header_clean(df.columns, posn)
dflist.append(df)
return pd.concat(dflist)
def position_html(posn):
dflist = []
for ii in range(1, 17):
fname = make_url(posn, ii)
df = html2df(BeautifulSoup(urlopen(fname)))
df['wk'] = ii
df.columns = header_clean(df.columns, posn)
dflist.append(df)
return pd.concat(dflist)
pos_header_suffixes = {
'qb': ['_pass', '_rush'],
'rb': ['_rush', '_recv'],
'wr': ['_recv'],
'te': ['_recv'],
}
exclude_cols = ['rk', 'player', 'team', 'pos', 'fantasy points',
'wk', 'fum', 'lost', 'qb rating']
def header_clean(header, posn):
res = []
if posn in pos_header_suffixes:
suffixes = pos_header_suffixes[posn]
seen_dict = {hh: 0 for hh in header}
for hh in header:
if hh not in exclude_cols:
hres = hh + suffixes[seen_dict[hh]]
seen_dict[hh] += 1
res.append(hres)
else:
res.append(hh)
else:
res = header
return res
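# A sketch of the disambiguation above: for posn='qb' a column name
# like 'yds' that appears twice becomes 'yds_pass' on its first
# occurrence and 'yds_rush' on its second, following suffix order.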
if __name__ == '__main__':
data_all = {}
for pp in ['qb', 'wr', 'rb', 'te']:
data_all[pp] = position_html_local(pp)
data_all[pp].to_pickle('%s.pkl' % pp)
| yikelu/nfl_fantasy_data | htmls2csvs.py | Python | gpl-2.0 | 2,265 |
#!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class JsonErrorResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Swagger model
:param dict swaggerTypes: The key is attribute name and the value is attribute type.
:param dict attributeMap: The key is attribute name and the value is json key in definition.
"""
self.swagger_types = {
'status': 'str',
'message': 'str'
}
self.attribute_map = {
'status': 'status',
'message': 'message'
}
# Status: \"ok\" or \"error\"
self.status = None # str
# Error message
self.message = None # str
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'swaggerTypes' and p != 'attributeMap':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| QuantiModo/QuantiModo-SDK-Python | SwaggerPetstore/models/json_error_response.py | Python | gpl-2.0 | 1,773 |
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import inspect
import os
import sys
import threading
import time
import wx
from CommonMark import commonmark
from ctypes import c_ulonglong, windll
from datetime import datetime as dt, timedelta as td
from docutils.core import publish_parts as ReSTPublishParts
from docutils.writers.html4css1 import Writer
from functools import update_wrapper
from os.path import abspath, dirname, exists, join
from types import ClassType
# Local imports
import eg
__all__ = [
"Bunch", "NotificationHandler", "LogIt", "LogItWithReturn", "TimeIt",
"AssertInMainThread", "AssertInActionThread", "ParseString", "SetDefault",
"EnsureVisible", "VBoxSizer", "HBoxSizer", "EqualizeWidths", "AsTasklet",
"ExecFile", "GetTopLevelWindow",
]
USER_CLASSES = (type, ClassType)
class Bunch(object):
"""
Universal collection of a bunch of named stuff.
Often we want to just collect a bunch of stuff together, naming each
item of the bunch. A dictionary is OK for that; however, when names are
constants and to be used just like variables, the dictionary-access syntax
("if bunch['squared'] > threshold", etc) is not maximally clear. It takes
very little effort to build a little class, as in this 'Bunch', that will
both ease the initialisation task and provide elegant attribute-access
syntax ("if bunch.squared > threshold", etc).
Usage is simple::
point = eg.Bunch(x=100, y=200)
# and of course you can read/write the named
# attributes you just created, add others, del
# some of them, etc, etc:
point.squared = point.x * point.y
if point.squared > threshold:
point.isok = True
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class HBoxSizer(wx.BoxSizer): #IGNORE:R0904
def __init__(self, *items):
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
self.AddMany(items)
class MyHtmlDocWriter(Writer):
def apply_template(self):
return """\
%(head_prefix)s
%(head)s
%(stylesheet)s
%(body_prefix)s
%(body_pre_docinfo)s
%(docinfo)s
%(body)s
%(body_suffix)s
""" % self.interpolation_dict()
HTML_DOC_WRITER = MyHtmlDocWriter()
class NotificationHandler(object):
__slots__ = ["listeners"]
def __init__(self):
self.listeners = []
class VBoxSizer(wx.BoxSizer): #IGNORE:R0904
def __init__(self, *items):
wx.BoxSizer.__init__(self, wx.VERTICAL)
self.AddMany(items)
def AppUrl(description, url):
if url:
txt = '<p><div align=right><i><font color="#999999" size=-1>%s <a href="%s">%s</a>.</font></i></div></p>' % (
eg.text.General.supportSentence,
url,
eg.text.General.supportLink
)
else:
return description
if description.startswith("<md>"):
description = description[4:]
description = DecodeMarkdown(description)
elif description.startswith("<rst>"):
description = description[5:]
description = DecodeReST(description)
return description + txt
def AssertInActionThread(func):
if not eg.debugLevel:
return func
def AssertWrapper(*args, **kwargs):
if eg.actionThread._ThreadWorker__thread != threading.currentThread():
raise AssertionError(
"Called outside ActionThread: %s() in %s" %
(func.__name__, func.__module__)
)
return func(*args, **kwargs)
return update_wrapper(AssertWrapper, func)
def AssertInMainThread(func):
if not eg.debugLevel:
return func
def AssertWrapper(*args, **kwargs):
if eg.mainThread != threading.currentThread():
raise AssertionError(
"Called outside MainThread: %s in %s" %
(func.__name__, func.__module__)
)
return func(*args, **kwargs)
return update_wrapper(AssertWrapper, func)
def AsTasklet(func):
def Wrapper(*args, **kwargs):
eg.Tasklet(func)(*args, **kwargs).run()
return update_wrapper(Wrapper, func)
def CollectGarbage():
import gc
#gc.set_debug(gc.DEBUG_SAVEALL)
#gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
from pprint import pprint
print "threshold:", gc.get_threshold()
print "unreachable object count:", gc.collect()
garbageList = gc.garbage[:]
for i, obj in enumerate(garbageList):
print "Object Num %d:" % i
pprint(obj)
#print "Referrers:"
#print(gc.get_referrers(o))
#print "Referents:"
#print(gc.get_referents(o))
print "Done."
#print "unreachable object count:", gc.collect()
#from pprint import pprint
#pprint(gc.garbage)
def DecodeMarkdown(source):
return commonmark(source)
def DecodeReST(source):
#print repr(source)
res = ReSTPublishParts(
source=PrepareDocstring(source),
writer=HTML_DOC_WRITER,
settings_overrides={"stylesheet_path": ""}
)
#print repr(res)
return res['body']
def EnsureVisible(window):
"""
Ensures the given wx.TopLevelWindow is visible on the screen.
Moves and resizes it if necessary.
"""
from eg.WinApi.Dynamic import (
sizeof, byref, GetMonitorInfo, MonitorFromWindow, GetWindowRect,
MONITORINFO, RECT, MONITOR_DEFAULTTONEAREST,
# MonitorFromRect, MONITOR_DEFAULTTONULL,
)
hwnd = window.GetHandle()
windowRect = RECT()
GetWindowRect(hwnd, byref(windowRect))
#hMonitor = MonitorFromRect(byref(windowRect), MONITOR_DEFAULTTONULL)
#if hMonitor:
# return
parent = window.GetParent()
if parent:
hwnd = parent.GetHandle()
hMonitor = MonitorFromWindow(hwnd, MONITOR_DEFAULTTONEAREST)
monInfo = MONITORINFO()
monInfo.cbSize = sizeof(MONITORINFO)
GetMonitorInfo(hMonitor, byref(monInfo))
displayRect = monInfo.rcWork
left = windowRect.left
right = windowRect.right
top = windowRect.top
bottom = windowRect.bottom
# shift the window horizontally into the display area
if left < displayRect.left:
right += (displayRect.left - left)
left = displayRect.left
if right > displayRect.right:
right = displayRect.right
elif right > displayRect.right:
left += (displayRect.right - right)
right = displayRect.right
if left < displayRect.left:
left = displayRect.left
# shift the window vertically into the display area
if top < displayRect.top:
bottom += (displayRect.top - top)
top = displayRect.top
if bottom > displayRect.bottom:
bottom = displayRect.bottom
elif bottom > displayRect.bottom:
top += (displayRect.bottom - bottom)
bottom = displayRect.bottom
if top < displayRect.top:
top = displayRect.top
# set the new position and size
window.SetRect((left, top, right - left, bottom - top))
def EqualizeWidths(ctrls):
maxWidth = max((ctrl.GetBestSize()[0] for ctrl in ctrls))
for ctrl in ctrls:
ctrl.SetMinSize((maxWidth, -1))
def ExecFile(filename, globals=None, locals=None):
"""
Replacement for the Python built-in execfile() function, but handles
unicode filenames right.
"""
FSE = sys.getfilesystemencoding()
flnm = filename.encode(FSE) if isinstance(filename, unicode) else filename
return execfile(flnm, globals, locals)
def GetBootTimestamp(unix_timestamp = True):
"""
Returns the time of the last system boot.
If unix_timestamp == True, the result is a unix timestamp.
Otherwise it is in human readable form.
"""
now = time.time()
GetTickCount64 = windll.kernel32.GetTickCount64
GetTickCount64.restype = c_ulonglong
up = GetTickCount64() / 1000.0
if not unix_timestamp:
st = str(dt.fromtimestamp(now - up))
return st if "." not in st else st[:st.index(".")]
return now - up
def GetClosestLanguage():
"""
Returns the language file closest to system locale.
"""
langDir = join(dirname(abspath(sys.executable)), "languages")
if exists(langDir):
locale = wx.Locale()
name = locale.GetLanguageCanonicalName(locale.GetSystemLanguage())
if exists(join(langDir, name + ".py")):
return name
else:
for f in [f for f in os.listdir(langDir) if f.endswith(".py")]:
if f.startswith(name[0:3]):
return f[0:5]
return "en_EN"
def GetFirstParagraph(text):
"""
Return the first paragraph of a description string.
The string can be encoded in HTML or reStructuredText.
The paragraph is returned as HTML.
"""
text = text.lstrip()
if text.startswith("<md>"):
text = text[4:]
text = DecodeMarkdown(text)
start = text.find("<p>")
end = text.find("</p>")
return text[start + 3:end].replace("\n", " ")
elif text.startswith("<rst>"):
text = text[5:]
text = DecodeReST(text)
start = text.find("<p>")
end = text.find("</p>")
return text[start + 3:end].replace("\n", " ")
else:
result = ""
for line in text.splitlines():
if line == "":
break
result += " " + line
return ' '.join(result.split())
def GetFuncArgString(func, args, kwargs):
classname = ""
argnames = inspect.getargspec(func)[0]
start = 0
if argnames:
if argnames[0] == "self":
classname = args[0].__class__.__name__ + "."
start = 1
res = []
append = res.append
for key, value in zip(argnames, args)[start:]:
append(str(key) + GetMyRepresentation(value))
for key, value in kwargs.items():
append(str(key) + GetMyRepresentation(value))
fname = classname + func.__name__
return fname, "(" + ", ".join(res) + ")"
def GetMyRepresentation(value):
"""
Give a shorter representation of some wx-objects. Returns normal repr()
for everything else. Also adds a "=" sign at the beginning to make it
useful as a "formatvalue" function for inspect.formatargvalues().
"""
typeString = repr(type(value))
if typeString.startswith("<class 'wx._core."):
return "=<wx.%s>" % typeString[len("<class 'wx._core."): -2]
if typeString.startswith("<class 'wx._controls."):
return "=<wx.%s>" % typeString[len("<class 'wx._controls."): -2]
return "=" + repr(value)
def GetTopLevelWindow(window):
"""
Returns the top level parent window of a wx.Window. This is in most
cases a wx.Dialog or wx.Frame.
"""
result = window
while True:
parent = result.GetParent()
if parent is None:
return result
elif isinstance(parent, wx.TopLevelWindow):
return parent
result = parent
def GetUpTime(seconds = True):
"""
Returns the system uptime in seconds.
If seconds == False, returns the number of days, hours, minutes and seconds.
"""
GetTickCount64 = windll.kernel32.GetTickCount64
GetTickCount64.restype = c_ulonglong
ticks = GetTickCount64() / 1000.0
if not seconds:
delta = str(td(seconds = ticks))
return delta if "." not in delta else delta[:delta.index(".")]
return ticks
def IsVista():
"""
Determine if we're running Vista or higher.
"""
return (sys.getwindowsversion()[0] >= 6)
def IsXP():
"""
Determine if we're running XP or higher.
"""
return (sys.getwindowsversion()[0:2] >= (5, 1))
def LogIt(func):
"""
Logs the function call, if eg.debugLevel is set.
"""
if not eg.debugLevel:
return func
if func.func_code.co_flags & 0x20:
raise TypeError("Can't wrap generator function")
def LogItWrapper(*args, **kwargs):
funcName, argString = GetFuncArgString(func, args, kwargs)
eg.PrintDebugNotice(funcName + argString)
return func(*args, **kwargs)
return update_wrapper(LogItWrapper, func)
def LogItWithReturn(func):
"""
Logs the function call and return, if eg.debugLevel is set.
"""
if not eg.debugLevel:
return func
def LogItWithReturnWrapper(*args, **kwargs):
funcName, argString = GetFuncArgString(func, args, kwargs)
eg.PrintDebugNotice(funcName + argString)
result = func(*args, **kwargs)
eg.PrintDebugNotice(funcName + " => " + repr(result))
return result
return update_wrapper(LogItWithReturnWrapper, func)
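# Hedged usage sketch for the two decorators above: _DemoAdd is a made-up
# function, and the debug notices only appear when eg.debugLevel is set.
@LogItWithReturn
def _DemoAdd(a, b):
    return a + b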
def ParseString(text, filterFunc=None):
start = 0
chunks = []
last = len(text) - 1
while 1:
pos = text.find('{', start)
if pos < 0:
break
if pos == last:
break
chunks.append(text[start:pos])
if text[pos + 1] == '{':
chunks.append('{')
start = pos + 2
else:
start = pos + 1
end = text.find('}', start)
if end == -1:
raise SyntaxError("unmatched bracket")
word = text[start:end]
res = None
if filterFunc:
res = filterFunc(word)
if res is None:
res = eval(word, {}, eg.globals.__dict__)
chunks.append(unicode(res))
start = end + 1
chunks.append(text[start:])
return "".join(chunks)
def PrepareDocstring(docstring):
"""
Convert a docstring into lines of parseable reST. Return it as a list of
lines usable for inserting into a docutils ViewList (used as argument
of nested_parse()). An empty line is added to act as a separator between
this docstring and following content.
"""
lines = docstring.expandtabs().splitlines()
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxint
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxint:
for i in range(1, len(lines)):
lines[i] = lines[i][margin:]
# Remove any leading blank lines.
while lines and not lines[0]:
lines.pop(0)
# make sure there is an empty line at the end
if lines and lines[-1]:
lines.append('')
return "\n".join(lines)
def Reset():
eg.stopExecutionFlag = True
eg.programCounter = None
del eg.programReturnStack[:]
eg.eventThread.ClearPendingEvents()
eg.actionThread.ClearPendingEvents()
eg.PrintError("Execution stopped by user")
def SetDefault(targetCls, defaultCls):
targetDict = targetCls.__dict__
for defaultKey, defaultValue in defaultCls.__dict__.iteritems():
if defaultKey not in targetDict:
setattr(targetCls, defaultKey, defaultValue)
elif type(defaultValue) in USER_CLASSES:
SetDefault(targetDict[defaultKey], defaultValue)
def SplitFirstParagraph(text):
"""
    Split a description string into its first paragraph and the remaining
    text, returned as a tuple. The string can be encoded in Markdown or
    reStructuredText; in those cases both parts are returned as HTML.
"""
text = text.lstrip()
if text.startswith("<md>"):
text = text[4:]
text = DecodeMarkdown(text)
start = text.find("<p>")
end = text.find("</p>")
return (
text[start + 3:end].replace("\n", " "),
text[end + 4:].replace("\n", " ")
)
elif text.startswith("<rst>"):
text = text[5:]
text = DecodeReST(text)
start = text.find("<p>")
end = text.find("</p>")
return (
text[start + 3:end].replace("\n", " "),
text[end + 4:].replace("\n", " ")
)
else:
result = ""
remaining = ""
lines = text.splitlines()
for i, line in enumerate(lines):
if line.strip() == "":
remaining = " ".join(lines[i:])
break
result += " " + line
return ' '.join(result.split()), remaining
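# Hedged sketch of the split above on a plain-text description:
def _DemoSplitFirstParagraph():
    first, rest = SplitFirstParagraph(
        "A short summary line.\n\nMore detail follows here."
    )
    # first == "A short summary line."; rest holds the remaining lines
    return first, rest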
def TimeIt(func):
""" Decorator to measure the execution time of a function.
Will print the time to the log.
"""
if not eg.debugLevel:
return func
def TimeItWrapper(*args, **kwargs):
startTime = time.clock()
funcName, _ = GetFuncArgString(func, args, kwargs)
res = func(*args, **kwargs)
eg.PrintDebugNotice(funcName + " :" + repr(time.clock() - startTime))
return res
return update_wrapper(TimeItWrapper, func)
def UpdateStartupShortcut(create):
from eg import Shortcut
path = os.path.join(
eg.folderPath.Startup,
eg.APP_NAME + ".lnk"
)
if os.path.exists(path):
os.remove(path)
if create:
if not os.path.exists(eg.folderPath.Startup):
os.makedirs(eg.folderPath.Startup)
Shortcut.Create(
path=path,
target=os.path.abspath(sys.executable),
arguments="-h -e OnInitAfterBoot",
startIn=os.path.dirname(os.path.abspath(sys.executable)),
)
| WoLpH/EventGhost | eg/Utils.py | Python | gpl-2.0 | 17,930 |
#!/bin/python
import re
import sys
import os
from datetime import date
class VersionHandler:
def __init__(self, file):
self.file = file
self.major = 0
self.minor = 0
self.revision = 0
self.build = 1
self.touch()
def read(self):
try:
f = open(self.file, 'r')
lines = f.readlines()
f.close()
for line in lines:
self.readline(line)
except IOError as e:
print 'File not found: %s (%s)'%(self.file, e)
sys.exit(1)
def write(self):
try:
d = os.path.dirname(self.file)
if not os.path.exists(d):
os.makedirs(d)
f = open(self.file, 'w')
f.write('version=%d.%d.%d\n'%(self.major, self.minor, self.revision))
f.write('build=%d\n'%(self.build))
f.write('date=%s\n'%(self.date))
f.close()
except IOError as e:
print 'Failed to update: %s (%s)'%(self.file, e)
sys.exit(1)
def readline(self, line):
line = line.strip('\r\n\t ')
if len(line) == 0:
return
try:
m = re.search('(.*)=(.*)$', line)
if not m:
print 'Failed to parse line: %s'%(line.strip('\n\t '))
return
self.set(m.group(1), m.group(2))
        except (IndexError, AttributeError) as e:
print 'Failed to parse line: %s (%s)'%(line.strip('\n\t '),e)
def set(self,k,v):
if k == 'version':
            # \d+ allows multi-digit version components and the escaped
            # dots only match literal separators (e.g. "0.4.12")
            m = re.search(r'(\d+)\.(\d+)\.(\d+)', v)
(self.major, self.minor, self.revision) = [int(e) for e in m.groups()]
elif k == 'build':
self.build = int(v)
elif k == 'date':
self.date = v
def touch(self):
today = date.today()
self.date = today.isoformat()
def version(self):
return '%d.%d.%d.%d'%(self.major, self.minor, self.revision, self.build)
def datestr(self):
return '%s'%self.date
def __str__(self):
return 'version: %s, date %s'%(self.version(), self.date)
def __repr__(self):
return 'version: %s, date %s'%(self.version(), self.date)
def increment(self, key):
if key == 'build':
self.build += 1
elif key == 'revision':
self.revision += 1
self.build = 0
elif key == 'minor':
self.minor += 1
self.revision = 0
self.build = 0
elif key == 'major':
self.major += 1
self.minor = 0
self.revision = 0
self.build = 0
def print_version(self):
print '%d.%d.%d.%d'%(self.major, self.minor, self.revision, self.build)
def write_hpp(self, file):
d = os.path.dirname(file)
if not os.path.exists(d):
os.makedirs(d)
f = open(file, 'w')
(ignored, filename) = os.path.split(file)
name = filename.upper().replace('.', '_')
f.write('#ifndef %s\n'%name)
f.write('#define %s\n'%name)
f.write('#define PRODUCTVER %d,%d,%d,%d\n'%(self.major, self.minor, self.revision, self.build))
f.write('#define STRPRODUCTVER "%d.%d.%d.%d"\n'%(self.major, self.minor, self.revision, self.build))
f.write('#define STRPRODUCTDATE "%s"\n'%(self.date))
f.write('#endif // %s\n'%name)
f.close()
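# Hedged usage sketch; both file paths below are hypothetical.
def _demo_bump_build():
    vh = VersionHandler('build/version.txt')
    vh.read()
    vh.increment('build')
    vh.touch()
    vh.write()
    vh.write_hpp('include/version.hpp')
    vh.print_version()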
| mickem/nscp | build/python/VersionHandler.py | Python | gpl-2.0 | 2,824 |
"""Template filters and tags for helping with dates and datetimes"""
# pylint: disable=W0702,C0103
from django import template
from nav.django.settings import DATETIME_FORMAT, SHORT_TIME_FORMAT
from django.template.defaultfilters import date, time
from datetime import timedelta
register = template.Library()
@register.filter
def default_datetime(value):
"""Returns the date as represented by the default datetime format"""
try:
v = date(value, DATETIME_FORMAT)
except:
return value
return v
@register.filter
def short_time_format(value):
"""Returns the value formatted as a short time format
The SHORT_TIME_FORMAT is a custom format not available in the template
"""
try:
return time(value, SHORT_TIME_FORMAT)
except:
return value
@register.filter
def remove_microseconds(delta):
"""Removes microseconds from timedelta"""
try:
return delta - timedelta(microseconds=delta.microseconds)
except:
return delta
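# Hedged usage sketch in a template; the load name assumes this module is
# installed as the "date_and_time" template library:
#   {% load date_and_time %}
#   {{ alert.start_time|default_datetime }}
#   {{ alert.start_time|short_time_format }}
#   {{ alert.downtime|remove_microseconds }}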
| sigmunau/nav | python/nav/django/templatetags/date_and_time.py | Python | gpl-2.0 | 1,012 |
import time
import midipy as midi
midi.open(128, 0, "midipy test", 0)
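# (note, duration) pairs spelling out the opening phrases of
# "Happy Birthday", starting on C (MIDI note 48)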
for (note, t) in [(48,0.5),(48,0.5),(50,1.0),(48,1.0),(53,1.0),(52,1.0),
(48,0.5),(48,0.5),(50,1.0),(48,1.0),(55,1.0),(53,1.0)]:
midi.note_on(note,127)
time.sleep(t/2)
midi.note_off(note,127)
midi.close()
| tcoxon/wiitar | midipy_src/test.py | Python | gpl-2.0 | 311 |
import os
import os.path
from amuse.units import units
from amuse.datamodel import Particle
from amuse.ext.star_to_sph import pickle_stellar_model
from amuse.community.mesa.interface import MESA as stellar_evolution_code
from xiTau_parameters import triple_parameters
def evolve_giant(giant, stop_radius):
stellar_evolution = stellar_evolution_code()
giant_in_code = stellar_evolution.particles.add_particle(giant)
while (giant_in_code.radius < 0.7 | units.AU):
giant_in_code.evolve_one_step()
print "Giant starts to ascend the giant branch, now saving model every step..."
print giant_in_code.as_set()
i = 0
while (giant_in_code.radius < stop_radius):
giant_in_code.evolve_one_step()
print giant_in_code.radius, giant_in_code.age
pickle_file_name = "./model_{0:=04}_".format(i) + "%0.1f"%(giant_in_code.radius.value_in(units.AU))
pickle_stellar_model(giant_in_code, pickle_file_name)
i += 1
if __name__ == "__main__":
model_directory = os.path.join("../../../../../BIGDATA/code/amuse-10.0", "giant_models")
if not os.path.exists(model_directory):
os.mkdir(model_directory)
os.chdir(model_directory)
giant = Particle(mass = triple_parameters["mass_out"])
print "\nEvolving with", stellar_evolution_code.__name__
evolve_giant(giant, 1.0 | units.AU)
print "Done"
| hilaglanz/TCE | articles/A_evolve_outer_star_to_giant.py | Python | gpl-2.0 | 1,405 |
"""
WSGI config for ahaha project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ahaha.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| pimiento/captures | captures/wsgi.py | Python | gpl-2.0 | 1,132 |
from gi.repository import Gtk
import gourmet.gtk_extras.dialog_extras as de
from gourmet.plugin import RecDisplayModule, UIPlugin, MainPlugin, ToolPlugin
from .recipe_emailer import RecipeEmailer
from gettext import gettext as _
class EmailRecipePlugin (MainPlugin, UIPlugin):
ui_string = '''
<menubar name="RecipeIndexMenuBar">
<menu name="Tools" action="Tools">
<placeholder name="StandaloneTool">
<menuitem action="EmailRecipes"/>
</placeholder>
</menu>
</menubar>
'''
def setup_action_groups (self):
self.actionGroup = Gtk.ActionGroup(name='RecipeEmailerActionGroup')
self.actionGroup.add_actions([
('EmailRecipes',None,_('Email recipes'),
             None,_('Email all selected recipes (or all recipes if no recipes are selected)'),self.email_selected),
])
self.action_groups.append(self.actionGroup)
    def activate (self, pluggable):
        self.rg = self.pluggable = pluggable
        # cache the recipe database so get_selected_recs can fall back to
        # fetching every recipe; assumes the pluggable exposes .rd, as the
        # main Gourmet application object does
        self.rd = pluggable.rd
        self.add_to_uimanager(pluggable.ui_manager)
def get_selected_recs (self):
recs = self.rg.get_selected_recs_from_rec_tree()
if not recs:
recs = self.rd.fetch_all(self.rd.recipe_table, deleted=False, sort_by=[('title',1)])
return recs
def email_selected (self, *args):
recs = self.get_selected_recs()
l = len(recs)
if l > 20:
if not de.getBoolean(
title=_('Email recipes'),
# only called for l>20, so fancy gettext methods
# shouldn't be necessary if my knowledge of
# linguistics serves me
sublabel=_('Do you really want to email all %s selected recipes?')%l,
custom_yes=_('Yes, e_mail them'),
cancel=False,
):
return
re = RecipeEmailer(recs)
re.send_email_with_attachments()
| thinkle/gourmet | gourmet/plugins/email_plugin/emailer_plugin.py | Python | gpl-2.0 | 1,934 |
# encoding: utf-8
# module PIL._imaging
# from /usr/lib/python2.7/dist-packages/PIL/_imaging.so
# by generator 1.135
# no doc
# no imports
# Variables with simple values
DEFAULT_STRATEGY = 0
FILTERED = 1
FIXED = 4
HUFFMAN_ONLY = 2
jpeglib_version = '8.0'
PILLOW_VERSION = '2.5.1'
RLE = 3
zlib_version = '1.2.8'
# functions
def alpha_composite(*args, **kwargs): # real signature unknown
pass
def bit_decoder(*args, **kwargs): # real signature unknown
pass
def blend(*args, **kwargs): # real signature unknown
pass
def convert(*args, **kwargs): # real signature unknown
pass
def copy(*args, **kwargs): # real signature unknown
pass
def crc32(*args, **kwargs): # real signature unknown
pass
def draw(*args, **kwargs): # real signature unknown
pass
def effect_mandelbrot(*args, **kwargs): # real signature unknown
pass
def effect_noise(*args, **kwargs): # real signature unknown
pass
def eps_encoder(*args, **kwargs): # real signature unknown
pass
def fill(*args, **kwargs): # real signature unknown
pass
def fli_decoder(*args, **kwargs): # real signature unknown
pass
def font(*args, **kwargs): # real signature unknown
pass
def getcodecstatus(*args, **kwargs): # real signature unknown
pass
def getcount(*args, **kwargs): # real signature unknown
pass
def gif_decoder(*args, **kwargs): # real signature unknown
pass
def gif_encoder(*args, **kwargs): # real signature unknown
pass
def hex_decoder(*args, **kwargs): # real signature unknown
pass
def hex_encoder(*args, **kwargs): # real signature unknown
pass
def jpeg_decoder(*args, **kwargs): # real signature unknown
pass
def jpeg_encoder(*args, **kwargs): # real signature unknown
pass
def libtiff_decoder(*args, **kwargs): # real signature unknown
pass
def libtiff_encoder(*args, **kwargs): # real signature unknown
pass
def linear_gradient(*args, **kwargs): # real signature unknown
pass
def map_buffer(*args, **kwargs): # real signature unknown
pass
def msp_decoder(*args, **kwargs): # real signature unknown
pass
def new(*args, **kwargs): # real signature unknown
pass
def open_ppm(*args, **kwargs): # real signature unknown
pass
def outline(*args, **kwargs): # real signature unknown
pass
def packbits_decoder(*args, **kwargs): # real signature unknown
pass
def path(*args, **kwargs): # real signature unknown
pass
def pcd_decoder(*args, **kwargs): # real signature unknown
pass
def pcx_decoder(*args, **kwargs): # real signature unknown
pass
def pcx_encoder(*args, **kwargs): # real signature unknown
pass
def radial_gradient(*args, **kwargs): # real signature unknown
pass
def raw_decoder(*args, **kwargs): # real signature unknown
pass
def raw_encoder(*args, **kwargs): # real signature unknown
pass
def sun_rle_decoder(*args, **kwargs): # real signature unknown
pass
def tga_rle_decoder(*args, **kwargs): # real signature unknown
pass
def tiff_lzw_decoder(*args, **kwargs): # real signature unknown
pass
def wedge(*args, **kwargs): # real signature unknown
pass
def xbm_decoder(*args, **kwargs): # real signature unknown
pass
def xbm_encoder(*args, **kwargs): # real signature unknown
pass
def zip_decoder(*args, **kwargs): # real signature unknown
pass
def zip_encoder(*args, **kwargs): # real signature unknown
pass
# no classes
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PIL/_imaging.py | Python | gpl-2.0 | 3,425 |
# Copyright (C) 2008-2010 INRIA - EDF R&D
# Author: Damien Garaud
#
# This file is part of the PuppetMaster project. It checks the module
# 'network'.
#
# This script is free; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
import sys
import socket
import unittest
from puppetmaster import network
test_method_name = ['testInit', 'testGetValue', 'testUsedMemory',
'testAvailableHost', 'testLaunchCommand']
class NetworkTestCase(unittest.TestCase):
def __init__(self, methodName='runTest', host_file = None,
forced_ssh_config = False):
unittest.TestCase.__init__(self, methodName)
self.host_file = host_file
self.forced_ssh_config = forced_ssh_config
# If there is file.
        if self.host_file is None:
self.is_file = False
else:
self.is_file = True
def setUp(self):
import random
if self.is_file:
self.net = network.Network(self.host_file, self.forced_ssh_config)
# Just local host.
self.net_local = network.Network()
# The command which will be launched.
self.command = "echo 'Hello World!'"
def tearDown(self):
pass
def testInit(self):
# Checks the name and the number of cpu.
# For the local host.
self.assertTrue(self.net_local.hosts[0].name == socket.gethostname())
self.assertTrue(self.net_local.GetNhost() == 1)
self.assertTrue(self.net_local.hosts[0].connection)
# Is there a file?
if self.is_file:
self.assertTrue(self.net.GetNhost() > 0)
self.assertTrue(self.net.GetConnectedHostNumber() > 0)
# Wrong argument.
# An 'network' instance takes a list 'host' instance, list of string
# or a file.
self.assertRaises(ValueError, network.Network, 1)
self.assertRaises(ValueError, network.Network, [])
self.assertRaises(ValueError, network.Network, [1,2])
self.assertRaises(ValueError, network.Network, 'no_file')
def testGetValue(self):
# For the local host.
host_name = self.net_local.GetHostNames()
proc_num = self.net_local.GetProcessorNumber()
connected_num = self.net_local.GetConnectedHostNumber()
# 'host_name' must be a list of string.
self.assertTrue(isinstance(host_name, list))
self.assertTrue(isinstance(host_name[0], str))
# 'proc_num' must be a list of tuples (hostname, Nproc)
self.assertTrue(isinstance(proc_num, list))
self.assertTrue(isinstance(proc_num[0], tuple))
self.assertTrue(isinstance(proc_num[0][0], str))
self.assertTrue(isinstance(proc_num[0][1], int))
# 'connected_num' must be an integer greater than 0.
self.assertTrue(isinstance(connected_num, int))
# Checks size.
self.assertTrue(len(host_name) > 0)
self.assertTrue(len(proc_num[0]) == 2)
self.assertTrue(connected_num > 0)
# For a list of hosts.
if self.is_file:
host_name = self.net.GetHostNames()
proc_num = self.net.GetProcessorNumber()
connected_num = self.net.GetConnectedHostNumber()
# 'host_name' must be a list of string.
self.assertTrue(isinstance(host_name, list))
self.assertTrue(isinstance(host_name[0], str))
# 'proc_num' must be a list of tuples (hostname, Nproc)
self.assertTrue(isinstance(proc_num, list))
self.assertTrue(isinstance(proc_num[0], tuple))
self.assertTrue(isinstance(proc_num[0][0], str))
self.assertTrue(isinstance(proc_num[0][1], int))
# 'connected_num' must be an integer greater than 0.
self.assertTrue(isinstance(connected_num, int))
# Checks size.
self.assertTrue(len(host_name) > 0)
self.assertTrue(len(proc_num[0]) == 2)
self.assertTrue(connected_num > 0)
def testUsedMemory(self):
# Gets used memory ('free' Unix command).
# For the local host.
used_mem = self.net_local.GetUsedMemory()
# 'used_mem' must be a list of tuple (hostname, value).
self.assertTrue(isinstance(used_mem, list))
self.assertTrue(isinstance(used_mem[0], tuple))
self.assertTrue(isinstance(used_mem[0][0], str))
# Checks size.
self.assertTrue(len(used_mem) == 1)
self.assertTrue(len(used_mem[0]) == 2)
# For a list of hosts.
if self.is_file:
used_mem = self.net.GetUsedMemory()
# 'used_mem' must be a list of tuple (hostname, value).
self.assertTrue(isinstance(used_mem, list))
self.assertTrue(isinstance(used_mem[0], tuple))
self.assertTrue(isinstance(used_mem[0][0], str))
# Checks size.
self.assertTrue(len(used_mem) >= 1)
self.assertTrue(len(used_mem[0]) == 2)
def testAvailableHost(self):
# Gets available hosts (used 'uptime' Unix command).
# For the local host.
available_host = self.net_local.GetAvailableHosts()
# 'available_host' must be a list of tuple (hostname, available_cpu).
self.assertTrue(isinstance(available_host, list))
if len(available_host) > 0:
self.assertTrue(isinstance(available_host[0], tuple))
self.assertTrue(isinstance(available_host[0][0], str))
self.assertTrue(isinstance(available_host[0][1], int))
# For a list of hosts.
if self.is_file:
available_host = self.net.GetAvailableHosts()
# 'available_host' must be a list of tuple
# (hostname, available_cpu).
self.assertTrue(isinstance(available_host, list))
if len(available_host) > 0:
self.assertTrue(isinstance(available_host[0], tuple))
self.assertTrue(isinstance(available_host[0][0], str))
self.assertTrue(isinstance(available_host[0][1], int))
def testLaunchCommand(self):
import random
# For the local host.
status = self.net_local.LaunchInt(self.command)
statusout = self.net_local.LaunchFG(self.command)
popen4_instance = self.net_local.LaunchBG(self.command)
subproc = self.net_local.LaunchSubProcess(self.command)
wait_return = self.net_local.LaunchWait(self.command, 2., 0.2)
# Checks type.
self.assertTrue(isinstance(status, int))
self.assertTrue(isinstance(statusout, tuple))
self.assertTrue(isinstance(statusout[0], int))
self.assertTrue(isinstance(wait_return, tuple))
# The status must be '0'.
self.assertTrue(status == 0)
self.assertTrue(statusout[0] == 0)
self.assertTrue(popen4_instance.wait() == 0)
self.assertTrue(subproc.wait() == 0)
self.assertTrue(wait_return[0] == 0)
# For a random host.
if self.is_file:
index = random.randint(0, self.net.GetNhost() - 1)
random_host = self.net.hosts[index]
# Launches the command.
status = self.net.LaunchInt(self.command + ' 2>/dev/null',
random_host)
statusout = self.net.LaunchFG(self.command, random_host)
popen4_instance = self.net.LaunchBG(self.command, random_host)
subproc = self.net.LaunchSubProcess(self.command, random_host)
wait_return = self.net.LaunchWait(self.command, 2., 0.2,
random_host)
# Checks type.
self.assertTrue(isinstance(status, int))
self.assertTrue(isinstance(statusout, tuple))
self.assertTrue(isinstance(statusout[0], int))
self.assertTrue(isinstance(wait_return, tuple))
            # The status must be '0' if the connection did not fail.
if random_host.connection:
self.assertTrue(status == 0)
self.assertTrue(statusout[0] == 0)
self.assertTrue(popen4_instance.wait() == 0)
self.assertTrue(subproc.wait() == 0)
self.assertTrue(wait_return[0] == 0)
else:
self.assertTrue(status != 0)
self.assertTrue(statusout[0] != 0)
self.assertTrue(popen4_instance.wait() != 0)
self.assertTrue(subproc.wait() != 0)
self.assertTrue(wait_return[0] != 0)
if __name__ == '__main__':
unittest.main()
| garaud/puppetmaster | test/network_test.py | Python | gpl-2.0 | 8,973 |
#! /usr/bin/env python
"""
``climactic.suite``
-------------------
.. autoclass:: ClimacticTestSuite
"""
import logging
import unittest
from pathlib import Path
from climactic.case import ClimacticTestCase
logger = logging.getLogger(__name__)
class ClimacticTestSuite(unittest.TestSuite):
"""
A collection of tests.
"""
@classmethod
def from_targets(cls, *targets, **kwargs):
suite = cls()
tests = []
logger.trace("Processing target list {}", list(targets))
for target in targets:
logger.trace("Processing target '{}'", target)
try:
target_path = Path(target).resolve()
except FileNotFoundError:
logger.warn(
"Target '{}' could not be found", target
)
continue
if target_path.is_dir():
target_tests = cls.collect_dir(
target_path,
**kwargs
)
tests.extend(target_tests)
else:
for target_test in cls.collect_file(
target_path
):
tests.append(target_test)
suite.addTests(tests)
return suite
@classmethod
def from_dir(cls, dir_path, **kwargs):
return cls.from_targets(dir_path, **kwargs)
@classmethod
def collect_dir(cls, dir_path, recursive=True):
tests = []
dir_path = Path(dir_path)
logger.trace("+ Collecting dir {}", str(dir_path))
target_paths = dir_path.glob(
("**" if recursive else "*") +
"/test_*.yml"
)
for target_path in target_paths:
for test in cls.collect_file(
target_path,
base_path=dir_path
):
tests.append(test)
logger.trace("- Collecting dir {}", str(dir_path))
return tests
@classmethod
def collect_file(cls, target_path, base_path=None):
logger.trace(
" + Loading yml file {!r}",
str(target_path)
)
yield from ClimacticTestCase.from_path(
target_path,
base_path=base_path
)
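# Hedged usage sketch; the "tests" directory name is hypothetical.
def _demo_run_suite():
    suite = ClimacticTestSuite.from_dir("tests")
    return unittest.TextTestRunner(verbosity=2).run(suite)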
| b33j0r/climactic | climactic/suite.py | Python | gpl-2.0 | 2,252 |
#!hyphen-venv/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
migration = SQLALCHEMY_MIGRATE_REPO + \
'/versions/%03d_migration.py' % \
(api.db_version(
SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_MIGRATE_REPO) + 1)
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
script = api.make_update_script_for_model(
SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_MIGRATE_REPO,
tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as ' + migration
print 'Current database version: ' + \
str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)) | bkfunk/HyphenCMS | db_migrate.py | Python | gpl-2.0 | 1,012 |
# pylint:disable=R0201
from OpenOrange import *
from User import User
from RetroactiveAccounts import RetroactiveAccounts
class HeirFinder(RetroactiveAccounts):
def doReplacements(self, txt):
d = {1:"ONE", 2:"TWO"}
us = User.bring("USER")
txt = txt.replace(":1", us.Name + d[1])
return txt
def run(self):
query8 = self.getQuery()
query8.sql = self.doReplacements(query8.sql)
#pylint:disable=E6601
query8.open() #there will be missing tables here
| ancho85/pylint-playero-plugin | tests/input/func_noerror_query_heir.py | Python | gpl-2.0 | 522 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import wx
from .common import update_class
class Separator(wx.StaticLine):
def __init__(self, parent):
wx.StaticLine.__init__(self, parent.get_container(), -1,
wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
update_class(Separator)
| lunixbochs/fs-uae-gles | launcher/fs_uae_launcher/fsui/wx/separator.py | Python | gpl-2.0 | 394 |
# multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide. You are welcome to change
it in any way you see fit, so long as you don't touch our method
headers.
"""
def getAction(self, gameState):
"""
You do not need to change this method, but you're welcome to.
getAction chooses among the best options according to the evaluation function.
Just like in the previous project, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
"Add more of your code here if you want to"
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
Design a better evaluation function here.
The evaluation function takes in the current and proposed successor
GameStates (pacman.py) and returns a number, where higher numbers are better.
The code below extracts some useful information from the state, like the
remaining food (newFood) and Pacman position after moving (newPos).
newScaredTimes holds the number of moves that each ghost will remain
scared because of Pacman having eaten a power pellet.
Print out these variables to see what you're getting, then combine them
to create a masterful evaluation function.
"""
# Useful information you can extract from a GameState (pacman.py)
successorGameState = currentGameState.generatePacmanSuccessor(action)
newPos = successorGameState.getPacmanPosition()
newFood = successorGameState.getFood()
newGhostStates = successorGameState.getGhostStates()
newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
"*** YOUR CODE HERE ***"
        # recover the ghost's position from its string form, which looks
        # like "Ghost: (x,y)=(3.0, 4.0), North": slice out the coordinate
        # tuple and drop the ".0"s so it compares equal to str(newPos)
        ghost = str(newGhostStates[0])
        ghost = ghost[ghost.find('=') + 1 : ghost.rfind(',')]
        ghost = ghost.replace(".0", "")
#print newPos, newGhostStates[0]
if str(newPos) == ghost:
return -10
if newFood[newPos[0]][newPos[1]]:
return 3
if newScaredTimes[0] > 0:
return 10
return successorGameState.getScore()
def scoreEvaluationFunction(currentGameState):
"""
This default evaluation function just returns the score of the state.
The score is the same one displayed in the Pacman GUI.
This evaluation function is meant for use with adversarial search agents
(not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
This class provides some common elements to all of your
multi-agent searchers. Any methods defined here will be available
to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
You *do not* need to make any changes here, but you can if you want to
add functionality to all your adversarial search agents. Please do not
remove anything, however.
Note: this is an abstract class: one that should not be instantiated. It's
only partially specified, and designed to be extended. Agent (game.py)
is another abstract class.
"""
def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
self.index = 0 # Pacman is always agent index 0
self.evaluationFunction = util.lookup(evalFn, globals())
self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
"""
Your minimax agent (question 2)
"""
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
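# A minimal depth-limited minimax sketch built only from the calls
# documented above (isWin/isLose are assumed from pacman.GameState);
# illustrative only, not the assignment solution. Pacman (agent 0)
# maximizes, ghosts minimize, and depth drops once every agent has moved.
def _minimaxSketch(state, depth, agent, evalFn=scoreEvaluationFunction):
    if depth == 0 or state.isWin() or state.isLose():
        return evalFn(state)
    nextAgent = (agent + 1) % state.getNumAgents()
    nextDepth = depth - 1 if nextAgent == 0 else depth
    scores = [_minimaxSketch(state.generateSuccessor(agent, action),
                             nextDepth, nextAgent, evalFn)
              for action in state.getLegalActions(agent)]
    return max(scores) if agent == 0 else min(scores)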
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
Your minimax agent with alpha-beta pruning (question 3)
"""
def getAction(self, gameState):
"""
Returns the minimax action using self.depth and self.evaluationFunction
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
Your expectimax agent (question 4)
"""
def getAction(self, gameState):
"""
Returns the expectimax action using self.depth and self.evaluationFunction
All ghosts should be modeled as choosing uniformly at random from their
legal moves.
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
DESCRIPTION: <write something here so we know what you did>
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
# Abbreviation
better = betterEvaluationFunction
class ContestAgent(MultiAgentSearchAgent):
"""
Your agent for the mini-contest
"""
def getAction(self, gameState):
"""
Returns an action. You can use any method you want and search to any depth you want.
Just remember that the mini-contest is timed, so you have to trade off speed and computation.
Ghosts don't behave randomly anymore, but they aren't perfect either -- they'll usually
just make a beeline straight towards Pacman (or away from him if they're scared!)
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
| sumitb/cse537 | multiagent/multiAgents.py | Python | gpl-2.0 | 7,379 |
# -*- coding: utf-8 -*-
#
# This file is part of CERN Document Server.
# Copyright (C) 2016, 2019 CERN.
#
# CERN Document Server is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Document Server is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Document Server; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Pytest configuration.
Before running any of the tests you must have initialized the assets using
the ``script scripts/setup-assets.sh``.
"""
from __future__ import absolute_import, print_function
import os
import shutil
import tempfile
import uuid
import pkg_resources
import pytest
from cds_dojson.marc21 import marc21
from dojson.contrib.marc21.utils import create_record, split_blob
from elasticsearch.exceptions import RequestError
from invenio_db import db as _db
from invenio_indexer.api import RecordIndexer
from invenio_pidstore import current_pidstore
from invenio_records.api import Record
from invenio_search import current_search, current_search_client
from selenium import webdriver
from sqlalchemy_utils.functions import create_database, database_exists
from cds.factory import create_app
@pytest.yield_fixture(scope='session', autouse=True)
def base_app(request):
"""Flask application fixture."""
instance_path = tempfile.mkdtemp()
os.environ.update(
APP_INSTANCE_PATH=instance_path
)
app = create_app(
# CELERY_ALWAYS_EAGER=True,
# CELERY_CACHE_BACKEND="memory",
# CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
# CELERY_RESULT_BACKEND="cache",
SECRET_KEY="CHANGE_ME",
SECURITY_PASSWORD_SALT="CHANGE_ME",
MAIL_SUPPRESS_SEND=True,
TESTING=True,
)
with app.app_context():
yield app
# Teardown
shutil.rmtree(instance_path)
@pytest.yield_fixture(scope='session')
def db(base_app):
"""Initialize database."""
# Init
if not database_exists(str(_db.engine.url)):
create_database(str(_db.engine.url))
_db.create_all()
yield _db
# Teardown
_db.session.remove()
_db.drop_all()
@pytest.yield_fixture(scope='session')
def es(base_app):
"""Provide elasticsearch access."""
try:
list(current_search.create())
except RequestError:
list(current_search.delete())
list(current_search.create())
current_search_client.indices.refresh()
yield current_search_client
list(current_search.delete(ignore=[404]))
@pytest.yield_fixture(scope='session', autouse=True)
def app(base_app, es, db):
"""Application with ES and DB."""
yield base_app
def pytest_generate_tests(metafunc):
"""Override pytest's default test collection function.
For each test in this directory which uses the `env_browser` fixture,
the given test is called once for each value found in the
`E2E_WEBDRIVER_BROWSERS` environment variable.
"""
if 'env_browser' in metafunc.fixturenames:
# In Python 2.7 the fallback kwarg of os.environ.get is `failobj`,
# in 3.x it's `default`.
browsers = os.environ.get('E2E_WEBDRIVER_BROWSERS',
'Firefox').split()
metafunc.parametrize('env_browser', browsers, indirect=True)
@pytest.yield_fixture()
def env_browser(request):
"""Fixture for a webdriver instance of the browser."""
if request.param is None:
request.param = "Firefox"
# Create instance of webdriver.`request.param`()
browser = getattr(webdriver, request.param)()
yield browser
# Quit the webdriver instance
browser.quit()
@pytest.fixture()
def demo_records(app):
"""Create demo records."""
data_path = pkg_resources.resource_filename(
'cds.modules.fixtures', 'data/records.xml'
)
with open(data_path) as source:
indexer = RecordIndexer()
with _db.session.begin_nested():
for index, data in enumerate(split_blob(source.read()), start=1):
# create uuid
rec_uuid = uuid.uuid4()
# do translate
record = marc21.do(create_record(data))
# create PID
current_pidstore.minters['recid'](
rec_uuid, record
)
# create record
indexer.index(Record.create(record, id_=rec_uuid))
_db.session.commit()
return data_path
| CERNDocumentServer/cds | tests/e2e/conftest.py | Python | gpl-2.0 | 5,085 |
import wpilib
import hal
from wpilib import RobotDrive
class KwarqsDriveMech(RobotDrive):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight_multiplier = 1
def set_multiplier(self, in_multi = None):
        if in_multi is not None:
self.weight_multiplier = in_multi
else:
self.weight_multiplier = 1
def mecanumDrive_Cartesian(self, x, y, rotation, gyroAngle):
"""Drive method for Mecanum wheeled robots.
A method for driving with Mecanum wheeled robots. There are 4 wheels
on the robot, arranged so that the front and back wheels are toed in
45 degrees. When looking at the wheels from the top, the roller
axles should form an X across the robot.
This is designed to be directly driven by joystick axes.
:param x: The speed that the robot should drive in the X direction.
[-1.0..1.0]
:param y: The speed that the robot should drive in the Y direction.
This input is inverted to match the forward == -1.0 that
joysticks produce. [-1.0..1.0]
:param rotation: The rate of rotation for the robot that is
completely independent of the translation. [-1.0..1.0]
:param gyroAngle: The current angle reading from the gyro. Use this
to implement field-oriented controls.
"""
if not wpilib.RobotDrive.kMecanumCartesian_Reported:
hal.HALReport(hal.HALUsageReporting.kResourceType_RobotDrive,
self.getNumMotors(),
hal.HALUsageReporting.kRobotDrive_MecanumCartesian)
RobotDrive.kMecanumCartesian_Reported = True
xIn = x
yIn = y
# Negate y for the joystick.
yIn = -yIn
        # Compensate for gyro angle.
xIn, yIn = RobotDrive.rotateVector(xIn, yIn, gyroAngle)
wheelSpeeds = [0]*self.kMaxNumberOfMotors
wheelSpeeds[self.MotorType.kFrontLeft] = xIn + yIn + rotation
wheelSpeeds[self.MotorType.kFrontRight] = -xIn + yIn - rotation
wheelSpeeds[self.MotorType.kRearLeft] = -xIn + yIn + ( rotation * self.weight_multiplier )
wheelSpeeds[self.MotorType.kRearRight] = xIn + yIn - ( rotation * self.weight_multiplier )
RobotDrive.normalize(wheelSpeeds)
self.frontLeftMotor.set(wheelSpeeds[self.MotorType.kFrontLeft] * self.invertedMotors[self.MotorType.kFrontLeft] * self.maxOutput, self.syncGroup)
self.frontRightMotor.set(wheelSpeeds[self.MotorType.kFrontRight] * self.invertedMotors[self.MotorType.kFrontRight] * self.maxOutput, self.syncGroup)
self.rearLeftMotor.set(wheelSpeeds[self.MotorType.kRearLeft] * self.invertedMotors[self.MotorType.kRearLeft] * self.maxOutput, self.syncGroup)
self.rearRightMotor.set(wheelSpeeds[self.MotorType.kRearRight] * self.invertedMotors[self.MotorType.kRearRight] * self.maxOutput, self.syncGroup)
if self.syncGroup != 0:
wpilib.CANJaguar.updateSyncGroup(self.syncGroup)
self.feed()
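# Hedged usage sketch (all objects are hypothetical): a typical teleop
# call with joystick axes in [-1, 1] and a gyro angle in degrees.
#   drive = KwarqsDriveMech(fl_motor, rl_motor, fr_motor, rr_motor)
#   drive.set_multiplier(1.2)
#   drive.mecanumDrive_Cartesian(stick.getX(), stick.getY(),
#                                stick.getTwist(), gyro.getAngle())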
| frc2423/2015 | recycle_rush/custom/kwarqs_drive_mech.py | Python | gpl-2.0 | 3,114 |
#!/usr/bin/python3
import sys
from gi.repository import GExiv2
phototags = {
    'Exif.Photo.ExposureTime': "Exposure:\t",
    'Exif.Photo.FNumber': "Aperture:\tF",
# 'Exif.Photo.ExposureProgram',
'Exif.Photo.ISOSpeedRatings': "ISO:\t\t",
# 'Exif.Photo.SensitivityType',
# 'Exif.Photo.ExifVersion',
# 'Exif.Photo.DateTimeOriginal',
# 'Exif.Photo.DateTimeDigitized',
# 'Exif.Photo.ComponentsConfiguration',
# 'Exif.Photo.CompressedBitsPerPixel',
# 'Exif.Photo.ExposureBiasValue',
# 'Exif.Photo.MaxApertureValue',
# 'Exif.Photo.MeteringMode',
# 'Exif.Photo.LightSource',
# 'Exif.Photo.Flash',
    'Exif.Photo.FocalLength': "Focal length:\t"
# 'Exif.Photo.MakerNote'
}
for i in range(1, len(sys.argv)):
metadata = GExiv2.Metadata(sys.argv[i])
print("file: {}".format(sys.argv[i]))
for key in phototags:
try:
print("{}: {}".format(phototags[key], metadata[key]))
except KeyError:
continue
| pseyfert/pyexiv2-scripts | dumpPHOTO.py | Python | gpl-2.0 | 997 |
"""
WSGI config for mongobacked project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mongobacked.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| ludi1001/IonMedicationSystem | mongobacked/mongobacked/wsgi.py | Python | gpl-2.0 | 397 |
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Install TVB Framework package for developers.
Execute:
python setup.py install/develop
"""
import os
import shutil
import setuptools
VERSION = "1.4"
TVB_TEAM = "Mihai Andrei, Lia Domide, Ionel Ortelecan, Bogdan Neacsa, Calin Pavel, "
TVB_TEAM += "Stuart Knock, Marmaduke Woodman, Paula Sansz Leon, "
TVB_INSTALL_REQUIREMENTS = ["apscheduler", "beautifulsoup", "cherrypy", "genshi", "cfflib", "formencode==1.3.0a1",
"h5py==2.3.0", "lxml", "minixsv", "mod_pywebsocket", "networkx", "nibabel", "numpy",
"numexpr", "psutil", "scikit-learn", "scipy", "simplejson", "PIL>=1.1.7",
"sqlalchemy==0.7.8", "sqlalchemy-migrate==0.7.2", "matplotlib==1.2.1"]
EXCLUDE_INTROSPECT_FOLDERS = [folder for folder in os.listdir(".")
if os.path.isdir(os.path.join(".", folder)) and folder != "tvb"]
setuptools.setup(name="tvb",
version=VERSION,
packages=setuptools.find_packages(exclude=EXCLUDE_INTROSPECT_FOLDERS),
license="GPL v2",
author=TVB_TEAM,
author_email='[email protected]',
include_package_data=True,
install_requires=TVB_INSTALL_REQUIREMENTS,
extras_require={'postgres': ["psycopg2"]})
## Clean after install
shutil.rmtree('tvb.egg-info', True)
| rajul/tvb-framework | setup.py | Python | gpl-2.0 | 2,824 |
#!/usr/bin/env python
'''# shufflez.py '''
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = ["A'mmer Almadani:Mad_Dev", "sysbase.org"]
__email__ = ["[email protected]", "[email protected]"]
import random
import urllib2
from search import GoogleSearch, SearchError
import time
from multiprocessing import Process
from threading import Timer
class shufflez:
def __init__(self):
self.word_list = 'lists/wordlist.txt'
self.websites = 'lists/websites.txt'
self.user_agent = 'lists/user_agent.txt'
def together(self, *functions):
process = []
for function in functions:
s = Process(target=function)
s.start()
process.append(s)
for s in process:
s.join()
def randomize(self, r, typ):
'''Return Random List
r (range): int
typ : word | site | user-agent
'''
lst = []
if typ == 'word':
list_to_parse = self.word_list
elif typ == 'site':
list_to_parse = self.websites
elif typ == 'user-agent':
list_to_parse = self.user_agent
a = open(list_to_parse, 'r')
        # strip the trailing newline kept by readlines(), which would
        # otherwise corrupt URLs, search terms and User-Agent headers
        for i in a.readlines():
            lst.append(i.strip())
random.shuffle(lst)
if typ == 'site':
            return map(lambda x: x if 'http://' in x else 'http://' + x, lst)[0:int(r)]
else:
return lst[0:int(r)]
def append_to_list(self, typ, lst):
if typ == 'word':
l = self.word_list
elif typ == 'link':
l = self.websites
li = open(l, 'a')
for i in lst:
li.write(i+'\n')
li.close()
def open_url(self, url, user_agent):
try:
header = { 'User-Agent' : str(user_agent) }
req = urllib2.Request(url, headers=header)
response = urllib2.urlopen(req)
print 'STATUS', response.getcode()
except:
pass
def google(self, term):
links_from_google = []
words_from_google = []
try:
gs = GoogleSearch(term)
gs.results_per_page = 10
results = gs.get_results()
for res in results:
words_from_google.append(res.title.encode('utf8'))
print '\033[92mGot new words from Google...appending to list\n\033[0m'
self.append_to_list('word', words_from_google)
links_from_google.append(res.url.encode('utf8'))
print '\033[92mGot new link from Google...appending to list\n\033[0m'
self.append_to_list('link', links_from_google)
except SearchError, e:
print "Search failed: %s" % e
mask = shufflez()
def random_websites():
count = random.randint(1,15)
    for i, e in zip(mask.randomize(10, 'site'), mask.randomize(10, 'user-agent')):
if count == random.randint(1,15):
break
else:
sleep_time = str(random.randint(1,5)) +'.'+ str(random.randint(1,9))
print 'VISITING', '\033[92m', i , '\033[0m', 'USING', '\033[94m', e, '\033[0m', 'SLEEPING FOR', '\033[95m', sleep_time, 'SECONDS', '\033[0m'
time.sleep(float(sleep_time))
mask.open_url(i, e)
print '\n'
def random_google():
count = random.randint(1,15)
for i in mask.randomize(10, 'word'):
if count == random.randint(1,15):
break
else:
sleep_time = str(random.randint(1,5)) +'.'+ str(random.randint(1,9))
print 'SEARCHING FOR', '\033[92m', i ,'\033[0m', 'SLEEPING FOR', '\033[95m', sleep_time, 'SECONDS', '\033[0m', '\n'
time.sleep(float(sleep_time))
mask.google(i)
#while True:
# try:
# mask.together(random_google(), random_websites())
# except KeyboardInterrupt:
# print 'Exit'
# break | Logic-gate/shuffelz | shuffelz.py | Python | gpl-2.0 | 4,742 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('codecompetitions', '0006_auto_20140805_2234'),
]
operations = [
migrations.AddField(
model_name='problem',
name='expected_output',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='problem',
name='input_data',
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='problem',
name='read_from_file',
field=models.CharField(blank=True, null=True, max_length=80),
preserve_default=True,
),
migrations.AddField(
model_name='problem',
name='time_limit',
field=models.PositiveIntegerField(default=5),
preserve_default=True,
),
]
| baryon5/mercury | codecompetitions/migrations/0007_auto_20140805_2253.py | Python | gpl-2.0 | 1,067 |
# Copyright (C) 2009 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""A paint engine to produce EMF exports.
Requires: PyQt-x11-gpl-4.6-snapshot-20090906.tar.gz
sip-4.9-snapshot-20090906.tar.gz
pyemf
"""
import struct
import pyemf
from .. import qtall as qt
inch_mm = 25.4
scale = 100
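# Hedged usage sketch (the output filename is hypothetical): paint with a
# normal QPainter onto the device defined below, then save the EMF.
#   dev = EMFPaintDevice(6.0, 4.0, dpi=90)
#   painter = qt.QPainter(dev)
#   ...QPainter drawing calls...
#   painter.end()
#   dev.paintEngine().saveFile("out.emf")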
def isStockObject(obj):
"""Is this a stock windows object."""
return (obj & 0x80000000) != 0
class _EXTCREATEPEN(pyemf._EMR._EXTCREATEPEN):
"""Extended pen creation record with custom line style."""
typedef = [
('i','handle',0),
('i','offBmi',0),
('i','cbBmi',0),
('i','offBits',0),
('i','cbBits',0),
('i','style'),
('i','penwidth'),
('i','brushstyle'),
('i','color'),
('i','brushhatch',0),
('i','numstyleentries')
]
def __init__(self, style=pyemf.PS_SOLID, width=1, color=0,
styleentries=[]):
"""Create pen.
styleentries is a list of dash and space lengths."""
pyemf._EMR._EXTCREATEPEN.__init__(self)
self.style = style
self.penwidth = width
self.color = pyemf._normalizeColor(color)
self.brushstyle = 0x0 # solid
if style & pyemf.PS_STYLE_MASK != pyemf.PS_USERSTYLE:
styleentries = []
self.numstyleentries = len(styleentries)
if styleentries:
self.unhandleddata = struct.pack(
"i"*self.numstyleentries, *styleentries)
def hasHandle(self):
return True
class EMFPaintEngine(qt.QPaintEngine):
"""Custom EMF paint engine."""
def __init__(self, width_in, height_in, dpi=75):
qt.QPaintEngine.__init__(
self,
qt.QPaintEngine.Antialiasing |
qt.QPaintEngine.PainterPaths |
qt.QPaintEngine.PrimitiveTransform |
qt.QPaintEngine.PaintOutsidePaintEvent |
qt.QPaintEngine.PatternBrush
)
self.width = width_in
self.height = height_in
self.dpi = dpi
def begin(self, paintdevice):
self.emf = pyemf.EMF(self.width, self.height, int(self.dpi*scale))
self.pen = self.emf.GetStockObject(pyemf.BLACK_PEN)
self.pencolor = (0, 0, 0)
self.brush = self.emf.GetStockObject(pyemf.NULL_BRUSH)
self.paintdevice = paintdevice
return True
def drawLines(self, lines):
"""Draw lines to emf output."""
for line in lines:
self.emf.Polyline(
[ (int(line.x1()*scale), int(line.y1()*scale)),
(int(line.x2()*scale), int(line.y2()*scale)) ] )
def drawPolygon(self, points, mode):
"""Draw polygon on output."""
# print "Polygon"
pts = [(int(p.x()*scale), int(p.y()*scale)) for p in points]
if mode == qt.QPaintEngine.PolylineMode:
self.emf.Polyline(pts)
else:
self.emf.SetPolyFillMode({
qt.QPaintEngine.WindingMode: pyemf.WINDING,
qt.QPaintEngine.OddEvenMode: pyemf.ALTERNATE,
qt.QPaintEngine.ConvexMode: pyemf.WINDING
})
self.emf.Polygon(pts)
def drawEllipse(self, rect):
"""Draw an ellipse."""
# print "ellipse"
args = (
int(rect.left()*scale), int(rect.top()*scale),
int(rect.right()*scale), int(rect.bottom()*scale),
int(rect.left()*scale), int(rect.top()*scale),
int(rect.left()*scale), int(rect.top()*scale),
)
self.emf.Pie(*args)
self.emf.Arc(*args)
def drawPoints(self, points):
"""Draw points."""
# print "points"
for pt in points:
x, y = (pt.x()-0.5)*scale, (pt.y()-0.5)*scale
self.emf.Pie(
int(x), int(y),
int((pt.x()+0.5)*scale), int((pt.y()+0.5)*scale),
int(x), int(y), int(x), int(y) )
def drawPixmap(self, r, pixmap, sr):
"""Draw pixmap to display."""
# convert pixmap to BMP format
bytearr = qt.QByteArray()
buf = qt.QBuffer(bytearr)
buf.open(qt.QIODevice.WriteOnly)
pixmap.save(buf, "BMP")
        # chop off the 14-byte BITMAPFILEHEADER to get the DIB
        bmp = bytes(buf.data())
        dib = bmp[0xe:]
        # BITMAPINFOHEADER size, pixel-data offset and pixel-data size sit
        # at fixed offsets in the BMP header
        hdrsize, = struct.unpack('<i', bmp[0xe:0x12])
        dataindex, = struct.unpack('<i', bmp[0xa:0xe])
        datasize, = struct.unpack('<i', bmp[0x22:0x26])
epix = pyemf._EMR._STRETCHDIBITS()
epix.rclBounds_left = int(r.left()*scale)
epix.rclBounds_top = int(r.top()*scale)
epix.rclBounds_right = int(r.right()*scale)
epix.rclBounds_bottom = int(r.bottom()*scale)
epix.xDest = int(r.left()*scale)
epix.yDest = int(r.top()*scale)
epix.cxDest = int(r.width()*scale)
epix.cyDest = int(r.height()*scale)
epix.xSrc = int(sr.left())
epix.ySrc = int(sr.top())
epix.cxSrc = int(sr.width())
epix.cySrc = int(sr.height())
epix.dwRop = 0xcc0020 # SRCCOPY
offset = epix.format.minstructsize + 8
epix.offBmiSrc = offset
epix.cbBmiSrc = hdrsize
epix.offBitsSrc = offset + dataindex - 0xe
epix.cbBitsSrc = datasize
epix.iUsageSrc = 0x0 # DIB_RGB_COLORS
epix.unhandleddata = dib
self.emf._append(epix)
def _createPath(self, path):
"""Convert qt path to emf path"""
self.emf.BeginPath()
count = path.elementCount()
i = 0
#print "Start path"
while i < count:
e = path.elementAt(i)
if e.type == qt.QPainterPath.MoveToElement:
self.emf.MoveTo( int(e.x*scale), int(e.y*scale) )
#print "M", e.x*scale, e.y*scale
elif e.type == qt.QPainterPath.LineToElement:
self.emf.LineTo( int(e.x*scale), int(e.y*scale) )
#print "L", e.x*scale, e.y*scale
elif e.type == qt.QPainterPath.CurveToElement:
e1 = path.elementAt(i+1)
e2 = path.elementAt(i+2)
params = (
( int(e.x*scale), int(e.y*scale) ),
( int(e1.x*scale), int(e1.y*scale) ),
( int(e2.x*scale), int(e2.y*scale) ),
)
self.emf.PolyBezierTo(params)
#print "C", params
i += 2
else:
assert False
i += 1
ef = path.elementAt(0)
el = path.elementAt(count-1)
if ef.x == el.x and ef.y == el.y:
self.emf.CloseFigure()
#print "closing"
self.emf.EndPath()
def drawPath(self, path):
"""Draw a path on the output."""
# print "path"
self._createPath(path)
self.emf.StrokeAndFillPath()
def drawTextItem(self, pt, textitem):
"""Convert text to a path and draw it.
"""
# print "text", pt, textitem.text()
path = qt.QPainterPath()
path.addText(pt, textitem.font(), textitem.text())
fill = self.emf.CreateSolidBrush(self.pencolor)
self.emf.SelectObject(fill)
self._createPath(path)
self.emf.FillPath()
self.emf.SelectObject(self.brush)
self.emf.DeleteObject(fill)
def end(self):
return True
def saveFile(self, filename):
self.emf.save(filename)
def _updatePen(self, pen):
"""Update the pen to the currently selected one."""
# line style
style = {
qt.Qt.NoPen: pyemf.PS_NULL,
qt.Qt.SolidLine: pyemf.PS_SOLID,
qt.Qt.DashLine: pyemf.PS_DASH,
qt.Qt.DotLine: pyemf.PS_DOT,
qt.Qt.DashDotLine: pyemf.PS_DASHDOT,
qt.Qt.DashDotDotLine: pyemf.PS_DASHDOTDOT,
qt.Qt.CustomDashLine: pyemf.PS_USERSTYLE,
}[pen.style()]
if style != pyemf.PS_NULL:
# set cap style
style |= {
qt.Qt.FlatCap: pyemf.PS_ENDCAP_FLAT,
qt.Qt.SquareCap: pyemf.PS_ENDCAP_SQUARE,
qt.Qt.RoundCap: pyemf.PS_ENDCAP_ROUND,
}[pen.capStyle()]
# set join style
style |= {
qt.Qt.MiterJoin: pyemf.PS_JOIN_MITER,
qt.Qt.BevelJoin: pyemf.PS_JOIN_BEVEL,
qt.Qt.RoundJoin: pyemf.PS_JOIN_ROUND,
qt.Qt.SvgMiterJoin: pyemf.PS_JOIN_MITER,
}[pen.joinStyle()]
# use proper widths of lines
style |= pyemf.PS_GEOMETRIC
width = int(pen.widthF()*scale)
qc = pen.color()
color = (qc.red(), qc.green(), qc.blue())
self.pencolor = color
if pen.style() == qt.Qt.CustomDashLine:
# make an extended pen if we need a custom dash pattern
dash = [int(pen.widthF()*scale*f) for f in pen.dashPattern()]
newpen = self.emf._appendHandle( _EXTCREATEPEN(
style, width=width, color=color, styleentries=dash))
else:
# use a standard create pen
newpen = self.emf.CreatePen(style, width, color)
self.emf.SelectObject(newpen)
# delete old pen if it is not a stock object
if not isStockObject(self.pen):
self.emf.DeleteObject(self.pen)
self.pen = newpen
def _updateBrush(self, brush):
"""Update to selected brush."""
style = brush.style()
qc = brush.color()
color = (qc.red(), qc.green(), qc.blue())
# print "brush", color
if style == qt.Qt.SolidPattern:
newbrush = self.emf.CreateSolidBrush(color)
elif style == qt.Qt.NoBrush:
newbrush = self.emf.GetStockObject(pyemf.NULL_BRUSH)
else:
try:
hatch = {
qt.Qt.HorPattern: pyemf.HS_HORIZONTAL,
qt.Qt.VerPattern: pyemf.HS_VERTICAL,
qt.Qt.CrossPattern: pyemf.HS_CROSS,
qt.Qt.BDiagPattern: pyemf.HS_BDIAGONAL,
qt.Qt.FDiagPattern: pyemf.HS_FDIAGONAL,
qt.Qt.DiagCrossPattern: pyemf.HS_DIAGCROSS
}[brush.style()]
except KeyError:
newbrush = self.emf.CreateSolidBrush(color)
else:
newbrush = self.emf.CreateHatchBrush(hatch, color)
self.emf.SelectObject(newbrush)
if not isStockObject(self.brush):
self.emf.DeleteObject(self.brush)
self.brush = newbrush
def _updateClipPath(self, path, operation):
"""Update clipping path."""
# print "clip"
if operation != qt.Qt.NoClip:
self._createPath(path)
clipmode = {
qt.Qt.ReplaceClip: pyemf.RGN_COPY,
qt.Qt.IntersectClip: pyemf.RGN_AND,
}[operation]
else:
# is this the only way to get rid of clipping?
self.emf.BeginPath()
self.emf.MoveTo(0,0)
w = int(self.width*self.dpi*scale)
h = int(self.height*self.dpi*scale)
self.emf.LineTo(w, 0)
self.emf.LineTo(w, h)
self.emf.LineTo(0, h)
self.emf.CloseFigure()
self.emf.EndPath()
clipmode = pyemf.RGN_COPY
self.emf.SelectClipPath(mode=clipmode)
def _updateTransform(self, m):
"""Update transformation."""
self.emf.SetWorldTransform(
m.m11(), m.m12(),
m.m21(), m.m22(),
m.dx()*scale, m.dy()*scale)
def updateState(self, state):
"""Examine what has changed in state and call apropriate function."""
ss = state.state()
if ss & qt.QPaintEngine.DirtyPen:
self._updatePen(state.pen())
if ss & qt.QPaintEngine.DirtyBrush:
self._updateBrush(state.brush())
if ss & qt.QPaintEngine.DirtyTransform:
self._updateTransform(state.transform())
if ss & qt.QPaintEngine.DirtyClipPath:
self._updateClipPath(state.clipPath(), state.clipOperation())
if ss & qt.QPaintEngine.DirtyClipRegion:
path = qt.QPainterPath()
path.addRegion(state.clipRegion())
self._updateClipPath(path, state.clipOperation())
def type(self):
return qt.QPaintEngine.PostScript
class EMFPaintDevice(qt.QPaintDevice):
"""Paint device for EMF paint engine."""
def __init__(self, width_in, height_in, dpi=75):
qt.QPaintDevice.__init__(self)
self.engine = EMFPaintEngine(width_in, height_in, dpi=dpi)
def paintEngine(self):
return self.engine
def metric(self, m):
"""Return the metrics of the painter."""
if m == qt.QPaintDevice.PdmWidth:
return int(self.engine.width * self.engine.dpi)
elif m == qt.QPaintDevice.PdmHeight:
return int(self.engine.height * self.engine.dpi)
elif m == qt.QPaintDevice.PdmWidthMM:
return int(self.engine.width * inch_mm)
elif m == qt.QPaintDevice.PdmHeightMM:
return int(self.engine.height * inch_mm)
elif m == qt.QPaintDevice.PdmNumColors:
return 2147483647
elif m == qt.QPaintDevice.PdmDepth:
return 24
elif m == qt.QPaintDevice.PdmDpiX:
return int(self.engine.dpi)
elif m == qt.QPaintDevice.PdmDpiY:
return int(self.engine.dpi)
elif m == qt.QPaintDevice.PdmPhysicalDpiX:
return int(self.engine.dpi)
elif m == qt.QPaintDevice.PdmPhysicalDpiY:
return int(self.engine.dpi)
elif m == qt.QPaintDevice.PdmDevicePixelRatio:
return 1
# Qt >= 5.6
elif m == getattr(qt.QPaintDevice, 'PdmDevicePixelRatioScaled', -1):
return 1
else:
# fall back
return qt.QPaintDevice.metric(self, m)
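# Illustrative sketch (not part of the original module): typical use of the
# paint device above. Assumes a QApplication already exists; "out.emf" is a
# placeholder output path.
def _example_emf_export():
    dev = EMFPaintDevice(6.0, 4.0, dpi=90)  # page size in inches
    painter = qt.QPainter(dev)
    painter.drawLine(qt.QPointF(10, 10), qt.QPointF(100, 100))
    painter.end()
    dev.paintEngine().saveFile("out.emf")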
| veusz/veusz | veusz/document/emf_export.py | Python | gpl-2.0 | 14,778 |
#
# The Python Imaging Library.
# $Id$
#
# TIFF file handling
#
# TIFF is a flexible, if somewhat aged, image file format originally
# defined by Aldus. Although TIFF supports a wide variety of pixel
# layouts and compression methods, the name doesn't really stand for
# "thousands of incompatible file formats," it just feels that way.
#
# To read TIFF data from a stream, the stream must be seekable. For
# progressive decoding, make sure to use TIFF files where the tag
# directory is placed first in the file.
#
# History:
# 1995-09-01 fl Created
# 1996-05-04 fl Handle JPEGTABLES tag
# 1996-05-18 fl Fixed COLORMAP support
# 1997-01-05 fl Fixed PREDICTOR support
# 1997-08-27 fl Added support for rational tags (from Perry Stoll)
# 1998-01-10 fl Fixed seek/tell (from Jan Blom)
# 1998-07-15 fl Use private names for internal variables
# 1999-06-13 fl Rewritten for PIL 1.0 (1.0)
# 2000-10-11 fl Additional fixes for Python 2.0 (1.1)
# 2001-04-17 fl Fixed rewind support (seek to frame 0) (1.2)
# 2001-05-12 fl Added write support for more tags (from Greg Couch) (1.3)
# 2001-12-18 fl Added workaround for broken Matrox library
# 2002-01-18 fl Don't mess up if photometric tag is missing (D. Alan Stewart)
# 2003-05-19 fl Check FILLORDER tag
# 2003-09-26 fl Added RGBa support
# 2004-02-24 fl Added DPI support; fixed rational write support
# 2005-02-07 fl Added workaround for broken Corel Draw 10 files
# 2006-01-09 fl Added support for float/double tags (from Russell Nelson)
#
# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "1.3.5"
import Image, ImageFile
import array, string, sys
import ImagePalette
II = "II" # little-endian (intel-style)
MM = "MM" # big-endian (motorola-style)
try:
if sys.byteorder == "little":
native_prefix = II
else:
native_prefix = MM
except AttributeError:
if ord(array.array("i",[1]).tostring()[0]):
native_prefix = II
else:
native_prefix = MM
#
# --------------------------------------------------------------------
# Read TIFF files
def il16(c,o=0):
return ord(c[o]) + (ord(c[o+1])<<8)
def il32(c,o=0):
return ord(c[o]) + (ord(c[o+1])<<8) + (ord(c[o+2])<<16) + (ord(c[o+3])<<24)
def ol16(i):
return chr(i&255) + chr(i>>8&255)
def ol32(i):
return chr(i&255) + chr(i>>8&255) + chr(i>>16&255) + chr(i>>24&255)
def ib16(c,o=0):
return ord(c[o+1]) + (ord(c[o])<<8)
def ib32(c,o=0):
return ord(c[o+3]) + (ord(c[o+2])<<8) + (ord(c[o+1])<<16) + (ord(c[o])<<24)
def ob16(i):
return chr(i>>8&255) + chr(i&255)
def ob32(i):
return chr(i>>24&255) + chr(i>>16&255) + chr(i>>8&255) + chr(i&255)
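# Illustrative sketch (not part of the original file): the il*/ol* and
# ib*/ob* helpers above are encode/decode inverses for little-endian and
# big-endian byte strings respectively.
def _example_byteorder_helpers():
    assert il16(ol16(0x1234)) == 0x1234          # little-endian 16-bit
    assert ib32(ob32(0x01020304)) == 0x01020304  # big-endian 32-bit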
# a few tag names, just to make the code below a bit more readable
IMAGEWIDTH = 256
IMAGELENGTH = 257
BITSPERSAMPLE = 258
COMPRESSION = 259
PHOTOMETRIC_INTERPRETATION = 262
FILLORDER = 266
IMAGEDESCRIPTION = 270
STRIPOFFSETS = 273
SAMPLESPERPIXEL = 277
ROWSPERSTRIP = 278
STRIPBYTECOUNTS = 279
X_RESOLUTION = 282
Y_RESOLUTION = 283
PLANAR_CONFIGURATION = 284
RESOLUTION_UNIT = 296
SOFTWARE = 305
DATE_TIME = 306
ARTIST = 315
PREDICTOR = 317
COLORMAP = 320
TILEOFFSETS = 324
EXTRASAMPLES = 338
SAMPLEFORMAT = 339
JPEGTABLES = 347
COPYRIGHT = 33432
IPTC_NAA_CHUNK = 33723 # newsphoto properties
PHOTOSHOP_CHUNK = 34377 # photoshop properties
ICCPROFILE = 34675
EXIFIFD = 34665
XMP = 700
COMPRESSION_INFO = {
# Compression => pil compression name
1: "raw",
2: "tiff_ccitt",
3: "group3",
4: "group4",
5: "tiff_lzw",
6: "tiff_jpeg", # obsolete
7: "jpeg",
32771: "tiff_raw_16", # 16-bit padding
32773: "packbits"
}
OPEN_INFO = {
# (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, BitsPerSample,
# ExtraSamples) => mode, rawmode
(II, 0, 1, 1, (1,), ()): ("1", "1;I"),
(II, 0, 1, 2, (1,), ()): ("1", "1;IR"),
(II, 0, 1, 1, (8,), ()): ("L", "L;I"),
(II, 0, 1, 2, (8,), ()): ("L", "L;IR"),
(II, 1, 1, 1, (1,), ()): ("1", "1"),
(II, 1, 1, 2, (1,), ()): ("1", "1;R"),
(II, 1, 1, 1, (8,), ()): ("L", "L"),
(II, 1, 1, 1, (8,8), (2,)): ("LA", "LA"),
(II, 1, 1, 2, (8,), ()): ("L", "L;R"),
(II, 1, 1, 1, (16,), ()): ("I;16", "I;16"),
(II, 1, 2, 1, (16,), ()): ("I;16S", "I;16S"),
(II, 1, 2, 1, (32,), ()): ("I", "I;32S"),
(II, 1, 3, 1, (32,), ()): ("F", "F;32F"),
(II, 2, 1, 1, (8,8,8), ()): ("RGB", "RGB"),
(II, 2, 1, 2, (8,8,8), ()): ("RGB", "RGB;R"),
(II, 2, 1, 1, (8,8,8,8), (0,)): ("RGBX", "RGBX"),
(II, 2, 1, 1, (8,8,8,8), (1,)): ("RGBA", "RGBa"),
(II, 2, 1, 1, (8,8,8,8), (2,)): ("RGBA", "RGBA"),
(II, 2, 1, 1, (8,8,8,8), (999,)): ("RGBA", "RGBA"), # corel draw 10
(II, 3, 1, 1, (1,), ()): ("P", "P;1"),
(II, 3, 1, 2, (1,), ()): ("P", "P;1R"),
(II, 3, 1, 1, (2,), ()): ("P", "P;2"),
(II, 3, 1, 2, (2,), ()): ("P", "P;2R"),
(II, 3, 1, 1, (4,), ()): ("P", "P;4"),
(II, 3, 1, 2, (4,), ()): ("P", "P;4R"),
(II, 3, 1, 1, (8,), ()): ("P", "P"),
(II, 3, 1, 1, (8,8), (2,)): ("PA", "PA"),
(II, 3, 1, 2, (8,), ()): ("P", "P;R"),
(II, 5, 1, 1, (8,8,8,8), ()): ("CMYK", "CMYK"),
(II, 6, 1, 1, (8,8,8), ()): ("YCbCr", "YCbCr"),
(II, 8, 1, 1, (8,8,8), ()): ("LAB", "LAB"),
(MM, 0, 1, 1, (1,), ()): ("1", "1;I"),
(MM, 0, 1, 2, (1,), ()): ("1", "1;IR"),
(MM, 0, 1, 1, (8,), ()): ("L", "L;I"),
(MM, 0, 1, 2, (8,), ()): ("L", "L;IR"),
(MM, 1, 1, 1, (1,), ()): ("1", "1"),
(MM, 1, 1, 2, (1,), ()): ("1", "1;R"),
(MM, 1, 1, 1, (8,), ()): ("L", "L"),
(MM, 1, 1, 1, (8,8), (2,)): ("LA", "LA"),
(MM, 1, 1, 2, (8,), ()): ("L", "L;R"),
(MM, 1, 1, 1, (16,), ()): ("I;16B", "I;16B"),
(MM, 1, 2, 1, (16,), ()): ("I;16BS", "I;16BS"),
(MM, 1, 2, 1, (32,), ()): ("I;32BS", "I;32BS"),
(MM, 1, 3, 1, (32,), ()): ("F;32BF", "F;32BF"),
(MM, 2, 1, 1, (8,8,8), ()): ("RGB", "RGB"),
(MM, 2, 1, 2, (8,8,8), ()): ("RGB", "RGB;R"),
(MM, 2, 1, 1, (8,8,8,8), (0,)): ("RGBX", "RGBX"),
(MM, 2, 1, 1, (8,8,8,8), (1,)): ("RGBA", "RGBa"),
(MM, 2, 1, 1, (8,8,8,8), (2,)): ("RGBA", "RGBA"),
(MM, 2, 1, 1, (8,8,8,8), (999,)): ("RGBA", "RGBA"), # corel draw 10
(MM, 3, 1, 1, (1,), ()): ("P", "P;1"),
(MM, 3, 1, 2, (1,), ()): ("P", "P;1R"),
(MM, 3, 1, 1, (2,), ()): ("P", "P;2"),
(MM, 3, 1, 2, (2,), ()): ("P", "P;2R"),
(MM, 3, 1, 1, (4,), ()): ("P", "P;4"),
(MM, 3, 1, 2, (4,), ()): ("P", "P;4R"),
(MM, 3, 1, 1, (8,), ()): ("P", "P"),
(MM, 3, 1, 1, (8,8), (2,)): ("PA", "PA"),
(MM, 3, 1, 2, (8,), ()): ("P", "P;R"),
(MM, 5, 1, 1, (8,8,8,8), ()): ("CMYK", "CMYK"),
(MM, 6, 1, 1, (8,8,8), ()): ("YCbCr", "YCbCr"),
(MM, 8, 1, 1, (8,8,8), ()): ("LAB", "LAB"),
}
PREFIXES = ["MM\000\052", "II\052\000", "II\xBC\000"]
def _accept(prefix):
return prefix[:4] in PREFIXES
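# Illustrative sketch (not part of the original file): _accept() keys off
# the first four bytes of the header.
def _example_accept():
    assert _accept("II\x2a\x00\x08\x00\x00\x00")  # little-endian classic TIFF
    assert not _accept("GIF89a")                  # not a TIFF prefix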
##
# Wrapper for TIFF IFDs.
class ImageFileDirectory:
# represents a TIFF tag directory. to speed things up,
# we don't decode tags unless they're asked for.
def __init__(self, prefix):
self.prefix = prefix[:2]
if self.prefix == MM:
self.i16, self.i32 = ib16, ib32
self.o16, self.o32 = ob16, ob32
elif self.prefix == II:
self.i16, self.i32 = il16, il32
self.o16, self.o32 = ol16, ol32
else:
raise SyntaxError("not a TIFF IFD")
self.reset()
def reset(self):
self.tags = {}
self.tagdata = {}
self.tagtype = {} # added 2008-06-05 by Florian Hoech
self.next = None
# dictionary API (sort of)
def keys(self):
return self.tagdata.keys() + self.tags.keys()
def items(self):
items = self.tags.items()
for tag in self.tagdata.keys():
items.append((tag, self[tag]))
return items
def __len__(self):
return len(self.tagdata) + len(self.tags)
def __getitem__(self, tag):
try:
return self.tags[tag]
except KeyError:
type, data = self.tagdata[tag] # unpack on the fly
size, handler = self.load_dispatch[type]
self.tags[tag] = data = handler(self, data)
del self.tagdata[tag]
return data
def get(self, tag, default=None):
try:
return self[tag]
except KeyError:
return default
def getscalar(self, tag, default=None):
try:
value = self[tag]
if len(value) != 1:
if tag == SAMPLEFORMAT:
# work around broken (?) matrox library
# (from Ted Wright, via Bob Klimek)
raise KeyError # use default
raise ValueError, "not a scalar"
return value[0]
except KeyError:
if default is None:
raise
return default
def has_key(self, tag):
return self.tags.has_key(tag) or self.tagdata.has_key(tag)
def __setitem__(self, tag, value):
if type(value) is not type(()):
value = (value,)
self.tags[tag] = value
# load primitives
load_dispatch = {}
def load_byte(self, data):
l = []
for i in range(len(data)):
l.append(ord(data[i]))
return tuple(l)
load_dispatch[1] = (1, load_byte)
def load_string(self, data):
if data[-1:] == '\0':
data = data[:-1]
return data
load_dispatch[2] = (1, load_string)
def load_short(self, data):
l = []
for i in range(0, len(data), 2):
l.append(self.i16(data, i))
return tuple(l)
load_dispatch[3] = (2, load_short)
def load_long(self, data):
l = []
for i in range(0, len(data), 4):
l.append(self.i32(data, i))
return tuple(l)
load_dispatch[4] = (4, load_long)
def load_rational(self, data):
l = []
for i in range(0, len(data), 8):
l.append((self.i32(data, i), self.i32(data, i+4)))
return tuple(l)
load_dispatch[5] = (8, load_rational)
def load_float(self, data):
a = array.array("f", data)
if self.prefix != native_prefix:
a.byteswap()
return tuple(a)
load_dispatch[11] = (4, load_float)
def load_double(self, data):
a = array.array("d", data)
if self.prefix != native_prefix:
a.byteswap()
return tuple(a)
load_dispatch[12] = (8, load_double)
def load_undefined(self, data):
# Untyped data
return data
load_dispatch[7] = (1, load_undefined)
def load(self, fp):
# load tag dictionary
self.reset()
i16 = self.i16
i32 = self.i32
for i in range(i16(fp.read(2))):
ifd = fp.read(12)
tag, typ = i16(ifd), i16(ifd, 2)
if Image.DEBUG:
import TiffTags
tagname = TiffTags.TAGS.get(tag, "unknown")
typname = TiffTags.TYPES.get(typ, "unknown")
print "tag: %s (%d)" % (tagname, tag),
print "- type: %s (%d)" % (typname, typ),
try:
dispatch = self.load_dispatch[typ]
except KeyError:
if Image.DEBUG:
print "- unsupported type", typ
continue # ignore unsupported type
size, handler = dispatch
size = size * i32(ifd, 4)
# Get and expand tag value
if size > 4:
here = fp.tell()
fp.seek(i32(ifd, 8))
data = ImageFile._safe_read(fp, size)
fp.seek(here)
else:
data = ifd[8:8+size]
if len(data) != size:
raise IOError, "not enough data"
self.tagdata[tag] = typ, data
self.tagtype[tag] = typ
if Image.DEBUG:
if tag in (COLORMAP, IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, ICCPROFILE, XMP):
print "- value: <table: %d bytes>" % size
else:
print "- value:", self[tag]
self.next = i32(fp.read(4))
# save primitives
def save(self, fp):
o16 = self.o16
o32 = self.o32
fp.write(o16(len(self.tags)))
# always write in ascending tag order
tags = self.tags.items()
tags.sort()
directory = []
append = directory.append
offset = fp.tell() + len(self.tags) * 12 + 4
stripoffsets = None
# pass 1: convert tags to binary format
for tag, value in tags:
typ = None
if self.tagtype.has_key(tag):
typ = self.tagtype[tag]
if typ == 1:
# byte data
data = value = string.join(map(chr, value), "")
elif typ == 7:
# untyped data
data = value = string.join(value, "")
elif type(value[0]) is type(""):
# string data
typ = 2
data = value = string.join(value, "\0") + "\0"
else:
# integer data
if tag == STRIPOFFSETS:
stripoffsets = len(directory)
typ = 4 # to avoid catch-22
elif tag in (X_RESOLUTION, Y_RESOLUTION):
# identify rational data fields
typ = 5
elif not typ:
typ = 3
for v in value:
if v >= 65536:
typ = 4
if typ == 3:
data = string.join(map(o16, value), "")
else:
data = string.join(map(o32, value), "")
if Image.DEBUG:
import TiffTags
tagname = TiffTags.TAGS.get(tag, "unknown")
typname = TiffTags.TYPES.get(typ, "unknown")
print "save: %s (%d)" % (tagname, tag),
print "- type: %s (%d)" % (typname, typ),
if tag in (COLORMAP, IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, ICCPROFILE, XMP):
size = len(data)
print "- value: <table: %d bytes>" % size
else:
print "- value:", value
# figure out if data fits into the directory
if len(data) == 4:
append((tag, typ, len(value), data, ""))
elif len(data) < 4:
append((tag, typ, len(value), data + (4-len(data))*"\0", ""))
else:
count = len(value)
if typ == 5:
count = count / 2 # adjust for rational data field
append((tag, typ, count, o32(offset), data))
offset = offset + len(data)
if offset & 1:
offset = offset + 1 # word padding
# update strip offset data to point beyond auxiliary data
if stripoffsets is not None:
tag, typ, count, value, data = directory[stripoffsets]
assert not data, "multistrip support not yet implemented"
value = o32(self.i32(value) + offset)
directory[stripoffsets] = tag, typ, count, value, data
# pass 2: write directory to file
for tag, typ, count, value, data in directory:
if Image.DEBUG > 1:
print tag, typ, count, repr(value), repr(data)
fp.write(o16(tag) + o16(typ) + o32(count) + value)
# -- overwrite here for multi-page --
fp.write("\0\0\0\0") # end of directory
# pass 3: write auxiliary data to file
for tag, typ, count, value, data in directory:
fp.write(data)
if len(data) & 1:
fp.write("\0")
return offset
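# Illustrative sketch (not part of the original file): an IFD behaves like a
# small dictionary; scalar values are normalised to one-element tuples.
def _example_ifd():
    ifd = ImageFileDirectory(II)
    ifd[IMAGEWIDTH] = 800
    assert ifd[IMAGEWIDTH] == (800,)
    assert ifd.getscalar(IMAGEWIDTH) == 800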
##
# Image plugin for TIFF files.
class TiffImageFile(ImageFile.ImageFile):
format = "TIFF"
format_description = "Adobe TIFF"
def _open(self):
"Open the first image in a TIFF file"
# Header
ifh = self.fp.read(8)
if ifh[:4] not in PREFIXES:
raise SyntaxError, "not a TIFF file"
# image file directory (tag dictionary)
self.tag = self.ifd = ImageFileDirectory(ifh[:2])
# setup frame pointers
self.__first = self.__next = self.ifd.i32(ifh, 4)
self.__frame = -1
self.__fp = self.fp
# and load the first frame
self._seek(0)
def seek(self, frame):
"Select a given frame as current image"
if frame < 0:
frame = 0
self._seek(frame)
def tell(self):
"Return the current frame number"
return self._tell()
def _seek(self, frame):
self.fp = self.__fp
if frame < self.__frame:
# rewind file
self.__frame = -1
self.__next = self.__first
while self.__frame < frame:
if not self.__next:
raise EOFError, "no more images in TIFF file"
self.fp.seek(self.__next)
self.tag.load(self.fp)
self.__next = self.tag.next
self.__frame = self.__frame + 1
self._setup()
def _tell(self):
return self.__frame
def _decoder(self, rawmode, layer):
"Setup decoder contexts"
args = None
if rawmode == "RGB" and self._planar_configuration == 2:
rawmode = rawmode[layer]
compression = self._compression
if compression == "raw":
args = (rawmode, 0, 1)
elif compression == "jpeg":
args = rawmode, ""
if self.tag.has_key(JPEGTABLES):
# Hack to handle abbreviated JPEG headers
self.tile_prefix = self.tag[JPEGTABLES]
elif compression == "packbits":
args = rawmode
elif compression == "tiff_lzw":
args = rawmode
if self.tag.has_key(PREDICTOR):
# Section 14: Differencing Predictor
self.decoderconfig = (self.tag[PREDICTOR][0],)
if self.tag.has_key(ICCPROFILE):
self.info['icc_profile'] = self.tag[ICCPROFILE]
return args
def _setup(self):
"Setup this image object based on current tags"
if self.tag.has_key(0xBC01):
raise IOError, "Windows Media Photo files not yet supported"
getscalar = self.tag.getscalar
# extract relevant tags
self._compression = COMPRESSION_INFO[getscalar(COMPRESSION, 1)]
self._planar_configuration = getscalar(PLANAR_CONFIGURATION, 1)
# photometric is a required tag, but not everyone is reading
# the specification
photo = getscalar(PHOTOMETRIC_INTERPRETATION, 0)
fillorder = getscalar(FILLORDER, 1)
if Image.DEBUG:
print "*** Summary ***"
print "- compression:", self._compression
print "- photometric_interpretation:", photo
print "- planar_configuration:", self._planar_configuration
print "- fill_order:", fillorder
# size
xsize = getscalar(IMAGEWIDTH)
ysize = getscalar(IMAGELENGTH)
self.size = xsize, ysize
if Image.DEBUG:
print "- size:", self.size
format = getscalar(SAMPLEFORMAT, 1)
# mode: check photometric interpretation and bits per pixel
key = (
self.tag.prefix, photo, format, fillorder,
self.tag.get(BITSPERSAMPLE, (1,)),
self.tag.get(EXTRASAMPLES, ())
)
if Image.DEBUG:
print "format key:", key
try:
self.mode, rawmode = OPEN_INFO[key]
except KeyError:
if Image.DEBUG:
print "- unsupported format"
raise SyntaxError, "unknown pixel mode"
if Image.DEBUG:
print "- raw mode:", rawmode
print "- pil mode:", self.mode
self.info["compression"] = self._compression
xres = getscalar(X_RESOLUTION, (1, 1))
yres = getscalar(Y_RESOLUTION, (1, 1))
if xres and yres:
xres = xres[0] / (xres[1] or 1)
yres = yres[0] / (yres[1] or 1)
resunit = getscalar(RESOLUTION_UNIT, 1)
if resunit == 2: # dots per inch
self.info["dpi"] = xres, yres
elif resunit == 3: # dots per centimeter. convert to dpi
self.info["dpi"] = xres * 2.54, yres * 2.54
else: # No absolute unit of measurement
self.info["resolution"] = xres, yres
# build tile descriptors
x = y = l = 0
self.tile = []
if self.tag.has_key(STRIPOFFSETS):
# striped image
h = getscalar(ROWSPERSTRIP, ysize)
w = self.size[0]
a = None
for o in self.tag[STRIPOFFSETS]:
if not a:
a = self._decoder(rawmode, l)
self.tile.append(
(self._compression,
(0, min(y, ysize), w, min(y+h, ysize)),
o, a))
y = y + h
if y >= self.size[1]:
x = y = 0
l = l + 1
a = None
elif self.tag.has_key(TILEOFFSETS):
# tiled image
w = getscalar(322)  # TILEWIDTH
h = getscalar(323)  # TILELENGTH
a = None
for o in self.tag[TILEOFFSETS]:
if not a:
a = self._decoder(rawmode, l)
self.tile.append(
(self._compression,
(x, y, x+w, y+h),
o, a))
x = x + w
if x >= self.size[0]:
x, y = 0, y + h
if y >= self.size[1]:
x = y = 0
l = l + 1
a = None
else:
if Image.DEBUG:
print "- unsupported data organization"
raise SyntaxError("unknown data organization")
# fixup palette descriptor
if self.mode == "P":
palette = map(lambda a: chr(a / 256), self.tag[COLORMAP])
self.palette = ImagePalette.raw("RGB;L", string.join(palette, ""))
#
# --------------------------------------------------------------------
# Write TIFF files
# little endian is default except for image modes with explicit big endian byte-order
SAVE_INFO = {
# mode => rawmode, byteorder, photometrics, sampleformat, bitspersample, extra
"1": ("1", II, 1, 1, (1,), None),
"L": ("L", II, 1, 1, (8,), None),
"LA": ("LA", II, 1, 1, (8,8), 2),
"P": ("P", II, 3, 1, (8,), None),
"PA": ("PA", II, 3, 1, (8,8), 2),
"I": ("I;32S", II, 1, 2, (32,), None),
"I;16": ("I;16", II, 1, 1, (16,), None),
"I;16S": ("I;16S", II, 1, 2, (16,), None),
"F": ("F;32F", II, 1, 3, (32,), None),
"RGB": ("RGB", II, 2, 1, (8,8,8), None),
"RGBX": ("RGBX", II, 2, 1, (8,8,8,8), 0),
"RGBA": ("RGBA", II, 2, 1, (8,8,8,8), 2),
"CMYK": ("CMYK", II, 5, 1, (8,8,8,8), None),
"YCbCr": ("YCbCr", II, 6, 1, (8,8,8), None),
"LAB": ("LAB", II, 8, 1, (8,8,8), None),
"I;32BS": ("I;32BS", MM, 1, 2, (32,), None),
"I;16B": ("I;16B", MM, 1, 1, (16,), None),
"I;16BS": ("I;16BS", MM, 1, 2, (16,), None),
"F;32BF": ("F;32BF", MM, 1, 3, (32,), None),
}
def _cvt_res(value):
# convert value to TIFF rational number -- (numerator, denominator)
if type(value) in (type([]), type(())):
assert(len(value) % 2 == 0)
return value
if type(value) == type(1):
return (value, 1)
value = float(value)
return (int(value * 65536), 65536)
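# Illustrative sketch (not part of the original file): _cvt_res() accepts an
# integer, a float, or an already-paired sequence.
def _example_cvt_res():
    assert _cvt_res(72) == (72, 1)          # integer resolution
    assert _cvt_res((300, 1)) == (300, 1)   # pre-built rational
    assert _cvt_res(0.5) == (32768, 65536)  # float scaled by 65536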
def _save(im, fp, filename):
try:
rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode]
except KeyError:
raise IOError, "cannot write mode %s as TIFF" % im.mode
ifd = ImageFileDirectory(prefix)
# -- multi-page -- skip TIFF header on subsequent pages
if fp.tell() == 0:
# tiff header (write via IFD to get everything right)
# PIL always starts the first IFD at offset 8
fp.write(ifd.prefix + ifd.o16(42) + ifd.o32(8))
ifd[IMAGEWIDTH] = im.size[0]
ifd[IMAGELENGTH] = im.size[1]
# additions written by Greg Couch, [email protected]
# inspired by image-sig posting from Kevin Cazabon, [email protected]
if hasattr(im, 'tag'):
# preserve tags from original TIFF image file
for key in (RESOLUTION_UNIT, X_RESOLUTION, Y_RESOLUTION):
if im.tag.tagdata.has_key(key):
ifd[key] = im.tag.tagdata.get(key)
# preserve some more tags from original TIFF image file
# -- 2008-06-06 Florian Hoech
ifd.tagtype = im.tag.tagtype
for key in (IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, XMP):
if im.tag.has_key(key):
ifd[key] = im.tag[key]
# preserve ICC profile (should also work when saving other formats
# which support profiles as TIFF) -- 2008-06-06 Florian Hoech
if im.info.has_key("icc_profile"):
ifd[ICCPROFILE] = im.info["icc_profile"]
if im.encoderinfo.has_key("description"):
ifd[IMAGEDESCRIPTION] = im.encoderinfo["description"]
if im.encoderinfo.has_key("resolution"):
ifd[X_RESOLUTION] = ifd[Y_RESOLUTION] \
= _cvt_res(im.encoderinfo["resolution"])
if im.encoderinfo.has_key("x resolution"):
ifd[X_RESOLUTION] = _cvt_res(im.encoderinfo["x resolution"])
if im.encoderinfo.has_key("y resolution"):
ifd[Y_RESOLUTION] = _cvt_res(im.encoderinfo["y resolution"])
if im.encoderinfo.has_key("resolution unit"):
unit = im.encoderinfo["resolution unit"]
if unit == "inch":
ifd[RESOLUTION_UNIT] = 2
elif unit == "cm" or unit == "centimeter":
ifd[RESOLUTION_UNIT] = 3
else:
ifd[RESOLUTION_UNIT] = 1
if im.encoderinfo.has_key("software"):
ifd[SOFTWARE] = im.encoderinfo["software"]
if im.encoderinfo.has_key("date time"):
ifd[DATE_TIME] = im.encoderinfo["date time"]
if im.encoderinfo.has_key("artist"):
ifd[ARTIST] = im.encoderinfo["artist"]
if im.encoderinfo.has_key("copyright"):
ifd[COPYRIGHT] = im.encoderinfo["copyright"]
dpi = im.encoderinfo.get("dpi")
if dpi:
ifd[RESOLUTION_UNIT] = 2
ifd[X_RESOLUTION] = _cvt_res(dpi[0])
ifd[Y_RESOLUTION] = _cvt_res(dpi[1])
if bits != (1,):
ifd[BITSPERSAMPLE] = bits
if len(bits) != 1:
ifd[SAMPLESPERPIXEL] = len(bits)
if extra is not None:
ifd[EXTRASAMPLES] = extra
if format != 1:
ifd[SAMPLEFORMAT] = format
ifd[PHOTOMETRIC_INTERPRETATION] = photo
if im.mode == "P":
lut = im.im.getpalette("RGB", "RGB;L")
ifd[COLORMAP] = tuple(map(lambda v: ord(v) * 256, lut))
# data orientation
stride = len(bits) * ((im.size[0]*bits[0]+7)/8)
ifd[ROWSPERSTRIP] = im.size[1]
ifd[STRIPBYTECOUNTS] = stride * im.size[1]
ifd[STRIPOFFSETS] = 0 # this is adjusted by IFD writer
ifd[COMPRESSION] = 1 # no compression
offset = ifd.save(fp)
ImageFile._save(im, fp, [
("raw", (0,0)+im.size, offset, (rawmode, stride, 1))
])
# -- helper for multi-page save --
if im.encoderinfo.has_key("_debug_multipage"):
#just to access o32 and o16 (using correct byte order)
im._debug_multipage = ifd
#
# --------------------------------------------------------------------
# Register
Image.register_open("TIFF", TiffImageFile, _accept)
Image.register_save("TIFF", _save)
Image.register_extension("TIFF", ".tif")
Image.register_extension("TIFF", ".tiff")
Image.register_mime("TIFF", "image/tiff")
| ppizarror/Ned-For-Spod | bin/external/pil/TiffImagePlugin.py | Python | gpl-2.0 | 27,979 |
#!/usr/local/bin/python3
class TestClass(object):
def foo():
doc = "The foo property."
def fget(self):
return self._foo
def fset(self, value):
self._foo = value
def fdel(self):
del self._foo
return locals()
foo = property(**foo())
def bar():
doc = "The bar property."
def fget(self):
return self._bar
def fset(self, value):
self._bar = value
def fdel(self):
del self._bar
return locals()
bar = property(**bar())
def __init__(self, foo, bar):
    # note: the constructor arguments are ignored; the properties are
    # initialised with fixed strings
    self.foo = "foo"
    self.bar = "bar"
def test_method(self, attr):
    if attr == 1:
        prop = self.foo
    else:
        prop = self.bar
    print(prop)
    # rebinding the local name does not write through to the property
    prop = 'TADA!'
tc = TestClass(1,2)
print(tc.foo)
print(tc.bar)
tc.test_method('foo')
#print(tc.foo)
#print(dir(tc))
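# Illustrative sketch (not part of the original script): foo()/bar() above
# return locals(), a dict whose fget/fset/fdel/doc keys match the keyword
# arguments of property(), so property(**foo()) is equivalent to the
# explicit spelling below.
class ExplicitProperty(object):
    def _get_x(self):
        return self._x
    def _set_x(self, value):
        self._x = value
    def _del_x(self):
        del self._x
    x = property(_get_x, _set_x, _del_x, doc="The x property.")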
| Etzeitet/pythonjournal | pythonjournal/proptest.py | Python | gpl-2.0 | 953 |
#!/usr/bin/python
import sys, os, urllib, argparse, base64, time, threading, re
from gi.repository import Gtk, WebKit, Notify
webView = None
def refresh(widget, event):
global webView
webView.reload()
window_title = ''
def HandleTitleChanged(webview, title):
global window_title
window_title = title
parent = webview
while parent.get_parent() != None:
    parent = parent.get_parent()  # walk up to the top-level window
parent.set_title(title)
return True
def HandleCreateWebView(webview, frame):
info = Gtk.Window()
info.set_default_size(1000, 700)
child = WebKit.WebView()
child.connect('create-web-view', HandleCreateWebView)
child.connect('close-web-view', HandleCloseWebView)
child.connect('navigation-policy-decision-requested', HandleNavigationRequested)
#child.connect('notify::title', HandleTitleChanged)
info.set_title('')
info.add(child)
info.show_all()
return child
def HandleCloseWebView(webview):
parent = webview
while parent.get_parent() != None:
    parent = parent.get_parent()  # walk up to the top-level window
parent.destroy()
def HandleNewWindowPolicyDecisionRequested(webview, frame, request, navigation_action, policy_decision):
if '&URL=' in request.get_uri():
os.system('xdg-open "%s"' % urllib.unquote(request.get_uri().split('&URL=')[1]).decode('utf8'))
def HandleNavigationRequested(webview, frame, request, navigation_action, policy_decision):
if '&URL=' in request.get_uri():
HandleCloseWebView(webview)
return 1
prefills = {}
submit = False
ignore_submit = []
def prefill_password(webview, frame):
global prefills, submit
should_ignore_submit = False
dom = webview.get_dom_document()
forms = dom.get_forms()
for i in range(0, forms.get_length()):
form = forms.item(i)
elements = form.get_elements()
is_form_modified = False
for j in range(0, elements.get_length()):
element = elements.item(j)
element_name = element.get_name()
if element_name in ignore_submit:
should_ignore_submit = True
for key in prefills.keys():
if element_name == key:
if prefills[key].lower() == 'true':
element.set_checked(True)
is_form_modified = True
else:
element.set_value(prefills[key])
is_form_modified = True
if is_form_modified and submit and not should_ignore_submit:
form.submit()
def HandleMimeType(webview, frame, request, mimetype, policy_decision):
print 'Requested decision for mimetype:', mimetype
return True
stop_threads = False
search_notifys = []
def SearchNotify(webview):
global stop_threads
global window_title
global search_notifys
while True:
if stop_threads:
break
dom = webview.get_dom_document()
if not dom:
    time.sleep(2)  # avoid a busy loop while the page has no DOM yet
    continue
body = dom.get_body()
if not body:
    time.sleep(2)
    continue
body_html = body.get_inner_html()
if not body_html:
    time.sleep(2)
    continue
for notice in search_notifys:
msgs = list(set(re.findall(notice, body_html)))
if len(msgs) > 0:
for msg in msgs:
Notify.init(window_title)
msg_notify = Notify.Notification.new(window_title, msg, "dialog-information")
msg_notify.show()
time.sleep(2) # Don't duplicate the notification
time.sleep(2)
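# Illustrative sketch (not part of the original script): the --notify option
# expects the regex base64-encoded; a pattern can be prepared like this.
def encode_notify_pattern(pattern):
    """Return the base64 form of a notification regex for --notify."""
    return base64.b64encode(pattern)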
if __name__ == "__main__":
parser_epilog = ("Example:\n\n"
"./simple_browse.py https://owa.example.com --useragent=\"Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0\" --stylesheet=~/simple_browse/sample_styles/owa_style.css --username=<webmail username> --b64pass=\"<base64 encoded password>\" --forminput=trusted:true --submit --notify=PHNwYW4gY2xhc3M9Im53SXRtVHh0U2JqIj4oW1x3IF0rKTwvc3Bhbj4=\n\n"
"This command will open Outlook Web Access, set the user agent to allow it to \nload using pipelight (for silverlight support), login to webmail, then apply a \ncustom css style to make webmail look like a desktop app. When new emails\narrive, notification will be sent to gnome-shell.\n")
parser = argparse.ArgumentParser(description="Simple Browser: A simple webkit browser written in Python", epilog=parser_epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("url")
parser.add_argument("--useragent", help="An optional user agent to apply to the main page")
parser.add_argument("--stylesheet", help="An optional stylesheet to apply to the main page")
parser.add_argument("--username", help="A username we'll try to use to sign in")
parser.add_argument("--password", help="A password for signing in")
parser.add_argument("--b64pass", help="An alternative b64 encoded password for sign on")
parser.add_argument("--forminput", help="A form field name and value to prefill (seperated by a colon). Only one value for each key is allowed.", action='append')
parser.add_argument("--submit", help="Submit the filled form when we've finished entering values", action="store_true")
parser.add_argument("--ignore-submit", help="Ignore the submit if the form contains this key", action='append')
parser.add_argument("--title", help="Title for the window")
parser.add_argument("--notify", help="A regex search string, base64 encoded, which will display a notification when found, example: <span class=\"nwItmTxtSbj\">([\w ]+)</span>", action='append')
args = parser.parse_args()
url = args.url
user_agent = None
if args.useragent:
user_agent = args.useragent
stylesheet = None
if args.stylesheet:
stylesheet = 'file://localhost%s' % os.path.abspath(args.stylesheet)
if args.username:
prefills['username'] = args.username
if args.b64pass:
prefills['password'] = base64.b64decode(args.b64pass)
elif args.password:
prefills['password'] = args.password
if args.submit:
submit = True
if args.forminput:
for field in args.forminput:
key, value = field.split(':')
if key in prefills:
parser.print_help()
exit(1)
prefills[key] = value
if args.ignore_submit:
ignore_submit.extend(args.ignore_submit)
if args.notify:
for notice in args.notify:
search_notifys.append(base64.b64decode(notice))
win = Gtk.Window()
scrolled = Gtk.ScrolledWindow()
win.set_default_size(1500, 900)
webView = WebKit.WebView()
webView.load_uri(url)
overlay = Gtk.Overlay()
overlay.add(webView)
# Apply Settings
settings = WebKit.WebSettings()
if user_agent:
settings.set_property('user-agent', user_agent)
settings.set_property('enable-spell-checking', True)
if stylesheet:
settings.set_property('user-stylesheet-uri', stylesheet)
webView.set_settings(settings)
# Add Signal handlers to the webview
webView.connect('create-web-view', HandleCreateWebView)
webView.connect('close-web-view', HandleCloseWebView)
webView.connect('new-window-policy-decision-requested', HandleNewWindowPolicyDecisionRequested)
webView.connect('navigation-policy-decision-requested', HandleNavigationRequested)
#webView.connect('notify::title', HandleTitleChanged)
webView.connect('mime-type-policy-decision-requested', HandleMimeType)
webView.connect('load-finished', prefill_password)
win.set_title('')
# Add the Refresh button
fixed = Gtk.Fixed()
fixed.set_halign(Gtk.Align.START)
fixed.set_valign(Gtk.Align.START)
overlay.add_overlay(fixed)
fixed.show()
image = Gtk.Image()
image.set_from_pixbuf(Gtk.IconTheme().load_icon('gtk-refresh', 10, 0))
imgevent = Gtk.EventBox()
imgevent.add(image)
imgevent.connect('button-press-event', refresh)
fixed.put(imgevent, 10, 10)
win.add(scrolled)
scrolled.add(overlay)
win.show_all()
win.connect('destroy', Gtk.main_quit)
if args.title:
window_title = args.title
win.set_title(args.title)
if search_notifys:
t = threading.Thread(target=SearchNotify, args=(webView,))
t.start()
Gtk.main()
stop_threads = True
| DavidMulder/simple_browse | simple_browse.py | Python | gpl-2.0 | 8,391 |
"""
WSGI config for nykampweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nykampweb.settings")
application = get_wsgi_application()
| dqnykamp/nykampweb | nykampweb/wsgi.py | Python | gpl-2.0 | 395 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Manage the Wordfast Translation Memory format
Wordfast TM format is the Translation Memory format used by the
U{Wordfast<http://www.wordfast.net/>} computer aided translation tool.
It is a bilingual base class derived format with L{WordfastTMFile}
and L{WordfastUnit} providing file and unit level access.
Wordfast tools
==============
Wordfast is a computer aided translation tool. It is an application
built on top of Microsoft Word and is implemented as a rather
sophisticated set of macros. Understanding that helps us understand
many of the seemingly strange choices around this format including:
encoding, escaping and file naming.
Implementation
==============
The implementation covers the full requirements of a Wordfast TM file.
The files are simple Tab Separated Value (TSV) files that can be read
by Microsoft Excel and other spreadsheet programs. They use the .txt
extension which does make it more difficult to automatically identify
such files.
The dialect of the TSV files is specified by L{WordfastDialect}.
Encoding
--------
The files are UTF-16 or ISO-8859-1 (Latin1) encoded. These choices
are most likely because Microsoft Word is the base editing tool for
Wordfast.
The format is tab separated so we are able to detect UTF-16 vs Latin-1
by searching for the occurrence of a UTF-16 tab character and then
continuing with the parsing.
Timestamps
----------
L{WordfastTime} allows for the correct management of the Wordfast
YYYYMMDD~HHMMSS timestamps. However, timestamps on individual units are
not updated when edited.
Header
------
L{WordfastHeader} provides header management support. The header
functionality is fully implemented through observing the behaviour of the
files in real use cases, input from the Wordfast programmers and
public documentation.
Escaping
--------
Wordfast TM implements a form of escaping that covers two aspects:
1. Placeables: bold, formatting, etc. These are left as is and ignored.
It is up to the editor and future placeable implementation to manage
these.
2. Escapes: items that may confuse Excel or translators are
escaped as &'XX;. These are fully implemented and are converted to
and from Unicode. By observing behaviour and reading documentation
we where able to observe all possible escapes. Unfortunately the
escaping differs slightly between Windows and Mac version. This
might cause errors in future.
Functions allow for L{conversion to Unicode<_wf_to_char>} and L{back to
Wordfast escapes<_char_to_wf>}.
Extended Attributes
-------------------
The last 4 columns allow users to define and manage extended attributes.
These are left as is and are not directly managed by our implementation.
"""
import csv
import sys
import time
from translate.storage import base
WF_TIMEFORMAT = "%Y%m%d~%H%M%S"
"""Time format used by Wordfast"""
WF_FIELDNAMES_HEADER = ["date", "userlist", "tucount", "src-lang", "version", "target-lang", "license", "attr1list", "attr2list", "attr3list", "attr4list", "attr5list"]
"""Field names for the Wordfast header"""
WF_FIELDNAMES = ["date", "user", "reuse", "src-lang", "source", "target-lang", "target", "attr1", "attr2", "attr3", "attr4"]
"""Field names for a Wordfast TU"""
WF_FIELDNAMES_HEADER_DEFAULTS = {
"date": "%19000101~121212",
"userlist": "%User ID,TT,TT Translate-Toolkit",
"tucount": "%TU=00000001",
"src-lang": "%EN-US",
"version": "%Wordfast TM v.5.51w9/00",
"target-lang": "",
"license": "%---00000001",
"attr1list": "",
"attr2list": "",
"attr3list": "",
"attr4list": "" }
"""Default or minimum header entries for a Wordfast file"""
# TODO Needs validation. The following need to be checked against a WF TM file to ensure
# that the correct Unicode values have been chosen for the characters. For now these look
# correct and have been taken from Windows CP1252 and Macintosh code points found for
# the respective character sets on Linux.
WF_ESCAPE_MAP = (
("&'26;", u"\u0026"), # & - Ampersand (must be first to prevent escaping of escapes)
("&'82;", u"\u201A"), # ‚ - Single low-9 quotation mark
("&'85;", u"\u2026"), # … - Elippsis
("&'91;", u"\u2018"), # ‘ - left single quotation mark
("&'92;", u"\u2019"), # ’ - right single quotation mark
("&'93;", u"\u201C"), # “ - left double quotation mark
("&'94;", u"\u201D"), # ” - right double quotation mark
("&'96;", u"\u2013"), # – - en dash (validate)
("&'97;", u"\u2014"), # — - em dash (validate)
("&'99;", u"\u2122"), # ™ - Trade mark
# Windows only
("&'A0;", u"\u00A0"), # - Non breaking space
("&'A9;", u"\u00A9"), # © - Copyright
("&'AE;", u"\u00AE"), # ® - Registered
("&'BC;", u"\u00BC"), # ¼
("&'BD;", u"\u00BD"), # ½
("&'BE;", u"\u00BE"), # ¾
# Mac only
("&'A8;", u"\u00AE"), # ® - Registered
("&'AA;", u"\u2122"), # ™ - Trade mark
("&'C7;", u"\u00AB"), # « - Left-pointing double angle quotation mark
("&'C8;", u"\u00BB"), # » - Right-pointing double angle quotation mark
("&'C9;", u"\u2026"), # … - Horizontal Elippsis
("&'CA;", u"\u00A0"), # - Non breaking space
("&'D0;", u"\u2013"), # – - en dash (validate)
("&'D1;", u"\u2014"), # — - em dash (validate)
("&'D2;", u"\u201C"), # “ - left double quotation mark
("&'D3;", u"\u201D"), # ” - right double quotation mark
("&'D4;", u"\u2018"), # ‘ - left single quotation mark
("&'D5;", u"\u2019"), # ’ - right single quotation mark
("&'E2;", u"\u201A"), # ‚ - Single low-9 quotation mark
("&'E3;", u"\u201E"), # „ - Double low-9 quotation mark
# Other markers
#("&'B;", u"\n"), # Soft-break - XXX creates a problem with roundtripping could also be represented by \u2028
)
"""Mapping of Wordfast &'XX; escapes to correct Unicode characters"""
TAB_UTF16 = "\x00\x09"
"""The tab \\t character as it would appear in UTF-16 encoding"""
def _char_to_wf(string):
"""Char -> Wordfast &'XX; escapes
Full roundtripping is not possible because of the escaping of NEWLINE \\n
and TAB \\t"""
# FIXME there is no platform check to ensure that we use Mac encodings when running on a Mac
if string:
for code, char in WF_ESCAPE_MAP:
string = string.replace(char.encode('utf-8'), code)
string = string.replace("\n", "\\n").replace("\t", "\\t")
return string
def _wf_to_char(string):
"""Wordfast &'XX; escapes -> Char"""
if string:
for code, char in WF_ESCAPE_MAP:
string = string.replace(code, char.encode('utf-8'))
string = string.replace("\\n", "\n").replace("\\t", "\t")
return string
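# Illustrative sketch (not part of the original module): round trip for a
# character covered by WF_ESCAPE_MAP (newline and tab escapes are one-way).
def _example_wf_escapes():
    ellipsis = u"\u2026".encode('utf-8')
    assert _char_to_wf(ellipsis) == "&'85;"
    assert _wf_to_char("&'85;") == ellipsis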
class WordfastDialect(csv.Dialect):
"""Describe the properties of a Wordfast generated TAB-delimited file."""
delimiter = "\t"
lineterminator = "\r\n"
quoting = csv.QUOTE_NONE
if sys.version_info < (2, 5, 0):
# We need to define the following items for csv in Python < 2.5
quoting = csv.QUOTE_MINIMAL # Wordfast does not quote anything, since we escape
# \t anyway in _char_to_wf this should not be a problem
doublequote = False
skipinitialspace = False
escapechar = None
quotechar = '"'
csv.register_dialect("wordfast", WordfastDialect)
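# Illustrative sketch (not part of the original module): the registered
# dialect can be handed straight to the csv module.
def _example_wordfast_dialect():
    row = list(csv.reader(["source\ttarget"], dialect="wordfast"))[0]
    assert row == ["source", "target"]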
class WordfastTime(object):
"""Manages time stamps in the Wordfast format of YYYYMMDD~hhmmss"""
def __init__(self, newtime=None):
self._time = None
if not newtime:
self.time = None
elif isinstance(newtime, basestring):
self.timestring = newtime
elif isinstance(newtime, time.struct_time):
self.time = newtime
def get_timestring(self):
"""Get the time in the Wordfast time format"""
if not self._time:
return None
else:
return time.strftime(WF_TIMEFORMAT, self._time)
def set_timestring(self, timestring):
"""Set the time_sturct object using a Wordfast time formated string
@param timestring: A Wordfast time string (YYYMMDD~hhmmss)
@type timestring: String
"""
self._time = time.strptime(timestring, WF_TIMEFORMAT)
timestring = property(get_timestring, set_timestring)
def get_time(self):
"""Get the time_struct object"""
return self._time
def set_time(self, newtime):
"""Set the time_struct object
@param newtime: a new time object
@type newtime: time.time_struct
"""
if newtime and isinstance(newtime, time.struct_time):
self._time = newtime
else:
self._time = None
time = property(get_time, set_time)
def __str__(self):
if not self.timestring:
return ""
else:
return self.timestring
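# Illustrative sketch (not part of the original module): WordfastTime round
# trips the YYYYMMDD~hhmmss stamp format.
def _example_wordfast_time():
    wftime = WordfastTime("20070801~120000")
    assert wftime.time.tm_year == 2007
    assert str(wftime) == "20070801~120000"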
class WordfastHeader(object):
"""A wordfast translation memory header"""
def __init__(self, header=None):
self._header_dict = []
if not header:
self.header = self._create_default_header()
elif isinstance(header, dict):
self.header = header
def _create_default_header(self):
"""Create a default Wordfast header with the date set to the current time"""
defaultheader = WF_FIELDNAMES_HEADER_DEFAULTS
defaultheader['date'] = '%%%s' % WordfastTime(time.localtime()).timestring
return defaultheader
def getheader(self):
"""Get the header dictionary"""
return self._header_dict
def setheader(self, newheader):
self._header_dict = newheader
header = property(getheader, setheader)
def settargetlang(self, newlang):
self._header_dict['target-lang'] = '%%%s' % newlang
targetlang = property(None, settargetlang)
def settucount(self, count):
self._header_dict['tucount'] = '%%TU=%08d' % count
tucount = property(None, settucount)
class WordfastUnit(base.TranslationUnit):
"""A Wordfast translation memory unit"""
def __init__(self, source=None):
self._dict = {}
if source:
self.source = source
super(WordfastUnit, self).__init__(source)
def _update_timestamp(self):
"""Refresh the timestamp for the unit"""
self._dict['date'] = WordfastTime(time.localtime()).timestring
def getdict(self):
"""Get the dictionary of values for a Wordfast line"""
return self._dict
def setdict(self, newdict):
"""Set the dictionary of values for a Wordfast line
@param newdict: a new dictionary with Wordfast line elements
@type newdict: Dict
"""
# TODO First check that the values are OK
self._dict = newdict
dict = property(getdict, setdict)
def _get_source_or_target(self, key):
if self._dict.get(key, None) is None:
return None
elif self._dict[key]:
return _wf_to_char(self._dict[key]).decode('utf-8')
else:
return ""
def _set_source_or_target(self, key, newvalue):
if newvalue is None:
self._dict[key] = None
if isinstance(newvalue, unicode):
newvalue = newvalue.encode('utf-8')
newvalue = _char_to_wf(newvalue)
if not key in self._dict or newvalue != self._dict[key]:
self._dict[key] = newvalue
self._update_timestamp()
def getsource(self):
return self._get_source_or_target('source')
def setsource(self, newsource):
self._rich_source = None
return self._set_source_or_target('source', newsource)
source = property(getsource, setsource)
def gettarget(self):
return self._get_source_or_target('target')
def settarget(self, newtarget):
self._rich_target = None
return self._set_source_or_target('target', newtarget)
target = property(gettarget, settarget)
def settargetlang(self, newlang):
self._dict['target-lang'] = newlang
targetlang = property(None, settargetlang)
def __str__(self):
return str(self._dict)
def istranslated(self):
if not self._dict.get('source', None):
return False
return bool(self._dict.get('target', None))
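# Illustrative sketch (not part of the original module): building a unit by
# hand; assigning source/target stores escaped values and refreshes the
# unit's timestamp.
def _example_wordfast_unit():
    unit = WordfastUnit(u"File")
    unit.target = u"Fichier"
    assert unit.istranslated()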
class WordfastTMFile(base.TranslationStore):
"""A Wordfast translation memory file"""
Name = _("Wordfast Translation Memory")
Mimetypes = ["application/x-wordfast"]
Extensions = ["txt"]
def __init__(self, inputfile=None, unitclass=WordfastUnit):
"""construct a Wordfast TM, optionally reading in from inputfile."""
self.UnitClass = unitclass
base.TranslationStore.__init__(self, unitclass=unitclass)
self.filename = ''
self.header = WordfastHeader()
self._encoding = 'iso-8859-1'
if inputfile is not None:
self.parse(inputfile)
def parse(self, input):
"""parsese the given file or file source string"""
if hasattr(input, 'name'):
self.filename = input.name
elif not getattr(self, 'filename', ''):
self.filename = ''
if hasattr(input, "read"):
tmsrc = input.read()
input.close()
input = tmsrc
if TAB_UTF16 in input.split("\n")[0]:
self._encoding = 'utf-16'
else:
self._encoding = 'iso-8859-1'
try:
input = input.decode(self._encoding).encode('utf-8')
except:
raise ValueError("Wordfast files are either UTF-16 (UCS2) or ISO-8859-1 encoded")
for header in csv.DictReader(input.split("\n")[:1], fieldnames=WF_FIELDNAMES_HEADER, dialect="wordfast"):
self.header = WordfastHeader(header)
lines = csv.DictReader(input.split("\n")[1:], fieldnames=WF_FIELDNAMES, dialect="wordfast")
for line in lines:
newunit = WordfastUnit()
newunit.dict = line
self.addunit(newunit)
def __str__(self):
output = csv.StringIO()
header_output = csv.StringIO()
writer = csv.DictWriter(output, fieldnames=WF_FIELDNAMES, dialect="wordfast")
unit_count = 0
for unit in self.units:
if unit.istranslated():
unit_count += 1
writer.writerow(unit.dict)
if unit_count == 0:
return ""
output.reset()
self.header.tucount = unit_count
outheader = csv.DictWriter(header_output, fieldnames=WF_FIELDNAMES_HEADER, dialect="wordfast")
outheader.writerow(self.header.header)
header_output.reset()
decoded = "".join(header_output.readlines() + output.readlines()).decode('utf-8')
try:
return decoded.encode(self._encoding)
except UnicodeEncodeError:
return decoded.encode('utf-16')
| lehmannro/translate | storage/wordfast.py | Python | gpl-2.0 | 16,175 |
import logging
import struct
from memory import Memory
from network import Mac, IpAddress
from gbe import Gbe
LOGGER = logging.getLogger(__name__)
# Offsets for fields in the memory map, in bytes
OFFSET_CORE_TYPE = 0x0
OFFSET_BUFFER_SIZE = 0x4
OFFSET_WORD_LEN = 0x8
OFFSET_MAC_ADDR = 0xc
OFFSET_IP_ADDR = 0x14
OFFSET_GW_ADDR = 0x18
OFFSET_NETMASK = 0x1c
OFFSET_MC_IP = 0x20
OFFSET_MC_MASK = 0x24
OFFSET_BUF_VLD = 0x28
OFFSET_FLAGS = 0x2c
OFFSET_PORT = 0x30
OFFSET_STATUS = 0x34
OFFSET_CONTROL = 0x40
OFFSET_ARP_SIZE = 0x44
OFFSET_TX_PKT_RATE = 0x48
OFFSET_TX_PKT_CNT = 0x4c
OFFSET_TX_VLD_RATE = 0x50
OFFSET_TX_VLD_CNT = 0x54
OFFSET_TX_OF_CNT = 0x58
OFFSET_TX_AF_CNT = 0x5c
OFFSET_RX_PKT_RATE = 0x60
OFFSET_RX_PKT_CNT = 0x64
OFFSET_RX_VLD_RATE = 0x68
OFFSET_RX_VLD_CNT = 0x6c
OFFSET_RX_OF_CNT = 0x70
OFFSET_RX_AF_CNT = 0x74
OFFSET_COUNT_RST = 0x78
OFFSET_ARP_CACHE = 0x1000
OFFSET_TX_BUFFER = 0x4000
OFFSET_RX_BUFFER = 0x8000
# Sizes for fields in the memory map, in bytes
SIZE_CORE_TYPE = 0x4
SIZE_BUFFER_SIZE = 0x4
SIZE_WORD_LEN = 0x4
SIZE_MAC_ADDR = 0x8
SIZE_IP_ADDR = 0x4
SIZE_GW_ADDR = 0x4
SIZE_NETMASK = 0x4
SIZE_MC_IP = 0x4
SIZE_MC_MASK = 0x4
SIZE_BUF_AVAIL = 0x4
SIZE_FLAGS = 0x4
SIZE_PORT = 0x4
SIZE_STATUS = 0x8
SIZE_CONTROL = 0x8
SIZE_ARP_SIZE = 0x4
SIZE_TX_PKT_RATE = 0x4
SIZE_TX_PKT_CNT = 0x4
SIZE_TX_VLD_RATE = 0x4
SIZE_TX_VLD_CNT = 0x4
SIZE_TX_OF_CNT = 0x4
SIZE_TX_AF_CNT = 0x4
SIZE_RX_PKT_RATE = 0x4
SIZE_RX_PKT_CNT = 0x4
SIZE_RX_VLD_RATE = 0x4
SIZE_RX_VLD_CNT = 0x4
SIZE_RX_OF_CNT = 0x4
SIZE_RX_AF_CNT = 0x4
SIZE_COUNT_RST = 0x4
SIZE_ARP_CACHE = 0x3000
SIZE_TX_BUFFER = 0x4000
SIZE_RX_BUFFER = 0x4000
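# Illustrative sketch (not part of the original module): the fabric
# enable/disable and soft-reset methods below treat the 32-bit word at
# OFFSET_FLAGS as four big-endian bytes (enable in byte 0, soft reset in
# byte 2 on memory-map-compliant cores).
def _example_decode_flags(raw_word):
    enable, _unused, soft_rst, _pad = struct.unpack('>4B', raw_word)
    return {'fabric_enable': enable, 'soft_reset': soft_rst}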
class OneGbe(Memory, Gbe):
"""
To do with the CASPER one gigabit Ethernet (GbE) yellow block
implemented on FPGAs, and interfaced-to via KATCP memory reads/writes.
"""
def __init__(self, parent, name, address, length_bytes, device_info=None):
"""
:param parent: Parent object who owns this TenGbe instance
:param name: Unique name of the instance
:param address:
:param length_bytes:
:param device_info: Information about this device
"""
Memory.__init__(self, name, 32, address, length_bytes)
Gbe.__init__(self, parent, name, address, length_bytes, device_info)
self.memmap_compliant = self._check_memmap_compliance()
@property
def mac(self):
return self.get_gbe_core_details()['mac']
@property
def ip_address(self):
return self.get_gbe_core_details()['ip']
@property
def port(self):
return self.get_gbe_core_details()['fabric_port']
def _check_memmap_compliance(self):
"""
Look at the first word of the core's memory map and try to
figure out if it compliant with the harmonized ethernet map.
This isn't flawless, but unless the user sets a very weird
MAC address for their core (which is what the old core's map
stored in register 0, it should be OK).
"""
x = self.parent.read(self.name, 4)
cpu_tx_en, cpu_rx_en, rev, core_type = struct.unpack('4B', x)
if (cpu_tx_en > 1) or (cpu_rx_en > 1) or (core_type != 2):
return False
else:
return True
def post_create_update(self, raw_device_info):
"""
Update the device with information not available at creation.
:param raw_device_info: info about this block that may be useful
"""
super(OneGbe, self).post_create_update(raw_device_info)
self.snaps = {'tx': None, 'rx': None}
for snapshot in self.parent.snapshots:
if snapshot.name.find(self.name + '_') == 0:
name = snapshot.name.replace(self.name + '_', '')
if name == 'txs_ss':
self.snaps['tx'] = snapshot.name
elif name == 'rxs_ss':
self.snaps['rx'] = snapshot.name
else:
errmsg = '%s: incorrect snap %s under gbe ' \
         'block' % (self.fullname, snapshot.name)
LOGGER.error(errmsg)
raise RuntimeError(errmsg)
def read_txsnap(self):
"""
Read the TX snapshot embedded in this GbE yellow block
"""
return self.snaps['tx'].read(timeout=10)['data']
def read_rxsnap(self):
"""
Read the RX snapshot embedded in this GbE yellow block
"""
return self.snaps['rx'].read(timeout=10)['data']
# def fabric_start(self):
# """
# Setup the interface by writing to the fabric directly, bypassing tap.
# :param self:
# :return:
# """
# if self.tap_running():
# log_runtime_error(
# LOGGER, 'TAP running on %s, stop tap before '
# 'accessing fabric directly.' % self.name)
# mac_location = 0x00
# ip_location = 0x10
# port_location = 0x22
# self.parent.write(self.name, self.mac.packed(), mac_location)
# self.parent.write(self.name, self.ip_address.packed(), ip_location)
# # self.parent.write_int(self.name, self.port, offset = port_location)
def dhcp_start(self):
"""
Configure this interface, then start a DHCP client on ALL interfaces.
"""
#if self.mac is None:
# TODO get MAC from EEPROM serial number and assign here
# self.mac = '0'
reply, _ = self.parent.transport.katcprequest(
name='tap-start', request_timeout=5,
require_ok=True,
request_args=(self.name, self.name, '0.0.0.0',
str(self.port), str(self.mac), ))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure starting tap driver.' % self.name)
reply, _ = self.parent.transport.katcprequest(
name='tap-arp-config', request_timeout=1,
require_ok=True,
request_args=(self.name, 'mode', '0'))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure disabling ARP.' % self.name)
reply, _ = self.parent.transport.katcprequest(
name='tap-dhcp', request_timeout=30,
require_ok=True,
request_args=(self.name, ))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure starting DHCP client.' % self.name)
reply, _ = self.parent.transport.katcprequest(
name='tap-arp-config', request_timeout=1,
require_ok=True,
request_args=(self.name, 'mode', '-1'))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure re-enabling ARP.' % self.name)
# it looks like the command completed without error, so
# update the basic core details
self.get_gbe_core_details()
def tap_start(self, restart=False):
"""
Program the GbE device and start the TAP driver.
:param restart: stop before starting
"""
if len(self.name) > 8:
raise NameError('%s: tap device identifier must be shorter than 9 '
'characters..' % self.fullname)
if restart:
self.tap_stop()
if self.tap_running():
LOGGER.info('%s: tap already running.' % self.fullname)
return
LOGGER.info('%s: starting tap driver.' % self.fullname)
reply, _ = self.parent.transport.katcprequest(
name='tap-start', request_timeout=-1, require_ok=True,
request_args=(self.name, self.name, str(self.ip_address),
str(self.port), str(self.mac), ))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure starting tap driver.' %
self.fullname)
def tap_stop(self):
"""
Stop a TAP driver.
"""
if not self.tap_running():
return
LOGGER.info('%s: stopping tap driver.' % self.fullname)
reply, _ = self.parent.transport.katcprequest(
name='tap-stop', request_timeout=-1,
require_ok=True, request_args=(self.name, ))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure stopping tap '
'device.' % self.fullname)
def tap_info(self):
"""
Get info on the tap instance running on this interface.
"""
uninforms = []
def handle_inform(msg):
uninforms.append(msg)
self.parent.unhandled_inform_handler = handle_inform
_, informs = self.parent.transport.katcprequest(
name='tap-info', request_timeout=-1,
require_ok=False, request_args=(self.name, ))
self.parent.unhandled_inform_handler = None
# process the tap-info
if len(informs) == 1:
return {'name': informs[0].arguments[0],
'ip': informs[0].arguments[1]}
elif len(informs) == 0:
return {'name': '', 'ip': ''}
else:
raise RuntimeError('%s: invalid return from tap-info?' %
self.fullname)
# TODO - this request should return okay if the tap isn't
# running - it shouldn't fail
# if reply.arguments[0] != 'ok':
# log_runtime_error(LOGGER, 'Failure getting tap info for '
# 'device %s." % str(self))
def tap_running(self):
"""
Determine if an instance of tap is already running for this
GbE interface.
"""
tapinfo = self.tap_info()
if tapinfo['name'] == '':
return False
return True
def tap_arp_reload(self):
"""
Have the tap driver reload its ARP table right now.
"""
reply, _ = self.parent.transport.katcprequest(
name="tap-arp-reload", request_timeout=-1,
require_ok=True, request_args=(self.name, ))
if reply.arguments[0] != 'ok':
raise RuntimeError('Failure requesting ARP reload for tap '
'device %s.' % str(self))
def multicast_receive(self, ip_str, group_size):
"""
Send a request to KATCP to have this tap instance send a multicast
group join request.
:param ip_str: A dotted decimal string representation of the base
mcast IP address.
:param group_size: An integer for how many mcast addresses from
base to respond to.
"""
# mask = 255*(2 ** 24) + 255*(2 ** 16) + 255*(2 ** 8) + (255-group_size)
# self.parent.write_int(self.name, str2ip(ip_str), offset=12)
# self.parent.write_int(self.name, mask, offset=13)
# mcast_group_string = ip_str + '+' + str(group_size)
mcast_group_string = ip_str
reply, _ = self.parent.transport.katcprequest(
'tap-multicast-add', -1, True, request_args=(self.name, 'recv',
mcast_group_string, ))
if reply.arguments[0] == 'ok':
if mcast_group_string not in self.multicast_subscriptions:
self.multicast_subscriptions.append(mcast_group_string)
return
else:
raise RuntimeError('%s: failed adding multicast receive %s to '
'tap device.' % (self.fullname,
mcast_group_string))
def multicast_remove(self, ip_str):
"""
Send a request to be removed from a multicast group.
:param ip_str: A dotted decimal string representation of the base
mcast IP address.
"""
try:
reply, _ = self.parent.transport.katcprequest(
'tap-multicast-remove', -1, True,
request_args=(self.name, IpAddress.str2ip(ip_str), ))
except:
raise RuntimeError('%s: tap-multicast-remove does not seem to '
'be supported on %s' % (self.fullname,
self.parent.host))
        if reply.arguments[0] == 'ok':
            if ip_str not in self.multicast_subscriptions:
                LOGGER.warning(
                    '%s: odd, %s was removed from mcast subscriptions, but '
                    'it was not in the list of subscribed addresses.' % (
                        self.fullname, ip_str))
            else:
                self.multicast_subscriptions.remove(ip_str)
            return
else:
raise RuntimeError('%s: failed removing multicast address %s '
'from tap device' % (self.fullname,
IpAddress.str2ip(ip_str)))
def _fabric_enable_disable(self, target_val):
"""
        :param target_val: 1 to enable the fabric, 0 to disable it
"""
if self.memmap_compliant:
word_bytes = list(
struct.unpack('>4B', self.parent.read(self.name, 4, OFFSET_FLAGS)))
if word_bytes[0] == target_val:
return
word_bytes[0] = target_val
word_packed = struct.pack('>4B', *word_bytes)
self.parent.write(self.name, word_packed, OFFSET_FLAGS)
else:
# 0x20 or (0x20 / 4)? What was the /4 for?
word_bytes = list(
struct.unpack('>4B', self.parent.read(self.name, 4, 0x20)))
if word_bytes[1] == target_val:
return
word_bytes[1] = target_val
word_packed = struct.pack('>4B', *word_bytes)
self.parent.write(self.name, word_packed, 0x20)
def fabric_enable(self):
"""
Enable the core fabric
"""
self._fabric_enable_disable(1)
def fabric_disable(self):
"""
        Disable the core fabric
"""
self._fabric_enable_disable(0)
def fabric_soft_reset_toggle(self):
"""
Toggle the fabric soft reset
"""
if self.memmap_compliant:
word_bytes = struct.unpack('>4B', self.parent.read(self.name, 4, OFFSET_FLAGS))
word_bytes = list(word_bytes)
def write_val(val):
word_bytes[2] = val
word_packed = struct.pack('>4B', *word_bytes)
if val == 0:
self.parent.write(self.name, word_packed, OFFSET_FLAGS)
else:
self.parent.blindwrite(self.name, word_packed, OFFSET_FLAGS)
if word_bytes[2] == 1:
write_val(0)
write_val(1)
write_val(0)
else:
word_bytes = struct.unpack('>4B', self.parent.read(self.name, 4, 0x20))
word_bytes = list(word_bytes)
def write_val(val):
word_bytes[0] = val
word_packed = struct.pack('>4B', *word_bytes)
if val == 0:
self.parent.write(self.name, word_packed, 0x20)
else:
self.parent.blindwrite(self.name, word_packed, 0x20)
if word_bytes[0] == 1:
write_val(0)
write_val(1)
write_val(0)
def get_gbe_core_details(self, read_arp=False, read_cpu=False):
"""
Get 10GbE core details.
assemble struct for header stuff...
.. code-block:: python
\"\"\"
0x00 - 0x07: MAC address
0x08 - 0x0b: Not used
0x0c - 0x0f: Gateway addr
0x10 - 0x13: IP addr
0x14 - 0x17: Not assigned
0x18 - 0x1b: Buffer sizes
0x1c - 0x1f: Not assigned
0x20 : Soft reset (bit 0)
0x21 : Fabric enable (bit 0)
0x22 - 0x23: Fabric port
0x24 - 0x27: XAUI status (bit 2,3,4,5 = lane sync, bit6 = chan_bond)
0x28 - 0x2b: PHY config
0x28 : RX_eq_mix
0x29 : RX_eq_pol
0x2a : TX_preemph
0x2b : TX_diff_ctrl
0x30 - 0x33: Multicast IP RX base address
0x34 - 0x37: Multicast IP mask
0x38 - 0x3b: Subnet mask
0x1000 : CPU TX buffer
0x2000 : CPU RX buffer
0x3000 : ARP tables start
word_width = 8
\"\"\"
self.add_field(Bitfield.Field('mac0', 0, word_width, 0, 0 * word_width))
self.add_field(Bitfield.Field('mac1', 0, word_width, 0, 1 * word_width))
self.add_field(Bitfield.Field('mac2', 0, word_width, 0, 2 * word_width))
self.add_field(Bitfield.Field('mac3', 0, word_width, 0, 3 * word_width))
self.add_field(Bitfield.Field('mac4', 0, word_width, 0, 4 * word_width))
self.add_field(Bitfield.Field('mac5', 0, word_width, 0, 5 * word_width))
self.add_field(Bitfield.Field('mac6', 0, word_width, 0, 6 * word_width))
self.add_field(Bitfield.Field('mac7', 0, word_width, 0, 7 * word_width))
self.add_field(Bitfield.Field('unused_1', 0, (0x0c - 0x08) * word_width, 0, 8 * word_width))
self.add_field(Bitfield.Field('gateway_ip0', 0, word_width, 0, 0x0c * word_width))
self.add_field(Bitfield.Field('gateway_ip1', 0, word_width, 0, 0x0d * word_width))
self.add_field(Bitfield.Field('gateway_ip2', 0, word_width, 0, 0x0e * word_width))
self.add_field(Bitfield.Field('gateway_ip3', 0, word_width, 0, 0x0f * word_width))
self.add_field(Bitfield.Field('ip0', 0, word_width, 0, 0x10 * word_width))
self.add_field(Bitfield.Field('ip1', 0, word_width, 0, 0x11 * word_width))
self.add_field(Bitfield.Field('ip2', 0, word_width, 0, 0x12 * word_width))
self.add_field(Bitfield.Field('ip3', 0, word_width, 0, 0x13 * word_width))
self.add_field(Bitfield.Field('unused_2', 0, (0x18 - 0x14) * word_width, 0, 0x14 * word_width))
self.add_field(Bitfield.Field('buf_sizes', 0, (0x1c - 0x18) * word_width, 0, 0x18 * word_width))
self.add_field(Bitfield.Field('unused_3', 0, (0x20 - 0x1c) * word_width, 0, 0x1c * word_width))
self.add_field(Bitfield.Field('soft_reset', 2, 1, 0, 0x20 * word_width))
self.add_field(Bitfield.Field('fabric_enable', 2, 1, 0, 0x21 * word_width))
self.add_field(Bitfield.Field('port', 0, (0x24 - 0x22) * word_width, 0, 0x22 * word_width))
self.add_field(Bitfield.Field('xaui_status', 0, (0x28 - 0x24) * word_width, 0, 0x24 * word_width))
self.add_field(Bitfield.Field('rx_eq_mix', 0, word_width, 0, 0x28 * word_width))
self.add_field(Bitfield.Field('rq_eq_pol', 0, word_width, 0, 0x29 * word_width))
self.add_field(Bitfield.Field('tx_preempth', 0, word_width, 0, 0x2a * word_width))
self.add_field(Bitfield.Field('tx_diff_ctrl', 0, word_width, 0, 0x2b * word_width))
#self.add_field(Bitfield.Field('buffer_tx', 0, 0x1000 * word_width, 0, 0x1000 * word_width))
#self.add_field(Bitfield.Field('buffer_rx', 0, 0x1000 * word_width, 0, 0x2000 * word_width))
#self.add_field(Bitfield.Field('arp_table', 0, 0x1000 * word_width, 0, 0x3000 * word_width))
"""
if self.memmap_compliant:
data = self.parent.read(self.name, 16384)
data = list(struct.unpack('>16384B', data))
returnval = {
'ip_prefix': '%i.%i.%i.' % (data[0x14], data[0x15], data[0x16]),
'ip': IpAddress('%i.%i.%i.%i' % (data[0x14], data[0x15],
data[0x16], data[0x17])),
'subnet_mask': IpAddress('%i.%i.%i.%i' % (
data[0x1c], data[0x1d], data[0x1e], data[0x1f])),
'mac': Mac('%i:%i:%i:%i:%i:%i' % (data[0x0e], data[0x0f],
data[0x10], data[0x11],
data[0x12], data[0x13])),
'gateway_ip': IpAddress('%i.%i.%i.%i' % (data[0x18], data[0x19],
data[0x1a], data[0x1b])),
'fabric_port': ((data[0x32] << 8) + (data[0x33])),
'fabric_en': bool(data[0x2f] & 1),
'multicast': {'base_ip': IpAddress('%i.%i.%i.%i' % (
data[0x20], data[0x21], data[0x22], data[0x23])),
'ip_mask': IpAddress('%i.%i.%i.%i' % (
data[0x24], data[0x25], data[0x26], data[0x27])),
'rx_ips': []}
}
else:
data = self.parent.read(self.name, 16384)
data = list(struct.unpack('>16384B', data))
returnval = {
'ip_prefix': '%i.%i.%i.' % (data[0x10], data[0x11], data[0x12]),
'ip': IpAddress('%i.%i.%i.%i' % (data[0x10], data[0x11],
data[0x12], data[0x13])),
'subnet_mask': IpAddress('%i.%i.%i.%i' % (
data[0x38], data[0x39], data[0x3a], data[0x3b])),
'mac': Mac('%i:%i:%i:%i:%i:%i' % (data[0x02], data[0x03],
data[0x04], data[0x05],
data[0x06], data[0x07])),
'gateway_ip': IpAddress('%i.%i.%i.%i' % (data[0x0c], data[0x0d],
data[0x0e], data[0x0f])),
'fabric_port': ((data[0x22] << 8) + (data[0x23])),
'fabric_en': bool(data[0x21] & 1),
'xaui_lane_sync': [bool(data[0x27] & 4), bool(data[0x27] & 8),
bool(data[0x27] & 16), bool(data[0x27] & 32)],
'xaui_status': [data[0x24], data[0x25], data[0x26], data[0x27]],
'xaui_chan_bond': bool(data[0x27] & 64),
'xaui_phy': {'rx_eq_mix': data[0x28], 'rx_eq_pol': data[0x29],
'tx_preemph': data[0x2a], 'tx_swing': data[0x2b]},
'multicast': {'base_ip': IpAddress('%i.%i.%i.%i' % (
data[0x30], data[0x31], data[0x32], data[0x33])),
'ip_mask': IpAddress('%i.%i.%i.%i' % (
data[0x34], data[0x35], data[0x36], data[0x37])),
'rx_ips': []}
}
possible_addresses = [int(returnval['multicast']['base_ip'])]
mask_int = int(returnval['multicast']['ip_mask'])
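        # every zero bit in the multicast mask acts as a wildcard: for each such
        # bit the candidate set is doubled (bit cleared and bit set), and the
        # duplicates are removed below before building the rx_ips list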
for ctr in range(32):
mask_bit = (mask_int >> ctr) & 1
if not mask_bit:
new_ips = []
for ip in possible_addresses:
new_ips.append(ip & (~(1 << ctr)))
new_ips.append(new_ips[-1] | (1 << ctr))
possible_addresses.extend(new_ips)
tmp = list(set(possible_addresses))
for ip in tmp:
returnval['multicast']['rx_ips'].append(IpAddress(ip))
if read_arp:
returnval['arp'] = self.get_arp_details(data)
if read_cpu:
returnval.update(self.get_cpu_details(data))
self.core_details = returnval
return returnval
def get_arp_details(self, port_dump=None):
"""
Get ARP details from this interface.
:param port_dump: A list of raw bytes from interface memory.
:type port_dump: list
"""
if self.memmap_compliant:
arp_addr = OFFSET_ARP_CACHE
else:
arp_addr = 0x3000
if port_dump is None:
port_dump = self.parent.read(self.name, 16384)
port_dump = list(struct.unpack('>16384B', port_dump))
returnval = []
for addr in range(256):
mac = []
for ctr in range(2, 8):
mac.append(port_dump[arp_addr + (addr * 8) + ctr])
returnval.append(mac)
return returnval
def get_cpu_details(self, port_dump=None):
"""
Read details of the CPU buffers.
:param port_dump:
"""
#TODO Not memmap compliant
if port_dump is None:
port_dump = self.parent.read(self.name, 16384)
port_dump = list(struct.unpack('>16384B', port_dump))
returnval = {'cpu_tx': {}}
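        # the 4kB CPU TX buffer (at offset 0x1000) is presented as 8-byte rows,
        # keyed by the byte offset of each row within the buffer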
for ctr in range(4096 / 8):
tmp = []
for ctr2 in range(8):
tmp.append(port_dump[4096 + (8 * ctr) + ctr2])
returnval['cpu_tx'][ctr*8] = tmp
returnval['cpu_rx_buf_unack_data'] = port_dump[6 * 4 + 3]
returnval['cpu_rx'] = {}
for ctr in range(port_dump[6 * 4 + 3] + 8):
tmp = []
for ctr2 in range(8):
tmp.append(port_dump[8192 + (8 * ctr) + ctr2])
returnval['cpu_rx'][ctr * 8] = tmp
return returnval
def set_arp_table(self, macs):
"""Set the ARP table with a list of MAC addresses. The list, `macs`,
is passed such that the zeroth element is the MAC address of the
device with IP XXX.XXX.XXX.0, and element N is the MAC address of the
device with IP XXX.XXX.XXX.N"""
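        # each MAC is packed as a big-endian 64-bit word (top two bytes zero),
        # so entry N lands at arp_addr + 8*N in the core's memory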
if self.memmap_compliant:
arp_addr = OFFSET_ARP_CACHE
else:
arp_addr = 0x3000
macs = list(macs)
macs_pack = struct.pack('>%dQ' % (len(macs)), *macs)
self.parent.write(self.name, macs_pack, offset=arp_addr)
# end
| ska-sa/casperfpga | src/onegbe.py | Python | gpl-2.0 | 26,401 |
"""
Django settings for Outcumbent project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = ('templates',)  # note the trailing comma: this must be a tuple, not a bare string
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3&@zwum@#!0f+g(k-pvlw#9n05t$kuz_5db58-02739t+u*u(r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'outcumbent',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Outcumbent.urls'
WSGI_APPLICATION = 'Outcumbent.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'outcumbentdb',
'USER': 'root',
#'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '3306'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| praxeo/outcumbent | Outcumbent/settings.py | Python | gpl-2.0 | 2,140 |
"""
This package supplies tools for working with automated services
connected to a server. It was written with IRC in mind, so it's not
very generic, in that it pretty much assumes a single client connected
to a central server, and it's not easy for a client to add further connections
at runtime (But possible, though you might have to avoid selector.Reactor.loop.
"""
__all__ = [
"irc",
"selector",
"connection",
"irc2num"
]
| kaaveland/anybot | im/__init__.py | Python | gpl-2.0 | 449 |
'''
Access Control Lists testing based on newpynfs framework
Aurelien Charbon - Bull SA
'''
from random_gen import *
from optparse import OptionParser
import commands
import os
import threading
import time
import random
alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789_-() ~'
t_alphabet=len(alphabet)
def test_longacl(l,path):
    # measurements on getfacl
test = RandomGen()
u = commands.getoutput('rm ' + path + "/*") # clean directory
print "test acl getfacl\n"
for i in range(l):
test.getUserList()
testfile = 'testfile' + str(i)
u = commands.getoutput('touch ' + path + "/" + testfile)
print "setfacl with " + str(i) + " entries\n " + u
for j in range(i):
user = test.uList.pop()
mode = test.createRandomMode()
u = commands.getoutput('setfacl -m u:' + user + ':' + mode + " " + path + "/" + testfile)
if u != "":
print "setfacl -m u:" + user + ':' + mode + " " + path + "/" + testfile
print u
def main():
parser = OptionParser()
parser.add_option("-l", "--length", dest="length",type="int",help="max lentgh of ACL")
parser.add_option("-p", "--path", dest="path",help="path of test file")
(options, args) = parser.parse_args()
test_longacl(options.length,options.path)
if __name__ == '__main__':
    main()
| anthony-kolesov/arc_ltp | testcases/network/nfsv4/acl/test_long_acl.py | Python | gpl-2.0 | 1,341 |
# kamene.contrib.description = Label Distribution Protocol (LDP)
# kamene.contrib.status = loads
# http://git.savannah.gnu.org/cgit/ldpscapy.git/snapshot/ldpscapy-5285b81d6e628043df2a83301b292f24a95f0ba1.tar.gz
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) 2010 Florian Duraffourg
import struct
from kamene.packet import *
from kamene.fields import *
from kamene.ansmachine import *
from kamene.layers.inet import UDP
from kamene.layers.inet import TCP
from kamene.base_classes import Net
# Guess payload
def guess_payload(p):
LDPTypes = {
0x0001: LDPNotification,
0x0100: LDPHello,
0x0200: LDPInit,
0x0201: LDPKeepAlive,
0x0300: LDPAddress,
0x0301: LDPAddressWM,
0x0400: LDPLabelMM,
0x0401: LDPLabelReqM,
0x0404: LDPLabelARM,
0x0402: LDPLabelWM,
0x0403: LDPLabelRelM,
}
type = struct.unpack("!H",p[0:2])[0]
type = type & 0x7fff
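    # the top bit of the type field is the U (unknown message) flag,
    # so mask it off before looking up the message class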
if type == 0x0001 and struct.unpack("!H",p[2:4])[0] > 20:
return LDP
if type in LDPTypes:
return LDPTypes[type]
else:
return conf.raw_layer
## Fields ##
# 3.4.1. FEC TLV
class FecTLVField(StrField):
islist=1
def m2i(self, pkt, x):
nbr = struct.unpack("!H",x[2:4])[0]
used = 0
x=x[4:]
list=[]
while x:
#if x[0] == 1:
# list.append('Wildcard')
#else:
#mask=ord(x[8*i+3])
#add=inet_ntoa(x[8*i+4:8*i+8])
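            # each prefix FEC element: 1 byte element type, 2 bytes address
            # family, 1 byte prefix length, then just enough octets for the prefix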
mask=ord(x[3])
nbroctets = mask / 8
if mask % 8:
nbroctets += 1
add=inet_ntoa(x[4:4+nbroctets]+"\x00"*(4-nbroctets))
list.append( (add, mask) )
used += 4 + nbroctets
x=x[4+nbroctets:]
return list
def i2m(self, pkt, x):
if type(x) is str:
return x
s = "\x01\x00"
l = 0
fec = ""
for o in x:
fec += "\x02\x00\x01"
# mask length
fec += struct.pack("!B",o[1])
# Prefix
fec += inet_aton(o[0])
l += 8
s += struct.pack("!H",l)
s += fec
return s
def size(self, s):
"""Get the size of this field"""
l = 4 + struct.unpack("!H",s[2:4])[0]
return l
def getfield(self, pkt, s):
l = self.size(s)
return s[l:],self.m2i(pkt, s[:l])
# 3.4.2.1. Generic Label TLV
class LabelTLVField(StrField):
def m2i(self, pkt, x):
return struct.unpack("!I",x[4:8])[0]
def i2m(self, pkt, x):
if type(x) is str:
return x
s = "\x02\x00\x00\x04"
s += struct.pack("!I",x)
return s
def size(self, s):
"""Get the size of this field"""
l = 4 + struct.unpack("!H",s[2:4])[0]
return l
def getfield(self, pkt, s):
l = self.size(s)
return s[l:],self.m2i(pkt, s[:l])
# 3.4.3. Address List TLV
class AddressTLVField(StrField):
islist=1
def m2i(self, pkt, x):
nbr = struct.unpack("!H",x[2:4])[0] - 2
nbr /= 4
x=x[6:]
list=[]
for i in range(0,nbr):
add = x[4*i:4*i+4]
list.append(inet_ntoa(add))
return list
def i2m(self, pkt, x):
if type(x) is str:
return x
l=2+len(x)*4
s = "\x01\x01"+struct.pack("!H",l)+"\x00\x01"
for o in x:
s += inet_aton(o)
return s
def size(self, s):
"""Get the size of this field"""
l = 4 + struct.unpack("!H",s[2:4])[0]
return l
def getfield(self, pkt, s):
l = self.size(s)
return s[l:],self.m2i(pkt, s[:l])
# 3.4.6. Status TLV
class StatusTLVField(StrField):
islist=1
def m2i(self, pkt, x):
l = []
statuscode = struct.unpack("!I",x[4:8])[0]
l.append( (statuscode & 2**31) >> 31)
l.append( (statuscode & 2**30) >> 30)
l.append( statuscode & 0x3FFFFFFF )
l.append( struct.unpack("!I", x[8:12])[0] )
l.append( struct.unpack("!H", x[12:14])[0] )
return l
def i2m(self, pkt, x):
if type(x) is str:
return x
s = "\x03\x00" + struct.pack("!H",10)
statuscode = 0
if x[0] != 0:
statuscode += 2**31
if x[1] != 0:
statuscode += 2**30
statuscode += x[2]
s += struct.pack("!I",statuscode)
if len(x) > 3:
s += struct.pack("!I",x[3])
else:
s += "\x00\x00\x00\x00"
if len(x) > 4:
s += struct.pack("!H",x[4])
else:
s += "\x00\x00"
return s
def getfield(self, pkt, s):
l = 14
return s[l:],self.m2i(pkt, s[:l])
# 3.5.2 Common Hello Parameters TLV
class CommonHelloTLVField(StrField):
islist = 1
def m2i(self, pkt, x):
list = []
v = struct.unpack("!H",x[4:6])[0]
list.append(v)
flags = struct.unpack("B",x[6])[0]
v = ( flags & 0x80 ) >> 7
list.append(v)
        v = ( flags & 0x40 ) >> 6
list.append(v)
return list
def i2m(self, pkt, x):
if type(x) is str:
return x
s = "\x04\x00\x00\x04"
s += struct.pack("!H",x[0])
byte = 0
if x[1] == 1:
byte += 0x80
if x[2] == 1:
byte += 0x40
s += struct.pack("!B",byte)
s += "\x00"
return s
def getfield(self, pkt, s):
l = 8
return s[l:],self.m2i(pkt, s[:l])
# 3.5.3 Common Session Parameters TLV
class CommonSessionTLVField(StrField):
islist = 1
def m2i(self, pkt, x):
l = []
l.append(struct.unpack("!H",x[6:8])[0])
octet = struct.unpack("B",x[8:9])[0]
l.append( (octet & 2**7 ) >> 7 )
l.append( (octet & 2**6 ) >> 6 )
l.append( struct.unpack("B",x[9:10])[0] )
l.append( struct.unpack("!H",x[10:12])[0] )
l.append( inet_ntoa(x[12:16]) )
l.append( struct.unpack("!H",x[16:18])[0] )
return l
def i2m(self, pkt, x):
if type(x) is str:
return x
s = "\x05\x00\x00\x0E\x00\x01"
s += struct.pack("!H",x[0])
octet = 0
if x[1] != 0:
octet += 2**7
if x[2] != 0:
octet += 2**6
s += struct.pack("!B",octet)
s += struct.pack("!B",x[3])
s += struct.pack("!H",x[4])
s += inet_aton(x[5])
s += struct.pack("!H",x[6])
return s
def getfield(self, pkt, s):
l = 18
return s[l:],self.m2i(pkt, s[:l])
## Messages ##
# 3.5.1. Notification Message
class LDPNotification(Packet):
name = "LDPNotification"
fields_desc = [ BitField("u",0,1),
BitField("type", 0x0001, 15),
ShortField("len", None),
IntField("id", 0) ,
StatusTLVField("status",(0,0,0,0,0)) ]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.2. Hello Message
class LDPHello(Packet):
name = "LDPHello"
fields_desc = [ BitField("u",0,1),
BitField("type", 0x0100, 15),
ShortField("len", None),
IntField("id", 0) ,
CommonHelloTLVField("params",[180,0,0]) ]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.3. Initialization Message
class LDPInit(Packet):
name = "LDPInit"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0200, 15),
ShortField("len", None),
IntField("id", 0),
CommonSessionTLVField("params",None)]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.4. KeepAlive Message
class LDPKeepAlive(Packet):
name = "LDPKeepAlive"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0201, 15),
ShortField("len", None),
IntField("id", 0)]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.5. Address Message
class LDPAddress(Packet):
name = "LDPAddress"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0300, 15),
ShortField("len", None),
IntField("id", 0),
AddressTLVField("address",None) ]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.6. Address Withdraw Message
class LDPAddressWM(Packet):
name = "LDPAddressWM"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0301, 15),
ShortField("len", None),
IntField("id", 0),
AddressTLVField("address",None) ]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.7. Label Mapping Message
class LDPLabelMM(Packet):
name = "LDPLabelMM"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0400, 15),
ShortField("len", None),
IntField("id", 0),
FecTLVField("fec",None),
LabelTLVField("label",0)]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.8. Label Request Message
class LDPLabelReqM(Packet):
name = "LDPLabelReqM"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0401, 15),
ShortField("len", None),
IntField("id", 0),
FecTLVField("fec",None)]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.9. Label Abort Request Message
class LDPLabelARM(Packet):
name = "LDPLabelARM"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0404, 15),
ShortField("len", None),
IntField("id", 0),
FecTLVField("fec",None),
IntField("labelRMid",0)]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.10. Label Withdraw Message
class LDPLabelWM(Packet):
name = "LDPLabelWM"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0402, 15),
ShortField("len", None),
IntField("id", 0),
FecTLVField("fec",None),
LabelTLVField("label",0)]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.5.11. Label Release Message
class LDPLabelRelM(Packet):
name = "LDPLabelRelM"
fields_desc = [ BitField("u",0,1),
XBitField("type", 0x0403, 15),
ShortField("len", None),
IntField("id", 0),
FecTLVField("fec",None),
LabelTLVField("label",0)]
def post_build(self, p, pay):
if self.len is None:
l = len(p) - 4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
# 3.1. LDP PDUs
class LDP(Packet):
name = "LDP"
fields_desc = [ ShortField("version",1),
ShortField("len", None),
IPField("id","127.0.0.1"),
ShortField("space",0) ]
def post_build(self, p, pay):
if self.len is None:
l = len(p)+len(pay)-4
p = p[:2]+struct.pack("!H", l)+p[4:]
return p+pay
def guess_payload_class(self, p):
return guess_payload(p)
bind_layers( TCP, LDP, sport=646, dport=646 )
bind_layers( UDP, LDP, sport=646, dport=646 )
| phaethon/scapy | kamene/contrib/ldp.py | Python | gpl-2.0 | 13,902 |
#common sets up the conduit environment
from common import *
#setup test
test = SimpleSyncTest()
#Setup the key to sync
gconf = test.get_dataprovider("GConfTwoWay")
gconf.module.whitelist = ['/apps/metacity/general/num_workspaces']
folder = test.get_dataprovider("TestFolderTwoWay")
test.prepare(gconf, folder)
test.set_two_way_policy({"conflict":"ask","deleted":"ask"})
test.set_two_way_sync(True)
a = test.get_source_count()
b = test.get_sink_count()
ok("Got items to sync (%s,%s)" % (a,b), a == 1 and b == 0)
for i in (1,2,3,4):
if i > 1:
#Now modify the file
f = folder.module.get(
folder.module.get_all()[0]
)
f._set_file_mtime(datetime.datetime(2008,1,i))
a,b = test.sync()
aborted,errored,conflicted = test.get_sync_result()
ok("Sync #%s: Completed without conflicts" % i, aborted == False and errored == False and conflicted == False)
ok("Sync #%s: All items (%s,%s)" % (i,a,b), a == b and a == 1)
finished()
| arsfeld/conduit | test/python-tests/TestSyncGConfFolder.py | Python | gpl-2.0 | 1,001 |
from razer.client import DeviceManager
from razer.client import constants as razer_constants
# Create a DeviceManager. This is used to get specific devices
device_manager = DeviceManager()
print("Found {} Razer devices".format(len(device_manager.devices)))
print()
# Disable daemon effect syncing.
# Without this, the daemon will try to set the lighting effect to every device.
device_manager.sync_effects = False
# Iterate over each device and set the wave effect
for device in device_manager.devices:
print("Setting {} to wave".format(device.name))
# Set the effect to wave.
    # wave requires a direction, but different effects have different arguments.
device.fx.wave(razer_constants.WAVE_LEFT)
| z3ntu/razer-drivers | examples/basic_effect.py | Python | gpl-2.0 | 718 |
"""Pets now have a description
Revision ID: 0c431867c679
Revises: 5b1bdc1f3125
Create Date: 2016-11-07 18:36:25.912155
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0c431867c679'
down_revision = '5b1bdc1f3125'
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('pet', sa.Column('description', sa.Text(), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('pet', 'description')
### end Alembic commands ###
| asif-mahmud/Pyramid-Apps | pethouse/alembic/versions/0c431867c679_pets_now_have_a_description.py | Python | gpl-2.0 | 659 |
class movie:
"""Stores movie metadata"""
# The constructor takes
# - The Title
# - The youtube trailer
# - The poster image URL
def __init__(self, title, youtube_trailer, poster_url):
self.title = title
self.trailer_youtube_url = youtube_trailer
self.poster_image_url = poster_url
| amilendra/Udacity_FullStack_P1_MovieTracker | media.py | Python | gpl-2.0 | 338 |
from __future__ import with_statement
from fabric.api import task
@task
def md5():
"""
Check MD5 sums (unavailable, empty, with content)
"""
import hashlib
from fabric.api import cd, hide, run, settings
import fabtools
with cd('/tmp'):
run('touch f1')
assert fabtools.files.md5sum('f1') == hashlib.md5('').hexdigest()
run('echo -n hello > f2')
assert fabtools.files.md5sum('f2') == hashlib.md5('hello').hexdigest()
with settings(hide('warnings')):
assert fabtools.files.md5sum('doesnotexist') is None
| juanantoniofm/accesible-moodle | fabtools/tests/fabfiles/md5.py | Python | gpl-2.0 | 590 |
# -*- coding: utf-8 -*-
"""
Emotiv acquisition :
Reverse engineering and original crack code written by
Cody Brocious (http://github.com/daeken)
Kyle Machulis (http://github.com/qdot)
Many thanks for their contribution.
Need python-crypto.
"""
import multiprocessing as mp
import numpy as np
import msgpack
import time
from collections import OrderedDict
from .base import DeviceBase
import platform
WINDOWS = (platform.system() == "Windows")
try:
import pywinusb.hid as hid
except:
pass
import os
from subprocess import check_output
from Crypto.Cipher import AES
from Crypto import Random
import Queue
tasks = Queue.Queue()
_channel_names = [ 'F3', 'F4', 'P7', 'FC6', 'F7', 'F8','T7','P8','FC5','AF4','T8','O2','O1','AF3']
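# For each channel, the positions (within the decrypted 32-byte packet) of the
# 14 bits making up one sample, ordered least-significant bit first.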
sensorBits = {
'F3': [10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7],
'FC5': [28, 29, 30, 31, 16, 17, 18, 19, 20, 21, 22, 23, 8, 9],
'AF3': [46, 47, 32, 33, 34, 35, 36, 37, 38, 39, 24, 25, 26, 27],
'F7': [48, 49, 50, 51, 52, 53, 54, 55, 40, 41, 42, 43, 44, 45],
'T7': [66, 67, 68, 69, 70, 71, 56, 57, 58, 59, 60, 61, 62, 63],
'P7': [84, 85, 86, 87, 72, 73, 74, 75, 76, 77, 78, 79, 64, 65],
'O1': [102, 103, 88, 89, 90, 91, 92, 93, 94, 95, 80, 81, 82, 83],
'O2': [140, 141, 142, 143, 128, 129, 130, 131, 132, 133, 134, 135, 120, 121],
'P8': [158, 159, 144, 145, 146, 147, 148, 149, 150, 151, 136, 137, 138, 139],
'T8': [160, 161, 162, 163, 164, 165, 166, 167, 152, 153, 154, 155, 156, 157],
'F8': [178, 179, 180, 181, 182, 183, 168, 169, 170, 171, 172, 173, 174, 175],
'AF4': [196, 197, 198, 199, 184, 185, 186, 187, 188, 189, 190, 191, 176, 177],
'FC6': [214, 215, 200, 201, 202, 203, 204, 205, 206, 207, 192, 193, 194, 195],
'F4': [216, 217, 218, 219, 220, 221, 222, 223, 208, 209, 210, 211, 212, 213]
}
quality_bits = [99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112]
def create_analog_subdevice_param(channel_names):
n = len(channel_names)
d = {
'type' : 'AnalogInput',
'nb_channel' : n,
'params' :{ },
'by_channel_params' : {
'channel_indexes' : range(n),
'channel_names' : channel_names,
}
}
return d
def get_info(device):
info = { }
info['class'] = 'EmotivMultiSignals'
if WINDOWS:
# EMOTIV
info['device_path'] = device.device_path
info['board_name'] = '{} #{}'.format(device.vendor_name, device.serial_number).replace('\n', '').replace('\r', '')
info['serial'] = device.serial_number
info['hid'] = device
else:
info['device_path'] = device
        name = os.path.basename(device)
realInputPath = os.path.realpath("/sys/class/hidraw/" + name)
path = '/'.join(realInputPath.split('/')[:-4])
with open(path + "/manufacturer", 'r') as f:
manufacturer = f.readline()
with open(path + "/serial", 'r') as f:
serial = f.readline().strip()
info['board_name'] = '{} #{}'.format(manufacturer, serial).replace('\n', '').replace('\r', '')
info['serial'] = serial
# PYACQ
info['global_params'] = {'buffer_length' : 60.,}
info['subdevices'] = [ ]
info['subdevices'].append(create_analog_subdevice_param(_channel_names))
quality_name = ['Quality {}'.format(n) for n in _channel_names]
info['subdevices'].append(create_analog_subdevice_param(quality_name))
info['subdevices'].append(create_analog_subdevice_param([ 'X','Y']))
return info
def dump(obj):
for attr in dir(obj):
print "obj.%s = %s" % (attr, getattr(obj, attr))
class EmotivMultiSignals(DeviceBase):
def __init__(self, **kargs):
DeviceBase.__init__(self, **kargs)
@classmethod
def get_available_devices(cls):
devices = OrderedDict()
if WINDOWS:
try:
for device in hid.find_all_hid_devices():
print "device : ", device
if (device.product_name == 'Emotiv RAW DATA' or device.product_name == 'EPOC BCI'):
devices['Emotiv '+device.serial_number] = get_info(device)
finally:
pass
else:
serials = { }
for name in os.listdir("/sys/class/hidraw"):
realInputPath = os.path.realpath("/sys/class/hidraw/" + name)
path = '/'.join(realInputPath.split('/')[:-4])
try:
with open(path + "/manufacturer", 'r') as f:
manufacturer = f.readline()
if "emotiv" in manufacturer.lower():
with open(path + "/serial", 'r') as f:
serial = f.readline().strip()
if serial not in serials:
serials[serial] = [ ]
serials[serial].append(name)
except IOError as e:
print "Couldn't open file: %s" % e
for serial, names in serials.items():
device_path = '/dev/'+names[1]
info = get_info(device_path)
devices['Emotiv '+device_path] = info
return devices
def configure(self, buffer_length = 60,
subdevices = None,
):
self.params = {'buffer_length' : buffer_length,
'subdevices' : subdevices,
}
self.__dict__.update(self.params)
self.configured = True
def initialize(self):
devices = EmotivMultiSignals.get_available_devices()
self.device = devices.values()[0]
if self.subdevices is None:
self.subdevices = self.device['subdevices']
self.sampling_rate = 128.
self.packet_size = 1
l = int(self.sampling_rate*self.buffer_length)
self.buffer_length = (l - l%self.packet_size)/self.sampling_rate
self.name = '{}'.format(self.device['board_name'])
self.streams = [ ]
for s, sub in enumerate(self.subdevices):
stream = self.streamhandler.new_AnalogSignalSharedMemStream(name = self.name+str(s) , sampling_rate = self.sampling_rate,
nb_channel = sub['nb_channel'], buffer_length = self.buffer_length,
packet_size = self.packet_size, dtype = np.float64,
channel_names = sub['by_channel_params']['channel_names'],
channel_indexes = sub['by_channel_params']['channel_indexes'],
)
self.streams.append(stream)
def start(self):
        self.stop_flag = mp.Value('i', 0)  # multiprocessing flag, shared with the acquisition process
self.process = mp.Process(target = emotiv_mainLoop, args=(self.stop_flag, self.streams, self.device) )
self.process.start()
        print 'EmotivMultiSignals started:', self.name
self.running = True
def stop(self):
self.stop_flag.value = 1
self.process.join()
        print 'EmotivMultiSignals stopped:', self.name
self.running = False
def close(self):
if WINDOWS:
self.device['hid'].close()
else:
pass
# for ii in self.streams:
# self.streams[ii].stop()
def setupCrypto(serial):
type = 0 #feature[5]
type &= 0xF
type = 0
#I believe type == True is for the Dev headset, I'm not using that. That's the point of this library in the first place I thought.
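    # the 16-byte AES (ECB) key is assembled from the last four characters of
    # the headset serial number, interleaved with fixed bytes that differ
    # between the consumer and developer variants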
k = ['\0'] * 16
k[0] = serial[-1]
k[1] = '\0'
k[2] = serial[-2]
if type:
k[3] = 'H'
k[4] = serial[-1]
k[5] = '\0'
k[6] = serial[-2]
k[7] = 'T'
k[8] = serial[-3]
k[9] = '\x10'
k[10] = serial[-4]
k[11] = 'B'
else:
k[3] = 'T'
k[4] = serial[-3]
k[5] = '\x10'
k[6] = serial[-4]
k[7] = 'B'
k[8] = serial[-1]
k[9] = '\0'
k[10] = serial[-2]
k[11] = 'H'
k[12] = serial[-3]
k[13] = '\0'
k[14] = serial[-4]
k[15] = 'P'
#It doesn't make sense to have more than one greenlet handling this as data needs to be in order anyhow. I guess you could assign an ID or something
#to each packet but that seems like a waste also or is it? The ID might be useful if your using multiple headsets or usb sticks.
key = ''.join(k)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_ECB, iv)
return cipher
def get_level(data, bits):
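    # assemble a 14-bit sample bit by bit; bits[i] is an absolute bit index in
    # the packet, and the +1 byte offset skips the packet's leading counter byte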
level = 0
for i in range(13, -1, -1):
level <<= 1
b, o = (bits[i] / 8) + 1, bits[i] % 8
level |= (ord(data[b]) >> o) & 1
return level
def emotiv_mainLoop(stop_flag, streams, device):
import zmq
abs_pos = pos = 0
#setup cryto
cipher = setupCrypto(device['serial'])
streamChan, streamImp, streamGyro = streams
#Data channels socket
context = zmq.Context()
socket_chan = context.socket(zmq.PUB)
socket_chan.bind("tcp://*:{}".format(streamChan['port']))
#Impedance channels socket
socket_imp = context.socket(zmq.PUB)
socket_imp.bind("tcp://*:{}".format(streamImp['port']))
#Gyro channels socket
socket_gyro = context.socket(zmq.PUB)
socket_gyro.bind("tcp://*:{}".format(streamGyro['port']))
packet_size = streamChan['packet_size']
sampling_rate = streamChan['sampling_rate']
np_arr_chan = streamChan['shared_array'].to_numpy_array()
np_arr_imp = streamImp['shared_array'].to_numpy_array()
np_arr_gyro = streamGyro['shared_array'].to_numpy_array()
half_size = np_arr_chan.shape[1]/2 # same for the others
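    # each sample is written twice, at pos and pos + half_size, so readers of
    # the shared ring buffer can always extract a contiguous window of data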
impedance_qualities = { }
for name in _channel_names + ['X', 'Y', 'Unknown']:
impedance_qualities[name] = 0.
if WINDOWS:
device['hid'].open()
device['hid'].set_raw_data_handler(emotiv_handler)
else:
hidraw = open(device['device_path'])
while True:
# READ DATA
if WINDOWS:
crypted_data = tasks.get(True)
else:
crypted_data = hidraw.read(32)
# PROCESS
data = cipher.decrypt(crypted_data[:16]) + cipher.decrypt(crypted_data[16:])
# current impedance quality
sensor_num = ord(data[0])
num_to_name = { 0 : 'F3', 1:'FC5', 2 : 'AF3', 3 : 'F7', 4:'T7', 5 : 'P7',
6 : 'O1', 7 : 'O2', 8: 'P8', 9 : 'T8', 10: 'F8', 11 : 'AF4',
12 : 'FC6', 13: 'F4', 14 : 'F8', 15:'AF4',
64 : 'F3', 65 : 'FC5', 66 : 'AF3', 67 : 'F7', 68 : 'T7', 69 : 'P7',
70 : 'O1', 71 : 'O2', 72: 'P8', 73 : 'T8', 74: 'F8', 75 : 'AF4',
76 : 'FC6', 77: 'F4', 78 : 'F8', 79:'AF4',
80 : 'FC6',
}
if sensor_num in num_to_name:
sensor_name = num_to_name[sensor_num]
impedance_qualities[sensor_name] = get_level(data, quality_bits) / 540
for c, channel_name in enumerate(_channel_names):
bits = sensorBits[channel_name]
# channel value
value = get_level(data, bits)
np_arr_chan[c,pos] = value
np_arr_chan[c,pos+half_size] = value
#channel qualities
np_arr_imp[c,pos] = impedance_qualities[channel_name]
np_arr_imp[c,pos+half_size] = impedance_qualities[channel_name]
gyroX = ord(data[29]) - 106
gyroY = ord(data[30]) - 105
np_arr_gyro[:,pos] = [gyroX, gyroY]
np_arr_gyro[:,pos+half_size] = [gyroX, gyroY]
abs_pos += packet_size
pos = abs_pos%half_size
socket_chan.send(msgpack.dumps(abs_pos))
socket_imp.send(msgpack.dumps(abs_pos))
socket_gyro.send(msgpack.dumps(abs_pos))
if stop_flag.value:
print 'will stop'
break
# Windows handler
def emotiv_handler(data):
"""
Receives packets from headset for Windows. Sends them to the crypto process
"""
assert data[0] == 0
tasks.put_nowait(''.join(map(chr, data[1:])))
return True
Emotiv = EmotivMultiSignals
| Hemisphere-Project/Telemir-DatabitMe | Telemir-EEG/pyacq/pyacq/core/devices/emotiv.py | Python | gpl-2.0 | 12,623 |
# -*- coding: utf-8 -*-
#
# Nitrate is copyright 2010 Red Hat, Inc.
#
# Nitrate is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version. This program is distributed in
# the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranties of TITLE, NON-INFRINGEMENT,
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# The GPL text is available in the file COPYING that accompanies this
# distribution and at <http://www.gnu.org/licenses>.
#
# Authors:
# Xuqing Kuang <[email protected]>
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from kobo.django.xmlrpc.decorators import user_passes_test, login_required, log_call
from nitrate.core.utils.xmlrpc import XMLRPCSerializer
__all__ = (
'filter',
'get',
'get_me',
'update',
)
def get_user_dict(user):
u = XMLRPCSerializer(model = user)
u = u.serialize_model()
if u.get('password'):
del u['password']
return u
@log_call
def filter(request, query):
"""
    Description: Performs a search and returns the resulting list of users.
Params: $query - Hash: keys must match valid search fields.
+------------------------------------------------------------------+
        |                 User Search Parameters                           |
+------------------------------------------------------------------+
| Key | Valid Values |
| id | Integer: ID |
| username | String: User name |
| first_name | String: User first name |
| last_name | String: User last name |
| email | String Email |
| is_active | Boolean: Return the active users |
| groups | ForeignKey: AuthGroup |
+------------------------------------------------------------------+
    Returns:     Array: Matching users are returned in a list of hashes.
Example:
>>> User.filter({'username__startswith': 'x'})
"""
users = User.objects.filter(**query)
return [get_user_dict(u) for u in users]
def get(request, id):
"""
    Description: Used to load an existing user from the database.
Params: $id - Integer/String: An integer representing the ID in the database
Returns: A blessed User object Hash
Example:
>>> User.get(2206)
"""
return get_user_dict(User.objects.get(pk = id))
def get_me(request):
"""
Description: Get the information of myself.
Returns: A blessed User object Hash
Example:
>>> User.get_me()
"""
return get_user_dict(request.user)
def update(request, values = {}, id = None):
"""
    Description: Updates the fields of the selected user. It can also change the
                 information of other users if you have permission.
    Params:      $values - Hash of keys matching User fields and the new values
to set each field to.
$id - Integer/String(Optional)
                 Integer: A single User ID.
                 String: A comma-separated string of User IDs.
Default: The ID of myself
Returns: A blessed User object Hash
+-------------------+----------------+-----------------------------------------+
| Field | Type | Null |
+-------------------+----------------+-----------------------------------------+
| first_name | String | Optional |
| last_name | String | Optional(Required if changes category) |
| email | String | Optional |
| password | String | Optional |
| old_password | String | Required by password |
+-------------------+----------------+-----------------------------------------+
Example:
>>> User.update({'first_name': 'foo'})
>>> User.update({'password': 'foo', 'old_password': '123'})
>>> User.update({'password': 'foo', 'old_password': '123'}, 2206)
"""
if id:
u = User.objects.get(pk = id)
else:
u = request.user
editable_fields = ['first_name', 'last_name', 'email', 'password']
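    # only users with the change permission may modify accounts other than their
    # own; without that permission a password change also requires old_password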
if not request.user.has_perm('auth.change_changeuser') and request.user != u:
raise PermissionDenied
for f in editable_fields:
if values.get(f):
if f == 'password':
if not request.user.has_perm('auth.change_changeuser') and not values.get('old_password'):
raise PermissionDenied('Old password is required')
if not request.user.has_perm('auth.change_changeuser') and not u.check_password(values.get('old_password')):
raise PermissionDenied('Password is incorrect')
u.set_password(values['password'])
else:
setattr(u, f, values[f])
u.save()
return get_user_dict(u)
| tkdchen/nitrate-xmlrpc | nitratexmlrpc/api/user.py | Python | gpl-2.0 | 5,535 |
# #
# Copyright 2013-2014 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Toy build unit test
@author: Kenneth Hoste (Ghent University)
"""
import glob
import grp
import os
import re
import shutil
import stat
import sys
import tempfile
from test.framework.utilities import EnhancedTestCase
from unittest import TestLoader
from unittest import main as unittestmain
from vsc.utils.fancylogger import setLogLevelDebug, logToScreen
import easybuild.tools.module_naming_scheme # required to dynamically load test module naming scheme(s)
from easybuild.framework.easyconfig.easyconfig import EasyConfig
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, read_file, write_file
from easybuild.tools.modules import modules_tool
class ToyBuildTest(EnhancedTestCase):
"""Toy build unit test."""
def setUp(self):
"""Test setup."""
super(ToyBuildTest, self).setUp()
fd, self.dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# adjust PYTHONPATH such that test easyblocks are found
import easybuild
eb_blocks_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'sandbox'))
if not eb_blocks_path in sys.path:
sys.path.append(eb_blocks_path)
easybuild = reload(easybuild)
import easybuild.easyblocks
reload(easybuild.easyblocks)
reload(easybuild.tools.module_naming_scheme)
# clear log
write_file(self.logfile, '')
def tearDown(self):
"""Cleanup."""
super(ToyBuildTest, self).tearDown()
# remove logs
if os.path.exists(self.dummylogfn):
os.remove(self.dummylogfn)
def check_toy(self, installpath, outtxt, version='0.0', versionprefix='', versionsuffix=''):
"""Check whether toy build succeeded."""
full_version = ''.join([versionprefix, version, versionsuffix])
# check for success
success = re.compile("COMPLETED: Installation ended successfully")
self.assertTrue(success.search(outtxt), "COMPLETED message found in '%s" % outtxt)
# if the module exists, it should be fine
toy_module = os.path.join(installpath, 'modules', 'all', 'toy', full_version)
msg = "module for toy build toy/%s found (path %s)" % (full_version, toy_module)
self.assertTrue(os.path.exists(toy_module), msg)
# module file is symlinked according to moduleclass
toy_module_symlink = os.path.join(installpath, 'modules', 'tools', 'toy', full_version)
self.assertTrue(os.path.islink(toy_module_symlink))
self.assertTrue(os.path.exists(toy_module_symlink))
# make sure installation log file and easyconfig file are copied to install dir
software_path = os.path.join(installpath, 'software', 'toy', full_version)
install_log_path_pattern = os.path.join(software_path, 'easybuild', 'easybuild-toy-%s*.log' % version)
self.assertTrue(len(glob.glob(install_log_path_pattern)) == 1, "Found 1 file at %s" % install_log_path_pattern)
# make sure test report is available
test_report_path_pattern = os.path.join(software_path, 'easybuild', 'easybuild-toy-%s*test_report.md' % version)
self.assertTrue(len(glob.glob(test_report_path_pattern)) == 1, "Found 1 file at %s" % test_report_path_pattern)
ec_file_path = os.path.join(software_path, 'easybuild', 'toy-%s.eb' % full_version)
self.assertTrue(os.path.exists(ec_file_path))
devel_module_path = os.path.join(software_path, 'easybuild', 'toy-%s-easybuild-devel' % full_version)
self.assertTrue(os.path.exists(devel_module_path))
def test_toy_build(self, extra_args=None, ec_file=None, tmpdir=None, verify=True, fails=False, verbose=True,
raise_error=False, test_report=None, versionsuffix=''):
"""Perform a toy build."""
if extra_args is None:
extra_args = []
test_readme = False
if ec_file is None:
ec_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
test_readme = True
full_ver = '0.0%s' % versionsuffix
args = [
ec_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--unittest-file=%s' % self.logfile,
'--force',
'--robot=%s' % os.pathsep.join([self.test_buildpath, os.path.dirname(__file__)]),
]
if tmpdir is not None:
args.append('--tmpdir=%s' % tmpdir)
if test_report is not None:
args.append('--dump-test-report=%s' % test_report)
args.extend(extra_args)
myerr = None
try:
outtxt = self.eb_main(args, logfile=self.dummylogfn, do_build=True, verbose=verbose,
raise_error=raise_error)
except Exception, err:
myerr = err
if raise_error:
raise myerr
if verify:
self.check_toy(self.test_installpath, outtxt, versionsuffix=versionsuffix)
if test_readme:
# make sure postinstallcmds were used
toy_install_path = os.path.join(self.test_installpath, 'software', 'toy', full_ver)
self.assertEqual(read_file(os.path.join(toy_install_path, 'README')), "TOY\n")
# make sure full test report was dumped, and contains sensible information
if test_report is not None:
self.assertTrue(os.path.exists(test_report))
if fails:
test_result = 'FAIL'
else:
test_result = 'SUCCESS'
regex_patterns = [
r"Test result[\S\s]*Build succeeded for %d out of 1" % (not fails),
r"Overview of tested easyconfig[\S\s]*%s[\S\s]*%s" % (test_result, os.path.basename(ec_file)),
r"Time info[\S\s]*start:[\S\s]*end:",
r"EasyBuild info[\S\s]*framework version:[\S\s]*easyblocks ver[\S\s]*command line[\S\s]*configuration",
r"System info[\S\s]*cpu model[\S\s]*os name[\S\s]*os version[\S\s]*python version",
r"List of loaded modules",
r"Environment",
]
test_report_txt = read_file(test_report)
for regex_pattern in regex_patterns:
regex = re.compile(regex_pattern, re.M)
msg = "Pattern %s found in full test report: %s" % (regex.pattern, test_report_txt)
self.assertTrue(regex.search(test_report_txt), msg)
return outtxt
def test_toy_broken(self):
"""Test deliberately broken toy build."""
tmpdir = tempfile.mkdtemp()
broken_toy_ec = os.path.join(tmpdir, "toy-broken.eb")
toy_ec_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
broken_toy_ec_txt = read_file(toy_ec_file)
broken_toy_ec_txt += "checksums = ['clearywrongchecksum']"
write_file(broken_toy_ec, broken_toy_ec_txt)
error_regex = "Checksum verification .* failed"
self.assertErrorRegex(EasyBuildError, error_regex, self.test_toy_build, ec_file=broken_toy_ec, tmpdir=tmpdir,
verify=False, fails=True, verbose=False, raise_error=True)
# make sure log file is retained, also for failed build
log_path_pattern = os.path.join(tmpdir, 'easybuild-*', 'easybuild-toy-0.0*.log')
self.assertTrue(len(glob.glob(log_path_pattern)) == 1, "Log file found at %s" % log_path_pattern)
# make sure individual test report is retained, also for failed build
test_report_fp_pattern = os.path.join(tmpdir, 'easybuild-*', 'easybuild-toy-0.0*test_report.md')
self.assertTrue(len(glob.glob(test_report_fp_pattern)) == 1, "Test report %s found" % test_report_fp_pattern)
# test dumping full test report (doesn't raise an exception)
test_report_fp = os.path.join(self.test_buildpath, 'full_test_report.md')
self.test_toy_build(ec_file=broken_toy_ec, tmpdir=tmpdir, verify=False, fails=True, verbose=False,
raise_error=True, test_report=test_report_fp)
# cleanup
shutil.rmtree(tmpdir)
def test_toy_tweaked(self):
"""Test toy build with tweaked easyconfig, for testing extra easyconfig parameters."""
test_ecs_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'easyconfigs')
ec_file = os.path.join(self.test_buildpath, 'toy-0.0-tweaked.eb')
shutil.copy2(os.path.join(test_ecs_dir, 'toy-0.0.eb'), ec_file)
# tweak easyconfig by appending to it
ec_extra = '\n'.join([
"versionsuffix = '-tweaked'",
"modextrapaths = {'SOMEPATH': ['foo/bar', 'baz']}",
"modextravars = {'FOO': 'bar'}",
"modloadmsg = 'THANKS FOR LOADING ME, I AM %(name)s v%(version)s'",
"modtclfooter = 'puts stderr \"oh hai!\"'",
])
write_file(ec_file, ec_extra, append=True)
args = [
ec_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--force',
]
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.check_toy(self.test_installpath, outtxt, versionsuffix='-tweaked')
toy_module = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0-tweaked')
toy_module_txt = read_file(toy_module)
self.assertTrue(re.search('setenv\s*FOO\s*"bar"', toy_module_txt))
self.assertTrue(re.search('prepend-path\s*SOMEPATH\s*\$root/foo/bar', toy_module_txt))
self.assertTrue(re.search('prepend-path\s*SOMEPATH\s*\$root/baz', toy_module_txt))
self.assertTrue(re.search('module-info mode load.*\n\s*puts stderr\s*.*I AM toy v0.0', toy_module_txt))
self.assertTrue(re.search('puts stderr "oh hai!"', toy_module_txt))
def test_toy_buggy_easyblock(self):
"""Test build using a buggy/broken easyblock, make sure a traceback is reported."""
ec_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
kwargs = {
'ec_file': ec_file,
'extra_args': ['--easyblock=EB_toy_buggy'],
'raise_error': True,
'verify': False,
'verbose': False,
}
err_regex = r"crashed with an error.*Traceback[\S\s]*toy_buggy.py.*build_step[\S\s]*global name 'run_cmd'"
self.assertErrorRegex(EasyBuildError, err_regex, self.test_toy_build, **kwargs)
def test_toy_build_formatv2(self):
"""Perform a toy build (format v2)."""
# set $MODULEPATH such that modules for specified dependencies are found
modulepath = os.environ.get('MODULEPATH')
os.environ['MODULEPATH'] = os.path.abspath(os.path.join(os.path.dirname(__file__), 'modules'))
args = [
os.path.join(os.path.dirname(__file__), 'easyconfigs', 'v2.0', 'toy.eb'),
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--unittest-file=%s' % self.logfile,
'--force',
'--robot=%s' % os.pathsep.join([self.test_buildpath, os.path.dirname(__file__)]),
'--software-version=0.0',
'--toolchain=dummy,dummy',
'--experimental',
]
outtxt = self.eb_main(args, logfile=self.dummylogfn, do_build=True, verbose=True)
self.check_toy(self.test_installpath, outtxt)
# restore
if modulepath is not None:
os.environ['MODULEPATH'] = modulepath
else:
del os.environ['MODULEPATH']
def test_toy_build_with_blocks(self):
"""Test a toy build with multiple blocks."""
orig_sys_path = sys.path[:]
        # add directory in which easyconfig file can be found to Python search path, since we're not specifying its full path below
tmpdir = tempfile.mkdtemp()
# note get_paths_for expects easybuild/easyconfigs subdir
ecs_path = os.path.join(tmpdir, "easybuild", "easyconfigs")
os.makedirs(ecs_path)
shutil.copy2(os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0-multiple.eb'), ecs_path)
sys.path.append(tmpdir)
args = [
'toy-0.0-multiple.eb',
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--unittest-file=%s' % self.logfile,
'--force',
]
outtxt = self.eb_main(args, logfile=self.dummylogfn, do_build=True, verbose=True)
for toy_prefix, toy_version, toy_suffix in [
('', '0.0', '-somesuffix'),
('someprefix-', '0.0', '-somesuffix')
]:
self.check_toy(self.test_installpath, outtxt, version=toy_version,
versionprefix=toy_prefix, versionsuffix=toy_suffix)
# cleanup
shutil.rmtree(tmpdir)
sys.path = orig_sys_path
def test_toy_build_formatv2_sections(self):
"""Perform a toy build (format v2, using sections)."""
versions = {
'0.0': {'versionprefix': '', 'versionsuffix': ''},
'1.0': {'versionprefix': '', 'versionsuffix': ''},
'1.1': {'versionprefix': 'stable-', 'versionsuffix': ''},
'1.5': {'versionprefix': 'stable-', 'versionsuffix': '-early'},
'1.6': {'versionprefix': 'stable-', 'versionsuffix': '-early'},
'2.0': {'versionprefix': 'stable-', 'versionsuffix': '-early'},
'3.0': {'versionprefix': 'stable-', 'versionsuffix': '-mature'},
}
for version, specs in versions.items():
args = [
os.path.join(os.path.dirname(__file__), 'easyconfigs', 'v2.0', 'toy-with-sections.eb'),
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--unittest-file=%s' % self.logfile,
'--force',
'--robot=%s' % os.pathsep.join([self.test_buildpath, os.path.dirname(__file__)]),
'--software-version=%s' % version,
'--toolchain=dummy,dummy',
'--experimental',
]
outtxt = self.eb_main(args, logfile=self.dummylogfn, do_build=True, verbose=True)
specs['version'] = version
self.check_toy(self.test_installpath, outtxt, **specs)
def test_toy_download_sources(self):
"""Test toy build with sources that still need to be 'downloaded'."""
tmpdir = tempfile.mkdtemp()
# copy toy easyconfig file, and append source_urls to it
shutil.copy2(os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb'), tmpdir)
source_url = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sandbox', 'sources', 'toy')
ec_file = os.path.join(tmpdir, 'toy-0.0.eb')
write_file(ec_file, '\nsource_urls = ["file://%s"]\n' % source_url, append=True)
# unset $EASYBUILD_XPATH env vars, to make sure --prefix is picked up
for cfg_opt in ['build', 'install', 'source']:
del os.environ['EASYBUILD_%sPATH' % cfg_opt.upper()]
sourcepath = os.path.join(tmpdir, 'mysources')
args = [
ec_file,
'--prefix=%s' % tmpdir,
'--sourcepath=%s' % ':'.join([sourcepath, '/bar']), # include senseless path which should be ignored
'--debug',
'--unittest-file=%s' % self.logfile,
'--force',
]
outtxt = self.eb_main(args, logfile=self.dummylogfn, do_build=True, verbose=True)
self.check_toy(tmpdir, outtxt)
self.assertTrue(os.path.exists(os.path.join(sourcepath, 't', 'toy', 'toy-0.0.tar.gz')))
shutil.rmtree(tmpdir)
def test_toy_permissions(self):
"""Test toy build with custom umask settings."""
toy_ec_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
args = [
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--unittest-file=%s' % self.logfile,
'--force',
]
# set umask hard to verify default reliably
orig_umask = os.umask(0022)
# test specifying a non-existing group
allargs = [toy_ec_file] + args + ['--group=thisgroupdoesnotexist']
outtxt, err = self.eb_main(allargs, logfile=self.dummylogfn, do_build=True, return_error=True)
err_regex = re.compile("Failed to get group ID .* group does not exist")
        self.assertTrue(err_regex.search(outtxt), "Pattern '%s' not found in '%s'" % (err_regex.pattern, outtxt))
# determine current group name (at least we can use that)
gid = os.getgid()
curr_grp = grp.getgrgid(gid).gr_name
for umask, cfg_group, ec_group, dir_perms, fil_perms, bin_perms in [
(None, None, None, 0755, 0644, 0755), # default: inherit session umask
(None, None, curr_grp, 0750, 0640, 0750), # default umask, but with specified group in ec
(None, curr_grp, None, 0750, 0640, 0750), # default umask, but with specified group in cfg
(None, 'notagrp', curr_grp, 0750, 0640, 0750), # default umask, but with specified group in both cfg and ec
('000', None, None, 0777, 0666, 0777), # stupid empty umask
('032', None, None, 0745, 0644, 0745), # no write/execute for group, no write for other
('030', None, curr_grp, 0740, 0640, 0740), # no write for group, with specified group
('077', None, None, 0700, 0600, 0700), # no access for other/group
]:
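            # How the expected modes above follow (sketch): directories and binaries
            # start from mode 0777 and plain files from 0666, then the umask bits are
            # cleared — e.g. for umask '032': 0777 & ~0032 = 0745, 0666 & ~0032 = 0644.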
if cfg_group is None and ec_group is None:
allargs = [toy_ec_file]
elif ec_group is not None:
shutil.copy2(toy_ec_file, self.test_buildpath)
tmp_ec_file = os.path.join(self.test_buildpath, os.path.basename(toy_ec_file))
write_file(tmp_ec_file, "\ngroup = '%s'" % ec_group, append=True)
allargs = [tmp_ec_file]
allargs.extend(args)
if umask is not None:
allargs.append("--umask=%s" % umask)
if cfg_group is not None:
allargs.append("--group=%s" % cfg_group)
outtxt = self.eb_main(allargs, logfile=self.dummylogfn, do_build=True, verbose=True)
# verify that installation was correct
self.check_toy(self.test_installpath, outtxt)
# group specified in easyconfig overrules configured group
group = cfg_group
if ec_group is not None:
group = ec_group
# verify permissions
paths_perms = [
# no write permissions for group/other, regardless of umask
(('software', 'toy', '0.0'), dir_perms & ~ 0022),
(('software', 'toy', '0.0', 'bin'), dir_perms & ~ 0022),
(('software', 'toy', '0.0', 'bin', 'toy'), bin_perms & ~ 0022),
]
# only software subdirs are chmod'ed for 'protected' installs, so don't check those if a group is specified
if group is None:
paths_perms.extend([
(('software', ), dir_perms),
(('software', 'toy'), dir_perms),
(('software', 'toy', '0.0', 'easybuild', '*.log'), fil_perms),
(('modules', ), dir_perms),
(('modules', 'all'), dir_perms),
(('modules', 'all', 'toy'), dir_perms),
(('modules', 'all', 'toy', '0.0'), fil_perms),
])
for path, correct_perms in paths_perms:
fullpath = glob.glob(os.path.join(self.test_installpath, *path))[0]
perms = os.stat(fullpath).st_mode & 0777
msg = "Path %s has %s permissions: %s" % (fullpath, oct(correct_perms), oct(perms))
self.assertEqual(perms, correct_perms, msg)
if group is not None:
path_gid = os.stat(fullpath).st_gid
self.assertEqual(path_gid, grp.getgrnam(group).gr_gid)
# cleanup for next iteration
shutil.rmtree(self.test_installpath)
# restore original umask
os.umask(orig_umask)
def test_toy_gid_sticky_bits(self):
"""Test setting gid and sticky bits."""
subdirs = [
(('',), False),
(('software',), False),
(('software', 'toy'), False),
(('software', 'toy', '0.0'), True),
(('modules', 'all'), False),
(('modules', 'all', 'toy'), False),
]
# no gid/sticky bits by default
self.test_toy_build()
for subdir, _ in subdirs:
fullpath = os.path.join(self.test_installpath, *subdir)
perms = os.stat(fullpath).st_mode
self.assertFalse(perms & stat.S_ISGID, "no gid bit on %s" % fullpath)
self.assertFalse(perms & stat.S_ISVTX, "no sticky bit on %s" % fullpath)
        # gid/sticky bits are set, but only on (re)created directories
self.test_toy_build(extra_args=['--set-gid-bit', '--sticky-bit'])
for subdir, bits_set in subdirs:
fullpath = os.path.join(self.test_installpath, *subdir)
perms = os.stat(fullpath).st_mode
if bits_set:
self.assertTrue(perms & stat.S_ISGID, "gid bit set on %s" % fullpath)
self.assertTrue(perms & stat.S_ISVTX, "sticky bit set on %s" % fullpath)
else:
self.assertFalse(perms & stat.S_ISGID, "no gid bit on %s" % fullpath)
self.assertFalse(perms & stat.S_ISVTX, "no sticky bit on %s" % fullpath)
# start with a clean slate, now gid/sticky bits should be set on everything
shutil.rmtree(self.test_installpath)
self.test_toy_build(extra_args=['--set-gid-bit', '--sticky-bit'])
for subdir, _ in subdirs:
fullpath = os.path.join(self.test_installpath, *subdir)
perms = os.stat(fullpath).st_mode
self.assertTrue(perms & stat.S_ISGID, "gid bit set on %s" % fullpath)
self.assertTrue(perms & stat.S_ISVTX, "sticky bit set on %s" % fullpath)
def test_allow_system_deps(self):
"""Test allow_system_deps easyconfig parameter."""
tmpdir = tempfile.mkdtemp()
# copy toy easyconfig file, and append source_urls to it
shutil.copy2(os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb'), tmpdir)
ec_file = os.path.join(tmpdir, 'toy-0.0.eb')
write_file(ec_file, "\nallow_system_deps = [('Python', SYS_PYTHON_VERSION)]\n", append=True)
self.test_toy_build(ec_file=ec_file)
shutil.rmtree(tmpdir)
def test_toy_hierarchical(self):
"""Test toy build under example hierarchical module naming scheme."""
self.setup_hierarchical_modules()
mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
args = [
os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb'),
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--unittest-file=%s' % self.logfile,
'--force',
'--robot=%s' % os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs'),
'--module-naming-scheme=HierarchicalMNS',
]
# test module paths/contents with gompi build
extra_args = [
'--try-toolchain=goolf,1.4.10',
]
self.eb_main(args + extra_args, logfile=self.dummylogfn, do_build=True, verbose=True, raise_error=True)
# make sure module file is installed in correct path
toy_module_path = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4', 'toy', '0.0')
self.assertTrue(os.path.exists(toy_module_path))
# check that toolchain load is expanded to loads for toolchain dependencies,
# except for the ones that extend $MODULEPATH to make the toy module available
modtxt = read_file(toy_module_path)
for dep in ['goolf', 'GCC', 'OpenMPI']:
load_regex = re.compile("load %s" % dep)
            self.assertFalse(load_regex.search(modtxt), "Pattern '%s' found in %s" % (load_regex.pattern, modtxt))
for dep in ['OpenBLAS', 'FFTW', 'ScaLAPACK']:
load_regex = re.compile("load %s" % dep)
            self.assertTrue(load_regex.search(modtxt), "Pattern '%s' not found in %s" % (load_regex.pattern, modtxt))
os.remove(toy_module_path)
# test module path with GCC/4.7.2 build
extra_args = [
'--try-toolchain=GCC,4.7.2',
]
self.eb_main(args + extra_args, logfile=self.dummylogfn, do_build=True, verbose=True, raise_error=True)
# make sure module file is installed in correct path
toy_module_path = os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'toy', '0.0')
self.assertTrue(os.path.exists(toy_module_path))
# no dependencies or toolchain => no module load statements in module file
modtxt = read_file(toy_module_path)
self.assertFalse(re.search("module load", modtxt))
os.remove(toy_module_path)
# test module path with GCC/4.7.2 build, pretend to be an MPI lib by setting moduleclass
extra_args = [
'--try-toolchain=GCC,4.7.2',
'--try-amend=moduleclass=mpi',
]
self.eb_main(args + extra_args, logfile=self.dummylogfn, do_build=True, verbose=True, raise_error=True)
# make sure module file is installed in correct path
toy_module_path = os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'toy', '0.0')
self.assertTrue(os.path.exists(toy_module_path))
# 'module use' statements to extend $MODULEPATH are present
modtxt = read_file(toy_module_path)
modpath_extension = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'toy', '0.0')
self.assertTrue(re.search("^module\s*use\s*%s" % modpath_extension, modtxt, re.M))
os.remove(toy_module_path)
# ... unless they shouldn't be
extra_args.append('--try-amend=include_modpath_extensions=') # pass empty string as equivalent to False
self.eb_main(args + extra_args, logfile=self.dummylogfn, do_build=True, verbose=True, raise_error=True)
modtxt = read_file(toy_module_path)
modpath_extension = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'toy', '0.0')
self.assertFalse(re.search("^module\s*use\s*%s" % modpath_extension, modtxt, re.M))
os.remove(toy_module_path)
# test module path with dummy/dummy build
extra_args = [
'--try-toolchain=dummy,dummy',
]
self.eb_main(args + extra_args, logfile=self.dummylogfn, do_build=True, verbose=True, raise_error=True)
# make sure module file is installed in correct path
toy_module_path = os.path.join(mod_prefix, 'Core', 'toy', '0.0')
self.assertTrue(os.path.exists(toy_module_path))
# no dependencies or toolchain => no module load statements in module file
modtxt = read_file(toy_module_path)
self.assertFalse(re.search("module load", modtxt))
os.remove(toy_module_path)
# test module path with dummy/dummy build, pretend to be a compiler by setting moduleclass
extra_args = [
'--try-toolchain=dummy,dummy',
'--try-amend=moduleclass=compiler',
]
self.eb_main(args + extra_args, logfile=self.dummylogfn, do_build=True, verbose=True, raise_error=True)
# make sure module file is installed in correct path
toy_module_path = os.path.join(mod_prefix, 'Core', 'toy', '0.0')
self.assertTrue(os.path.exists(toy_module_path))
# no dependencies or toolchain => no module load statements in module file
modtxt = read_file(toy_module_path)
modpath_extension = os.path.join(mod_prefix, 'Compiler', 'toy', '0.0')
self.assertTrue(re.search("^module\s*use\s*%s" % modpath_extension, modtxt, re.M))
os.remove(toy_module_path)
# building a toolchain module should also work
args = ['gompi-1.4.10.eb'] + args[1:]
modules_tool().purge()
self.eb_main(args, logfile=self.dummylogfn, do_build=True, verbose=True, raise_error=True)
def test_toy_advanced(self):
"""Test toy build with extensions and non-dummy toolchain."""
test_dir = os.path.abspath(os.path.dirname(__file__))
os.environ['MODULEPATH'] = os.path.join(test_dir, 'modules')
test_ec = os.path.join(test_dir, 'easyconfigs', 'toy-0.0-gompi-1.3.12.eb')
self.test_toy_build(ec_file=test_ec, versionsuffix='-gompi-1.3.12')
def test_toy_hidden(self):
"""Test installing a hidden module."""
ec_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'toy-0.0.eb')
self.test_toy_build(ec_file=ec_file, extra_args=['--hidden'], verify=False)
# module file is hidden
toy_module = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '.0.0')
        self.assertTrue(os.path.exists(toy_module), 'Hidden module %s not found' % toy_module)
# installed software is not hidden
toybin = os.path.join(self.test_installpath, 'software', 'toy', '0.0', 'bin', 'toy')
self.assertTrue(os.path.exists(toybin))
def test_module_filepath_tweaking(self):
"""Test using --suffix-modules-path."""
# install test module naming scheme dynamically
test_mns_parent_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox')
sys.path.append(test_mns_parent_dir)
reload(easybuild)
reload(easybuild.tools)
reload(easybuild.tools.module_naming_scheme)
mns_path = "easybuild.tools.module_naming_scheme.test_module_naming_scheme"
__import__(mns_path, globals(), locals(), [''])
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'toy-0.0.eb')
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--force',
'--debug',
'--suffix-modules-path=foobarbaz',
'--module-naming-scheme=TestModuleNamingScheme',
]
self.eb_main(args, do_build=True, verbose=True)
mod_file_prefix = os.path.join(self.test_installpath, 'modules')
self.assertTrue(os.path.exists(os.path.join(mod_file_prefix, 'foobarbaz', 'toy', '0.0')))
self.assertTrue(os.path.exists(os.path.join(mod_file_prefix, 'TOOLS', 'toy', '0.0')))
self.assertTrue(os.path.islink(os.path.join(mod_file_prefix, 'TOOLS', 'toy', '0.0')))
self.assertTrue(os.path.exists(os.path.join(mod_file_prefix, 't', 'toy', '0.0')))
self.assertTrue(os.path.islink(os.path.join(mod_file_prefix, 't', 'toy', '0.0')))
def test_toy_archived_easyconfig(self):
"""Test archived easyconfig for a succesful build."""
repositorypath = os.path.join(self.test_installpath, 'easyconfigs_archive')
extra_args = [
'--repository=FileRepository',
'--repositorypath=%s' % repositorypath,
]
self.test_toy_build(raise_error=True, extra_args=extra_args)
archived_ec = os.path.join(repositorypath, 'toy', 'toy-0.0.eb')
self.assertTrue(os.path.exists(archived_ec))
ec = EasyConfig(archived_ec)
self.assertEqual(ec.name, 'toy')
self.assertEqual(ec.version, '0.0')
def suite():
""" return all the tests in this file """
return TestLoader().loadTestsFromTestCase(ToyBuildTest)
if __name__ == '__main__':
#logToScreen(enable=True)
#setLogLevelDebug()
unittestmain()
| geimer/easybuild-framework | test/framework/toy_build.py | Python | gpl-2.0 | 33,739 |
from utils import textAppend, textPrepend, textCut, textEditLastChar, error, textCursorPos
class File:
""" Represents a file (A separated class allow to open several files at a time.
The class also holds the whole file content. (The vim buffers only store either the
accepted chunks, or the editing statement) """
def __init__(self, plugin, buffers):
self.windowsManager = plugin.windowsManager
self.coqManager = plugin.coqManager
self.input = buffers[0]
self.output = buffers[1]
        # Each chunk is described by the following tuple: (startPos, endPos, newLine), where startPos and endPos are coordinate tuples
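        # e.g. ((0, 0, 0), (12, 3, 0), False) — assumed shape only: two (x, y, flag)
        # triples in the style of utils.textCursorPos/textCut, plus the newline marker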
self.chunks = []
# The whole file content
self.code = []
self.editPosition = (0, 0)
# We manage a virtual new-line at the end of the compiled buffer.
self.initOutputCursor()
def initOutputCursor(self):
""" Init the newline-cursor in the Compiled buffer. """
self.output.options['modifiable'] = True
del self.output[:]
self.drawNewlineCursor(False)
self.output.options['modifiable'] = False
self.editNewLine = False
        # We backtrack every chunk
        rewound = self.coqManager.rewind(len(self.chunks))
        if rewound:  # guard: slicing with -0 would wrongly empty the list
            self.chunks = self.chunks[:-rewound]
def drawNewlineCursor(self, newLine):
if newLine:
self.windowsManager.commands('__Compiled__', ["normal G$a Dt"])
else:
self.windowsManager.commands('__Compiled__', ["normal G$a PR"])
def next(self):
nextChunk = self.windowsManager.input.getChunk(self.input, (0, 0))
if nextChunk :
if self.coqManager.sendChunk(nextChunk[0]):
if self.editNewLine:
chunkStart = (0, textCursorPos(self.output)[1] + 1, 2)
else:
                    chunkStart = textCursorPos(self.output, diffX = 3) # diffX=3 to skip over the newline-cursor
chunkStart = (chunkStart[0], chunkStart[1], 0)
chunk = textCut(self.input, (0, 0, 2), nextChunk[1])
self.output.options['modifiable'] = True
# Remove the last newline-cursor
self.windowsManager.commands('__Compiled__', ["normal G$a"])
textAppend(self.output, chunk, self.editNewLine)
self.editNewLine = nextChunk[2]
chunkEnd = textCursorPos(self.output)
if self.editNewLine:
self.drawNewlineCursor(True)
chunkEnd = (chunkEnd[0] + 1, chunkEnd[1], 1)
else:
self.drawNewlineCursor(False)
chunkEnd = (chunkEnd[0] + 1, chunkEnd[1], 0)
self.output.options['modifiable'] = False
self.chunks.append((chunkStart, chunkEnd, self.editNewLine))
def prev(self):
""" Backtrack of one chunk """
if len(self.chunks) <= 0:
print("No chunk to backtrack !")
return None
actualRewind = self.coqManager.rewind(1)
if actualRewind == 1:
self.output.options['modifiable'] = True
# Remove the last newline-cursor
self.windowsManager.commands('__Compiled__', ["normal G$a"])
lastChunk = self.chunks[-1]
chunk = textCut(self.output, lastChunk[0], lastChunk[1])
textPrepend(self.input, chunk, lastChunk[2])
self.chunks.pop()
if len(self.chunks) == 0:
self.editNewLine = False
else:
self.editNewLine = self.chunks[-1][2]
self.drawNewlineCursor(self.editNewLine)
self.output.options['modifiable'] = False
def write(self, filename):
try:
file = open(filename, 'w')
# We write the compiled buffer, and then the edit buffer
for i in xrange(len(self.output) - 1):
file.write(self.output[i] + "\n")
interline = self.output[-1][:-4] # We don't take the newline-cursor
if not self.editNewLine:
interline += self.input[0]
file.write(interline + "\n")
for i in xrange(0 if self.editNewLine else 1, len(self.input)):
file.write(self.input[i] + "\n")
file.close()
except IOError as e:
error(str(e))
def open(self, filename):
# First, clear the buffers
self.initOutputCursor()
del self.chunks[:]
del self.input[:]
try:
file = open(filename, 'r')
# We simply add every lines in the Edit buffer
firstLine = True
for line in file:
if firstLine: # We don't want to skip the first line
self.input[0] = line
firstLine = False
else: self.input.append(line)
file.close()
except IOError as e:
error(str(e))
| QuanticPotato/vcoq | plugin/file.py | Python | gpl-2.0 | 4,084 |
# -*- coding:utf-8 -*-
import tradeStrategy as tds
import sendEmail as se
import tradeTime as tt
import tushare as ts
import pdSql_common as pds
from pdSql import StockSQL
import numpy as np
import sys,datetime
from pydoc import describe
from multiprocessing import Pool
import os, time
import file_config as fc
from position_history_update import combine_file,CHINESE_DICT
from position_history_update import get_latest_yh_k_stocks_from_csv
def get_stop_trade_symbol():
today_df = ts.get_today_all()
today_df = today_df[today_df.amount>0]
today_df_high_open = today_df[today_df.open>today_df.settlement*1.005]
all_trade_code = today_df['code'].values.tolist()
all_a_code = pds.get_all_code(hist_dir="C:/中国银河证券海王星/T0002/export/")
#all_a_code = pds.get_all_code(hist_dir="C:/hist/day/data/")
all_stop_codes = list(set(all_a_code).difference(set(all_trade_code)))
return all_stop_codes
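# Example: codes present in the export directory but absent from today's traded
# snapshot are the stop-traded ones, e.g. {'000001', '600000'} - {'000001'}
# leaves {'600000'} as suspended.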
def get_stopped_stocks(given_stocks=[],except_stocks=[],hist_dir='C:/hist/day/data/'):
import easyquotation
quotation =easyquotation.use('qq')
stop_stocks = []
if given_stocks:
this_quotation = quotation.stocks(given_stocks)
else:
this_quotation = quotation.all
all_stocks = list(this_quotation.keys())
#print('all_stocks=',('150251' in all_stocks))
#print('hist_dir=',hist_dir)
exist_codes = pds.get_all_code(hist_dir)
#print('exist_codes=',('150251' in exist_codes))
#print('all_stocks=',all_stocks)
all_codes = list(set(all_stocks) & (set(exist_codes)))
#print('all_codes=',all_codes)
for stock_code in all_codes:
if this_quotation[stock_code]:
#print(this_quotation[stock_code])
if this_quotation[stock_code]['ask1']==0 and this_quotation[stock_code]['volume']==0:
stop_stocks.append(stock_code)
else:
pass
if except_stocks:
all_codes = list(set(all_codes).difference(set(except_stocks)))
#print('all_codes=',('150251' in all_codes))
#print('stop_stocks=', stop_stocks)
#print(len(stop_stocks))
#print('all_stocks=',all_stocks)
#print(len(all_stocks))
return stop_stocks,all_codes
def get_exit_data(symbols,last_date_str):
refer_index = ['sh','cyb']
symbols = symbols +refer_index
temp_datas = {}
for symbol in symbols:
dest_df=pds.pd.read_csv('C:/hist/day/data/%s.csv' % symbol)
print(dest_df)
#dest_df = get_raw_hist_df(code_str=symbol)
if dest_df.empty:
pass
else:
dest_df_last_date = dest_df.tail(1).iloc[0]['date']
if dest_df_last_date==last_date_str:
exit_price = dest_df.tail(3)
return
#get_exit_data(symbols=['000029'],last_date_str='2016/08/23')
#get_stopped_stocks()
def back_test_dapan(test_codes,k_num=0,source='yh',rate_to_confirm = 0.01,processor_id=0,dapan_stocks=[]):
    i=0
    dapan_ho_df = tds.pd.DataFrame({})
    for stock_symbol in test_codes:
        if stock_symbol=='000029' and source=='easyhistory':
            continue
        print(i,stock_symbol)
        i = i + 1
        s_stock=tds.Stockhistory(stock_symbol,'D',test_num=k_num,source=source,rate_to_confirm=rate_to_confirm)
        if s_stock.h_df.empty:
            print('New stock %s and no history data' % stock_symbol)
            continue
        if dapan_stocks and (stock_symbol in dapan_stocks):
            dapan_criteria = ((s_stock.temp_hist_df['o_change']> 0.30) & (s_stock.temp_hist_df['pos20'].shift(1)<=1.0))
            dapan_regress_column_type = 'open'
            dapan_high_o_df,dapan_high_open_columns = s_stock.regress_common(dapan_criteria,post_days=[0,-1,-2,-3,-4,-5,-10,-20,-60],regress_column = dapan_regress_column_type,
                        base_column='open',fix_columns=['date','close','p_change','o_change','position','pos20','oo_chg','oh_chg','ol_chg','oc_chg'])
            dapan_high_o_df['code'] = stock_symbol
            dapan_high_o_df['ho_index'] = np.where(dapan_high_o_df['pos20']<=0,0,(dapan_high_o_df['o_change']/dapan_high_o_df['pos20']).round(2))
            dapan_ho_df = dapan_ho_df.append(dapan_high_o_df)
    return dapan_ho_df
def back_test_stocks(test_codes,k_num=0,source='yh',rate_to_confirm = 0.01,processor_id=0,save_type='',
all_result_columns=[],trend_columns=[],all_temp_columns=[],deep_star_columns=[]):
i=0
ma_num = 20
regress_column_type = 'close'
all_result_df = tds.pd.DataFrame({}, columns=all_result_columns)
all_trend_result_df = tds.pd.DataFrame({}, columns=trend_columns)
all_temp_hist_df = tds.pd.DataFrame({}, columns=all_temp_columns)
#deep_star_columns = ['date','close','p_change','o_change','position','low_high_open','high_o_day0','high_o_day1','high_o_day3',
# 'high_o_day5','high_o_day10','high_o_day20','high_o_day50']
#deep_star_columns = []
deep_star_df = tds.pd.DataFrame({}, columns=deep_star_columns)
print('processor_id=%s : %s'% (processor_id, test_codes))
for stock_symbol in test_codes:
if stock_symbol=='000029' and source=='easyhistory':
continue
print('processor_id=%s :%s,%s' %(processor_id,i,stock_symbol))
s_stock=tds.Stockhistory(stock_symbol,'D',test_num=k_num,source=source,rate_to_confirm=rate_to_confirm)
if s_stock.h_df.empty:
print('New stock %s and no history data' % stock_symbol)
continue
if True:
#try:
result_df = s_stock.form_temp_df(stock_symbol)
test_result = s_stock.regression_test(rate_to_confirm)
recent_trend = s_stock.get_recent_trend(num=ma_num,column='close')
s_stock.diff_ma(ma=[10,30],target_column='close',win_num=5)
temp_hist_df = s_stock.temp_hist_df.set_index('date')
#temp_hist_df.to_csv('C:/hist/day/temp/%s.csv' % stock_symbol)
temp_hist_df_tail = temp_hist_df.tail(1)
temp_hist_df_tail['code'] = stock_symbol
all_temp_hist_df= all_temp_hist_df.append(temp_hist_df_tail)
#columns = ['close','p_change','o_change','position','low_high_open','high_o_day0','high_o_day1','high_o_day3','high_o_day5','high_o_day10','high_o_day20']
#high_o_df,high_open_columns = s_stock.regress_high_open(regress_column = regress_column_type,base_column='open')
#criteria = s_stock.temp_hist_df['low_high_open']!= 0
criteria = ((s_stock.temp_hist_df['star_l']> 0.50) & (s_stock.temp_hist_df['l_change']<-3.0) & (s_stock.temp_hist_df['pos20'].shift(1)<0.2))
high_o_df,high_open_columns = s_stock.regress_common(criteria,post_days=[0,-1,-2,-3,-4,-5,-10,-20,-60],regress_column = regress_column_type,
base_column='close',fix_columns=['date','close','p_change','o_change','position','pos20','MAX20high','star_l'])
high_o_df['code'] = stock_symbol
high_o_df['star_index'] = np.where(high_o_df['pos20']<=0,0,(high_o_df['star_l']/high_o_df['pos20']*((high_o_df['MAX20high']-high_o_df['close'])/high_o_df['MAX20high'])).round(2))
deep_star_df= deep_star_df.append(high_o_df)
i = i+1
if test_result.empty:
pass
else:
test_result_df = tds.pd.DataFrame(test_result.to_dict(), columns=all_result_columns, index=[stock_symbol])
all_result_df = all_result_df.append(test_result_df,ignore_index=False)
if recent_trend.empty:
pass
else:
trend_result_df = tds.pd.DataFrame(recent_trend.to_dict(), columns=trend_columns, index=[stock_symbol])
all_trend_result_df = all_trend_result_df.append(trend_result_df,ignore_index=False)
#except:
# print('Regression test exception for stock: %s' % stock_symbol)
if save_type=='csv': #write to csv
all_temp_hist_df_file_name = 'C:/work/temp1/all_temp_hist_%s' %processor_id +'.csv'
all_result_df_file_name = 'C:/work/temp1/all_result_%s' %processor_id +'.csv'
deep_star_df_file_name = 'C:/work/temp1/deep_star_%s' %processor_id +'.csv'
all_trend_result_df_file_name = 'C:/work/temp1/all_trend_result_%s' %processor_id +'.csv'
all_temp_hist_df.to_csv(all_temp_hist_df_file_name)
all_result_df.to_csv(all_result_df_file_name)
deep_star_df.to_csv(deep_star_df_file_name)
all_trend_result_df.to_csv(all_trend_result_df_file_name)
return all_temp_hist_df,all_result_df,deep_star_df,all_trend_result_df
def back_test_one_stock(stock_symbol,rate_to_confirm=0.0001,temp_dir=fc.ALL_TEMP_DIR,bs_temp_dir=fc.ALL_BACKTEST_DIR):
    source = 'yh'  # history source is fixed to the YH export (see the call below)
    if stock_symbol=='000029' and source=='easyhistory':
        return
s_stock=tds.Stockhistory(stock_symbol,'D',test_num=0,source='yh',rate_to_confirm=0.01)
if s_stock.h_df.empty:
print('New stock %s and no history data' % stock_symbol)
return
result_df = s_stock.form_temp_df(stock_symbol)
s_stock.form_regression_result(save_dir=bs_temp_dir,rate_to_confirm = 0.0001)
#recent_trend = s_stock.get_recent_trend(num=20,column='close')
s_stock.diff_ma_score(ma=[10,30,60,120,250],target_column='close',win_num=5)
temp_hist_df = s_stock.temp_hist_df.set_index('date')
try:
temp_hist_df.to_csv(temp_dir + '%s.csv' % stock_symbol)
except:
pass
"""
temp_hist_df_tail = temp_hist_df.tail(1)
temp_hist_df_tail['code'] = stock_symbol
"""
return
def multiprocess_back_test(allcodes,pool_num=10):
#(code_list_dict,k_num=0,source='yh',rate_to_confirm = 0.01,processor_id=0,save_type='',
#all_result_columns=[],trend_columns=[],all_temp_columns=[],deep_star_columns=[]):
#code_list_dict = seprate_list(all_codes,4)
#print('code_list_dict=',code_list_dict)
print('Parent process %s.' % os.getpid())
print('num_stocks=',len(allcodes))
start = time.time()
"""
processor_num=len(code_list_dict)
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool()
for i in range(processor_num):
p.apply_async(back_test_stocks, args=(code_list_dict[i],k_num,source,rate_to_confirm,i,'csv',
all_result_columns,trend_columns,all_temp_columns,deep_star_columns,))
"""
""" Map multiprocess """
p = Pool(pool_num)
p.map(back_test_one_stock,allcodes)
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
end = time.time()
time_cost = end - start
print('Task multiprocess_back_test runs %0.2f seconds.' % time_cost)
return time_cost
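# Usage sketch (hypothetical call, matching the signatures above): back-test the
# whole YH export with 10 worker processes.
#   all_codes = pds.get_all_code(hist_dir='C:/中国银河证券海王星/T0002/export/')
#   multiprocess_back_test(all_codes, pool_num=10)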
def combine_multi_process_result(processor_num=4,all_result_columns=[],all_temp_columns=[],trend_columns=[],deep_star_columns=[]):
#all_result_columns,all_temp_columns,trend_columns,deep_star_columns=[],[],[],[]
all_result_df = tds.pd.DataFrame({}, columns=[])#all_result_columns)
all_trend_result_df = tds.pd.DataFrame({}, columns=[])#trend_columns)
all_temp_hist_df = tds.pd.DataFrame({}, columns=[])#all_temp_columns)
deep_star_df = tds.pd.DataFrame({}, columns=[])#deep_star_columns)
    for processor_id in range(processor_num):
all_temp_hist_df_file_name = 'C:/work/temp1/all_temp_hist_%s' %processor_id +'.csv'
all_result_df_file_name = 'C:/work/temp1/all_result_%s' %processor_id +'.csv'
deep_star_df_file_name = 'C:/work/temp1/deep_star_%s' %processor_id +'.csv'
all_trend_result_df_file_name = 'C:/work/temp1/all_trend_result_%s' %processor_id +'.csv'
all_temp_hist_df = all_temp_hist_df.append(tds.pd.read_csv(all_temp_hist_df_file_name, header=0,encoding='gb2312'),ignore_index=True) #names=all_temp_columns
all_result_df = all_result_df.append(tds.pd.read_csv(all_result_df_file_name, header=0,encoding='gb2312'),ignore_index=True) #names=all_result_columns,
deep_star_df = deep_star_df.append(tds.pd.read_csv(deep_star_df_file_name, header=0,encoding='gb2312'),ignore_index=True)#names=deep_star_columns,
all_trend_result_df = all_trend_result_df.append(tds.pd.read_csv(all_trend_result_df_file_name, header=0,encoding='gb2312'),ignore_index=True) #names=trend_columns,
return all_temp_hist_df,all_result_df,deep_star_df,all_trend_result_df
def seprate_list(all_codes,seprate_num=4):
#all_codes = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
c = len(all_codes)
sub_c = int(c/seprate_num)
code_list_dict = {}
for j in range(seprate_num-1):
code_list_dict[j] = all_codes[j*sub_c:(j+1)*sub_c]
code_list_dict[j+1] = all_codes[(j+1)*sub_c:]
return code_list_dict
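# Worked example: seprate_list(list(range(10)), 4) -> {0: [0, 1], 1: [2, 3],
# 2: [4, 5], 3: [6, 7, 8, 9]} — the last worker absorbs the remainder.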
def get_latest_backtest_datas(write_file_name=fc.ALL_BACKTEST_FILE,data_dir=fc.ALL_BACKTEST_DIR):
"""
获取所有回测最后一个K线的数据:特定目录下
"""
#columns = ['date', 'close', 'id', 'trade', 'p_change', 'position', 'operation', 's_price', 'b_price', 'profit', 'cum_prf', 'fuli_prf', 'hold_count']
#df = combine_file(tail_num=1,dest_dir=data_dir,keyword='bs_',prefile_slip_num=3,columns=columns)
columns = pds.get_data_columns(dest_dir=data_dir)
df = combine_file(tail_num=1,dest_dir=data_dir,keyword='',prefile_slip_num=0,columns=columns)
if df.empty:
return df
df['counts']=df.index
df = df[columns+['counts','code']]
df['code'] = df['code'].apply(lambda x: pds.format_code(x))
df['name'] = df['code'].apply(lambda x: pds.format_name_by_code(x,CHINESE_DICT))
df = df.set_index('code')
if write_file_name:
df.to_csv(write_file_name,encoding='utf-8')
return df
def get_latest_backtest_datas_from_csv(file_name=fc.ALL_BACKTEST_FILE):
"""
获取所有回测最后一个K线的数据
"""
#file_name = 'D:/work/backtest/all_bs_stocks.csv'
columns = ['date', 'close', 'id', 'trade', 'p_change', 'position', 'operation', 's_price', 'b_price', 'profit', 'cum_prf', 'fuli_prf', 'hold_count']
columns = pds.get_data_columns(dest_dir=fc.ALL_BACKTEST_DIR) + ['counts','code','name']
try:
        df = tds.pd.read_csv(file_name,usecols=columns)  # pandas comes in via tradeStrategy
df['code'] = df['code'].apply(lambda x: pds.format_code(x))
df = df.set_index('code')
return df
except:
return get_latest_backtest_datas(write_file_name=file_name)
def get_latest_temp_datas(write_file_name=fc.ALL_TEMP_FILE,data_dir=fc.ALL_TEMP_DIR,files=[]):
"""
获取所有回测最后一个K线的数据:特定目录下
"""
#columns = ['date', 'close', 'id', 'trade', 'p_change', 'position', 'operation', 's_price', 'b_price', 'profit', 'cum_prf', 'fuli_prf', 'hold_count']
columns = pds.get_data_columns(dest_dir=data_dir)
#df = combine_file(tail_num=1,dest_dir=data_dir,keyword='bs_',prefile_slip_num=3,columns=columns)
df = combine_file(tail_num=1,dest_dir=data_dir,keyword='',prefile_slip_num=0,columns=columns,file_list=files)
if df.empty:
return df
df['counts']=df.index
df = df[columns+['counts','code']]
df['code'] = df['code'].apply(lambda x: pds.format_code(x))
df['name'] = df['code'].apply(lambda x: pds.format_name_by_code(x,CHINESE_DICT))
df = df.set_index('code')
if write_file_name:
df.to_csv(write_file_name,encoding='utf-8')
return df
def get_latest_temp_datas_from_csv(file_name=fc.ALL_TEMP_FILE):
"""
获取所有回测最后一个K线的数据
"""
#file_name = 'D:/work/backtest/all_bs_stocks.csv'
#columns = ['date', 'close', 'id', 'trade', 'p_change', 'position', 'operation', 's_price', 'b_price', 'profit', 'cum_prf', 'fuli_prf', 'hold_count']
columns = pds.get_data_columns(dest_dir=fc.ALL_TEMP_DIR) + ['counts','code','name']
try:
        df = tds.pd.read_csv(file_name,usecols=columns)  # pandas comes in via tradeStrategy
df['code'] = df['code'].apply(lambda x: pds.format_code(x))
df = df.set_index('code')
return df
except:
        return get_latest_temp_datas(write_file_name=file_name)
def get_all_regress_summary(given_stocks=[],confirm=0.01,dest_file=fc.ALL_SUMMARY_FILE):
all_result_df = tds.pd.DataFrame({})
"""
latest_temp_df = tds.pd.read_csv( fc.ALL_TEMP_FILE)
latest_temp_df['code'] = latest_temp_df['code'].apply(lambda x: pds.format_code(x))
stock_codes = latest_temp_df['code'].values.tolist()
latest_temp_df = latest_temp_df.set_index('code')
#print(latest_temp_df.ix['000014'].date)
"""
#given_stocks = ['000001','000002']
for stock_symbol in given_stocks:
s_stock = tds.Stockhistory(stock_symbol,'D',test_num=0,source='yh',rate_to_confirm=confirm)
result_series = s_stock.get_regression_result(rate_to_confirm=confirm,refresh_regression=False,
from_csv=True,bs_csv_dir=fc.ALL_BACKTEST_DIR,temp_csv_dir=fc.ALL_TEMP_DIR)
if not result_series.empty:
test_result_df = tds.pd.DataFrame({stock_symbol:result_series}).T
all_result_df = all_result_df.append(test_result_df,ignore_index=False)
if dest_file:
try:
all_result_df['code'] = all_result_df.index
all_result_df['name'] = all_result_df['code'].apply(lambda x: pds.format_name_by_code(x,CHINESE_DICT))
del all_result_df['code']
#dest_file = 'D:/work/result/all_summary1.csv'
all_result_df.to_csv(dest_file,encoding='utf-8')
except:
pass
return all_result_df
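# Usage sketch (hypothetical codes, mirroring the commented example above):
#   summary = get_all_regress_summary(given_stocks=['000001', '000002'],
#                                     dest_file=fc.ALL_SUMMARY_FILE)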
def back_test_yh_only(given_codes=[],except_stocks=[],mark_insql=True):
"""
高于三天收盘最大值时买入,低于三天最低价的最小值时卖出: 33策略
"""
"""
:param k_num: string type or int type: mean counts of history if int type; mean start date of history if date str
:param given_codes: str type,
:param except_stocks: list type,
:param type: str type, force update K data from YH
:return: source: history data from web if 'easyhistory', history data from YH if 'YH'
"""
#addition_name = ''
#if type == 'index':
last_date_str = pds.tt.get_last_trade_date(date_format='%Y/%m/%d')
print('last_date_str=',last_date_str)
all_stock_df = get_latest_yh_k_stocks_from_csv()
print('all_stock_df:',all_stock_df)
all_stocks = all_stock_df.index.values.tolist()
if given_codes:
all_stocks = list(set(all_stocks).intersection(set(given_codes)))
    print('total stocks: ',len(all_stocks))
stop_df = all_stock_df[all_stock_df.date<last_date_str]
all_stop_codes = stop_df.index.values.tolist()
    print('suspended stocks: ',len(all_stop_codes))
all_trade_codes = list(set(all_stocks).difference(set(all_stop_codes)))
final_codes = list(set(all_trade_codes).difference(set(except_stocks)))
    print('stocks to back-test: ',len(final_codes))
#stock_sql = StockSQL()
#pre_is_tdx_uptodate,pre_is_pos_uptodate,pre_is_backtest_uptodate,systime_dict = stock_sql.is_histdata_uptodate()
#print(pre_is_tdx_uptodate,pre_is_pos_uptodate,pre_is_backtest_uptodate,systime_dict)
pre_is_backtest_uptodate = False
#print('final_codes=',final_codes)
#stock_sql.close()
if not pre_is_backtest_uptodate:
        time_cost = multiprocess_back_test(final_codes,pool_num=10) # roughly 20 minutes
"""
if time_cost>300:#防止超时
stock_sql = StockSQL()
if mark_insql:
#标识已经更新回测数据至数据库
stock_sql.update_system_time(update_field='backtest_time')
print('完成回测')
is_tdx_uptodate,is_pos_uptodate,is_backtest_uptodate,systime_dict = stock_sql.is_histdata_uptodate()
"""
is_backtest_uptodate = True
if is_backtest_uptodate:
            print('triggering manual persistence of back-test data,', datetime.datetime.now())
            """Aggregate back-test data and write it to CSV for the trading side (~2 minutes)."""
            df = get_latest_backtest_datas(write_file_name=fc.ALL_BACKTEST_FILE,data_dir=fc.ALL_BACKTEST_DIR)
            print('back-test data aggregated,',datetime.datetime.now())
            df = get_latest_backtest_datas_from_csv() # read all back-test data from the CSV
            """Aggregate temp data and write it to CSV for the trading side (~8 minutes)."""
            #temp_df = get_latest_temp_datas(write_file_name=fc.ALL_TEMP_FILE,data_dir=fc.ALL_TEMP_DIR)
            print('temp data aggregated,',datetime.datetime.now())
            temp_df = get_latest_temp_datas_from_csv()
            summary_df = get_all_regress_summary(given_stocks=final_codes,dest_file=fc.ALL_SUMMARY_FILE)
            print('back-test summary analysis done (~20 minutes),',datetime.datetime.now())
            print('back-test data persisted')
        else:
            print('not marked in the database: data shows as not up to date')
def back_test_yh(given_codes=[],except_stocks=[],mark_insql=True):
"""
高于三天收盘最大值时买入,低于三天最低价的最小值时卖出: 33策略
"""
"""
:param k_num: string type or int type: mean counts of history if int type; mean start date of history if date str
:param given_codes: str type,
:param except_stocks: list type,
:param type: str type, force update K data from YH
:return: source: history data from web if 'easyhistory', history data from YH if 'YH'
"""
#addition_name = ''
#if type == 'index':
last_date_str = pds.tt.get_last_trade_date(date_format='%Y/%m/%d')
print('last_date_str=',last_date_str)
all_stock_df = get_latest_yh_k_stocks_from_csv()
#print('all_stock_df:',all_stock_df)
all_stocks = all_stock_df.index.values.tolist()
if given_codes:
all_stocks = list(set(all_stocks).intersection(set(given_codes)))
    print('total stocks: ',len(all_stocks))
stop_df = all_stock_df[all_stock_df.date<last_date_str]
all_stop_codes = stop_df.index.values.tolist()
    print('suspended stocks: ',len(all_stop_codes))
all_trade_codes = list(set(all_stocks).difference(set(all_stop_codes)))
final_codes = list(set(all_trade_codes).difference(set(except_stocks)))
    print('stocks to back-test: ',len(final_codes))
stock_sql = StockSQL()
pre_is_tdx_uptodate,pre_is_pos_uptodate,pre_is_backtest_uptodate,systime_dict = stock_sql.is_histdata_uptodate()
#print(pre_is_tdx_uptodate,pre_is_pos_uptodate,pre_is_backtest_uptodate,systime_dict)
pre_is_backtest_uptodate = False
#print('final_codes=',final_codes)
#stock_sql.close()
if not pre_is_backtest_uptodate:
time_cost = multiprocess_back_test(final_codes,pool_num=10)
        if time_cost>300: # guard against a timed-out run
stock_sql = StockSQL()
if mark_insql:
"""标识已经更新回测数据至数据库"""
stock_sql.update_system_time(update_field='backtest_time')
            print('back-test finished')
is_tdx_uptodate,is_pos_uptodate,is_backtest_uptodate,systime_dict = stock_sql.is_histdata_uptodate()
#is_backtest_uptodate = True
if is_backtest_uptodate:
                print('triggering manual persistence of back-test data,', datetime.datetime.now())
                """Aggregate back-test data and write it to CSV for the trading side."""
                df = get_latest_backtest_datas(write_file_name=fc.ALL_BACKTEST_FILE,data_dir=fc.ALL_BACKTEST_DIR)
                print('back-test data aggregated,',datetime.datetime.now())
                #df = get_latest_backtest_datas_from_csv() # read all back-test data from the CSV
                """Aggregate temp data and write it to CSV for the trading side."""
                temp_df = get_latest_temp_datas(write_file_name=fc.ALL_TEMP_FILE,data_dir=fc.ALL_TEMP_DIR)
                print('temp data aggregated,',datetime.datetime.now())
                #temp_df = get_latest_temp_datas_from_csv()
                summary_df = get_all_regress_summary(given_stocks=final_codes,dest_file=fc.ALL_SUMMARY_FILE)
                print('back-test summary analysis done,',datetime.datetime.now())
                print('back-test data persisted')
            else:
                print('not marked in the database: data shows as not up to date')
#stock_sql.close()
else:
        print('back-test already done, nothing to do; last back-test time: %s' % systime_dict['backtest_time'])
        manual_update_csv_data = True
        if manual_update_csv_data:
            print('triggering manual persistence of back-test data,', datetime.datetime.now())
            """Aggregate back-test data and write it to CSV for the trading side."""
            df = get_latest_backtest_datas(write_file_name=fc.ALL_BACKTEST_FILE,data_dir=fc.ALL_BACKTEST_DIR)
            print('back-test data aggregated,',datetime.datetime.now())
            #df = get_latest_backtest_datas_from_csv() # read all back-test data from the CSV
            """Aggregate temp data and write it to CSV for the trading side."""
            temp_df = get_latest_temp_datas(write_file_name=fc.ALL_TEMP_FILE,data_dir=fc.ALL_TEMP_DIR)
            print('temp data aggregated,',datetime.datetime.now())
            #temp_df = get_latest_temp_datas_from_csv()
            summary_df = get_all_regress_summary(given_stocks=final_codes,dest_file=fc.ALL_SUMMARY_FILE)
            print('back-test summary analysis done,',datetime.datetime.now())
            print('back-test data persisted')
else:
            print('not marked in the database: data shows as not up to date')
return True
def back_test0(k_num=0,given_codes=[],except_stocks=['000029'], type='stock', source='easyhistory',rate_to_confirm = 0.01,dapan_stocks=['000001','000002']):
"""
高于三天收盘最大值时买入,低于三天最低价的最小值时卖出: 33策略
"""
"""
:param k_num: string type or int type: mean counts of history if int type; mean start date of history if date str
:param given_codes: str type,
:param except_stocks: list type,
:param type: str type, force update K data from YH
:return: source: history data from web if 'easyhistory', history data from YH if 'YH'
"""
#addition_name = ''
#if type == 'index':
start = time.time()
addition_name = type
all_codes = []
all_stop_codes = []
all_stocks = []
all_trade_codes = []
#print('source=',source)
if source =='yh' or source=='YH':
hist_dir='C:/中国银河证券海王星/T0002/export/'
#print(given_codes,except_stocks)
all_stop_codes,all_stocks1 = get_stopped_stocks(given_codes,except_stocks,hist_dir)
#print('all_stocks1=',('150251' in all_stocks1))
all_trade_codes = list(set(all_stocks1).difference(set(all_stop_codes)))
else:
hist_dir='C:/hist/day/data/'
all_stop_codes,all_stocks = get_stopped_stocks(given_codes,except_stocks,hist_dir)
#print('all_stocks2=',('150251' in all_stocks))
all_trade_codes = list(set(all_stocks).difference(set(all_stop_codes)))
#print('all_trade_codes=',('150251' in all_trade_codes))
#all_codes = ['300128', '002288', '002156', '300126','300162','002717','002799','300515','300516','600519',
# '000418','002673','600060','600887','000810','600115','600567','600199','000596','000538','002274','600036','600030','601398']
column_list = ['count', 'mean', 'std', 'max', 'min', '25%','50%','75%','cum_prf',
'fuli_prf','yearly_prf','last_trade_date','last_trade_price','min_hold_count',
'max_hold_count','avrg_hold_count','this_hold_count','exit','enter',
'position','max_amount_rate','max_amount_distance','break_in',
'break_in_count','break_in_date', 'break_in_distance','success_rate','days']
all_result_df = tds.pd.DataFrame({}, columns=column_list)
i=0
trend_column_list = ['count', 'mean','chg_fuli', 'std', 'min', '25%', '50%', '75%', 'max', 'c_state',
'c_mean', 'pos_mean', 'ft_rate', 'presure', 'holding', 'close','cont_num','amount_rate','ma_amount_rate']
all_trend_result_df = tds.pd.DataFrame({}, columns=trend_column_list)
all_temp_hist_df = tds.pd.DataFrame({}, columns=[])
ma_num = 20
stock_basic_df=ts.get_stock_basics()
basic_code = stock_basic_df['code'].to_dict()
basic_code_keys = basic_code.keys()
#print('all_trade_codes=',all_trade_codes)
deep_columns = ['date','close','p_change','o_change','position','low_high_open','high_o_day0','high_o_day1','high_o_day3',
'high_o_day5','high_o_day10','high_o_day20','high_o_day50']
high_open_columns = []
deep_star_df = tds.pd.DataFrame({}, columns=high_open_columns)
dapan_ho_df = tds.pd.DataFrame({}, columns=high_open_columns)
regress_column_type = 'close'
#s_stock=tds.Stockhistory('300689','D',test_num=0,source='yh',rate_to_confirm=0.01)
#print(s_stock.h_df)
temp_columns = ['open', 'high', 'low', 'last_close', 'close', 'p_change', 'volume', 'amount',
'ROC1', 'MAX20', 'MAX20high', 'MIN20', 'MIN20low', 'h_change', 'l_change', 'o_change',
'MAX3', 'MIN3low', 'MA5', 'v_rate', 'amount_rate', 'ma_amount_rate', 'MA10', 'LINEARREG_ANGLE6MA10',
'LINEARREG_ANGLE10MA10', 'diff_ma10', 'MA6diff_ma10', 'MA20', 'MA30', 'LINEARREG_ANGLE14MA30',
'LINEARREG_ANGLE30MA30', 'diff_ma30', 'MA14diff_ma30', 'LINEARREG_ANGLE14diff_ma30', 'MA60', 'MA120',
'MA250', 'CCI', 'macd', 'macdsignal', 'macdhist', 'u_band', 'm_band', 'l_band', 'fastK', 'slowD',
'fastJ', 'MFI', 'ATR', 'NATR', 'MOM', 'CDLMORNINGDOJISTAR', 'CDLABANDONEDBABY', 'CDLBELTHOLD',
'CDLBREAKAWAY', 'CDL3WHITESOLDIERS', 'CDLPIERCING', 'SAR', 'RSI', 'LINEARREG14', 'LINEARREG30',
'LINEARREG_ANGLE14', 'LINEARREG_ANGLE8', 'LINEARREG_INTERCEPT14', 'LINEARREG_SLOPE14', 'LINEARREG_SLOPE30',
'LINEARREG_ANGLE8ROC1', 'LINEARREG_ANGLE5MA5', 'LINEARREG_ANGLE8MA20', 'LINEARREG_ANGLE14MA60',
'LINEARREG_ANGLE14MA120', 'LINEARREG_ANGLE14MA250', 'LINEARREG_ANGLE8CCI', 'LINEARREG_ANGLE14SAR',
'LINEARREG_ANGLE14RSI', 'LINEARREG_ANGLE8macdhist', 'LINEARREG_ANGLE8MOM', 'LINEARREG_ANGLE14MOM',
'MTM', 'ma5', 'ma10', 'ma20', 'ma30', 'ma60', 'ma120', 'ma250', 'v_ma5', 'v_ma10', 'amount_ma5',
'amount_ma10', 'atr', 'atr_ma5', 'atr_ma10', 'atr_5_rate', 'atr_5_max_r', 'atr_10_rate', 'atr_10_max_r',
'c_max10', 'c_min10', 'h_max10', 'l_min10', 'h_max20', 'l_min20', 'c_max20', 'c_min20', 'c_max60',
'c_min60', 'l_max3', 'h_max3', 'c_max3', 'l_min3', 'c_min2', 'chg_mean2', 'chg_min2', 'chg_min3',
'chg_min4', 'chg_min5', 'chg_max2', 'chg_max3', 'amount_rate_min2', 'rate_1.8', 'atr_in', 'star',
'star_h', 'star_l', 'star_chg', 'k_chg', 'k_rate', 'reverse', 'p_rate', 'oo_chg', 'oh_chg', 'ol_chg',
'oc_chg', 'gap', 'island', 'cross1', 'cross2', 'cross3', 'cross4', 'std', 'pos20', 'pos60', 'cOma5',
'cOma10', 'ma5_k', 'ma5_k2', 'ma5_turn', 'ma10_k', 'ma10_k2', 'ma10_turn', 'ma20_k', 'ma20_k2',
'ma20_turn', 'trend_chg', 'ma5Cma20', 'ma5Cma30', 'ma10Cma20', 'ma10Cma30', 'tangle_p', 'tangle_p1',
'star_in', 'low_high_open', 'break_in', 'break_in_p', 'ma_score0', 'ma30_c_ma60', 'ma10_c_ma30',
'ma5_c_ma10', 'ma_trend_score', 'ma_score', 'gt_open', 'gt_close', 'great_v_rate', 'gt2_amount',
'gt3_amount', 'gt_cont_close', 'k_trend', 'k_score0', 'k_score_m', 'k_score', 'position', 'operation',
'exit_3p', 's_price0', 's_price1', 's_price', 'b_price0', 'b_price1', 'b_price', 'trade', 'trade_na',
'diff_v_MA10', 'diff_MA10', 'diff_std_MA10', 'diff_v_MA30', 'diff_MA30', 'diff_std_MA30','code']
#multiprocess_back_test()
code_list_dict = seprate_list(all_trade_codes,seprate_num=4)
multiprocess_back_test(code_list_dict,k_num=0,source='yh',rate_to_confirm = 0.01,processor_id=0,save_type='',
all_result_columns=column_list,trend_columns=trend_column_list,all_temp_columns=[],deep_star_columns=[])
all_temp_hist_df,all_result_df,deep_star_df,all_trend_result_df =combine_multi_process_result(processor_num=4,all_result_columns=column_list,
all_temp_columns=temp_columns,trend_columns=trend_column_list,deep_star_columns=deep_columns)
#print(result_df.tail(20))
#all_result_df = all_result_df.sort_index(axis=0, by='sum', ascending=False)
print('all_result_df=',all_result_df)
all_result_df = all_result_df.sort_values(axis=0, by='cum_prf', ascending=False)
all_trend_result_df = all_trend_result_df.sort_values(axis=0, by='chg_fuli', ascending=False)
result_summary = all_result_df.describe()
result_codes = all_result_df.index.values.tolist()
result_codes_dict = {}
on_trade_dict = {}
valid_dict = {}
for code in result_codes:
if code in basic_code_keys:
result_codes_dict[code] = basic_code[code]
else:
result_codes_dict[code] = 'NA'
if code in all_stop_codes:
on_trade_dict[code] = 1
else:
on_trade_dict[code] = 0
if code in except_stocks:
valid_dict[code] = 1
else:
valid_dict[code] = 0
"""
all_temp_dict = {}
all_temp_codes = all_temp_hist_df.index.values.tolist()
for code in result_codes:
if code in all_temp_codes:
all_temp_dict[code]= basic_code[code]
else:
result_codes_dict[code] = 'NA'
all_temp_hist_df['code'] = tds.Series(result_codes_dict,index=all_result_df.index)
"""
#print(result_codes_dict)
#print(tds.pd.DataFrame(result_codes_dict, columns=['code'], index=list(result_codes_dict.keys())))
#all_result_df['code'] = result_codes_dict
all_result_df['code'] = tds.Series(result_codes_dict,index=all_result_df.index)
deep_star_df['code'] = tds.Series(result_codes_dict,index=deep_star_df.index)
print('deep_star_df=',deep_star_df)
deep_star_df = deep_star_df[['code','code','star_index']+high_open_columns]
dapan_codes_dict = {}
all_trend_result_df['code'] = tds.Series(result_codes_dict,index=all_trend_result_df.index)
all_result_df['stopped'] = tds.Series(on_trade_dict,index=all_result_df.index)
all_trend_result_df['stopped'] = tds.Series(on_trade_dict,index=all_trend_result_df.index)
all_result_df['invalid'] = tds.Series(valid_dict, index=all_result_df.index)
all_trend_result_df['invalid'] = tds.Series(valid_dict, index=all_trend_result_df.index)
all_result_df['max_r'] = all_result_df['max']/all_result_df['cum_prf']
ma_c_name = '%s日趋势数' % ma_num
trend_column_chiness = {'count':ma_c_name, 'mean': '平均涨幅','chg_fuli': '复利涨幅', 'std': '标准差', 'min': '最小涨幅', '25%': '25%', '50%': '50%', '75%': '75%', 'max': '最大涨幅', 'c_state': '收盘价状态',
'c_mean': '平均收盘价', 'pos_mean': '平均仓位', 'ft_rate': '低点反弹率', 'presure': '压力', 'holding': '支撑', 'close': '收盘价','cont_num': '连涨天数', 'code': '名字', 'stopped': '停牌','invalid': '除外',
'amount_rate':'量比','ma_amount_rate':'短长量比'}
print(all_trend_result_df)
all_trend_result_df_chinese = all_trend_result_df.rename(index=str, columns=trend_column_chiness)
print(all_result_df)
print(all_result_df.describe())
if isinstance(k_num, str):
k_num = k_num.replace('/','').replace('-','')
latest_date_str = pds.tt.get_latest_trade_date(date_format='%Y/%m/%d')
latest_date_str = latest_date_str.replace('/','').replace('-','')
rate_to_confirm_str = '%s' % rate_to_confirm
rate_to_confirm_str = 'rate' + rate_to_confirm_str.replace('.', '_')
#print('latest_date_str=',latest_date_str)
tail_name = '%s_from_%s_%s.csv' % (latest_date_str,k_num,rate_to_confirm_str)
#all_result_df['yearly_prf'] = all_result_df['fuli_prf']**(1.0/(all_result_df['days']/365.0))
result_column_list = ['count','code', 'mean', 'std', 'max', 'min', 'cum_prf',
'fuli_prf','yearly_prf','success_rate','last_trade_date','last_trade_price','min_hold_count',
'max_hold_count','avrg_hold_count','this_hold_count','exit','enter',
'position','max_amount_rate','max_amount_distance','break_in',
'break_in_count','break_in_date', 'break_in_distance',
'stopped','invalid','max_r','25%','50%','75%',]
all_result_df = all_result_df[result_column_list]
all_result_df.to_csv('C:/work/temp/regression_test_' + addition_name +tail_name)
deep_star_df.to_csv('C:/work/temp/pos20_star_%s'% regress_column_type + addition_name +tail_name)
if all_result_df.empty:
pass
else:
consider_df = all_result_df[(all_result_df['max_amount_rate']>2.0) & (all_result_df['position']>0.35) & (all_result_df['stopped']==0) & (all_result_df['invalid']==0)]# & (all_result_df['last_trade_price'] ==0)]
consider_df.to_csv('C:/work/temp/consider_' + addition_name +tail_name)
active_df = all_result_df[(all_result_df['max_r']<0.4) & (all_result_df['code']!='NA') & # (all_result_df['min']>-0.08) & (all_result_df['position']>0.35) &
(all_result_df['max']>(3.9 *all_result_df['min'].abs())) & (all_result_df['invalid']==0) &(all_result_df['stopped']==0)]
active_df['active_score'] = active_df['fuli_prf']/active_df['max_r']/active_df['std']*active_df['fuli_prf']/active_df['cum_prf']
active_df = active_df.sort_values(axis=0, by='active_score', ascending=False)
active_df.to_csv('C:/work/temp/active_' + addition_name +tail_name)
tupo_df = all_result_df[(all_result_df['break_in_distance']!=0) &(all_result_df['break_in_distance']<=20) &
(all_result_df['position']>0.35) & (all_result_df['stopped']==0) &
(all_result_df['invalid']==0) & (all_result_df['code']!='NA') & (all_result_df['last_trade_price']!=0)]# & (all_result_df['last_trade_price'] ==0)]
tupo_df.to_csv('C:/work/temp/tupo_' + addition_name +tail_name)
result_summary.to_csv('C:/work/temp/result_summary_' + addition_name +tail_name)
all_trend_result_df_chinese.to_csv('C:/work/temp/trend_result_%s' % ma_num + addition_name +'%s_to_%s_%s.csv' % (k_num,latest_date_str,rate_to_confirm_str))
if not all_temp_hist_df.empty:
#all_temp_hist_df = all_temp_hist_df[column_list]
all_temp_hist_df = all_temp_hist_df.set_index('code')
all_temp_hist_df.to_csv('C:/work/temp/all_temp_' + addition_name +tail_name)
reverse_df = all_temp_hist_df[(all_temp_hist_df['reverse']>0) &
(all_temp_hist_df['LINEARREG_ANGLE8']<-2.0) &
(all_temp_hist_df['position']>0.35)]#
#reverse_df['r_sort'] = reverse_df['star_chg']/reverse_df['pos20']
reverse_df.to_csv('C:/work/temp/reverse_df_' + addition_name +tail_name)
long_turn_min_angle = -0.5
short_turn_min_angle = 0.2
ma30_df = all_temp_hist_df[(all_temp_hist_df['LINEARREG_ANGLE14MA120']>long_turn_min_angle) &
(all_temp_hist_df['LINEARREG_ANGLE14MA250']>long_turn_min_angle) &
(all_temp_hist_df['LINEARREG_ANGLE14MA60']>long_turn_min_angle) &
(all_temp_hist_df['LINEARREG_ANGLE14MA30']<1.0) &
(all_temp_hist_df['LINEARREG_ANGLE5MA5']>short_turn_min_angle) &
(all_temp_hist_df['LINEARREG_ANGLE6MA10']>short_turn_min_angle) &
(all_temp_hist_df['LINEARREG_ANGLE5MA5']>=all_temp_hist_df['LINEARREG_ANGLE6MA10']) &
(all_temp_hist_df['LINEARREG_ANGLE8ROC1']>0.0) &
(all_temp_hist_df['close']>all_temp_hist_df['ma30']) &
(all_temp_hist_df['position']>0.35)]#
ma30_df.to_csv('C:/work/temp/ma30_df_' + addition_name +tail_name)
"""
if dapan_ho_df.empty:
pass
else:
for code in dapan_stocks:
if code in basic_code_keys:
dapan_codes_dict[code] = basic_code[code]
else:
dapan_codes_dict[code] = 'NA'
dapan_ho_df['code'] = tds.Series(dapan_codes_dict,index=dapan_ho_df.index)
dapan_ho_df = dapan_ho_df[['code','code','ho_index']+dapan_high_open_columns]
dapan_ho_df.to_csv('C:/work/temp/dapan_high_open_%s'% regress_column_type + addition_name +tail_name)
"""
end = time.time()
print('Task Mybacktest runs %0.2f seconds.' % (end - start))
return all_result_df
#back_test(k_num='2015/08/30',given_codes=['000004','000005'],except_stocks=['000029'], type='stock', source='YH') | allisnone/pytrade | low_high33_backtest.py | Python | gpl-2.0 | 42,194 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import models
from django.apps import apps
from empresa.models import Empresa
import json
import os
import tempfile
import datetime
import requests
class Parking(models.Model):
empresa = models.OneToOneField(Empresa)
nombre = models.CharField(max_length=40)
plazas = models.IntegerField()
def __unicode__(self):
return "{} ({})".format(self.nombre, self.empresa)
def tupla_tarifa(self):
"Obtener un tarifario dada una recta definida por puntos"
# creamos una lista de listas
lista = map(list, self.tarifa_set.values_list('precio', 'hora'))
# agregamos el rango final de tiempo sacado de la siguiente linea
n = len(lista)
for i in range(n-1):
lista[i].append(lista[i+1][1])
# el rango final ponemos que es 24h
lista[n-1].append(datetime.timedelta(days=1))
# devolvemos [precio, hora_start, hora_end_no_inclusive]
return lista
def tabla_tarifa(self):
"Tarifario con hh:mm para visualizar"
for precio, min0, min1 in self.tupla_tarifa():
t = min1 - datetime.timedelta(seconds=1)
yield min0, t, precio
def get_dia(self):
return float(self.tarifa_set.last().precio)
def get_tarifa(self, td):
"Obtener una tarifa del tarifario"
# calculo de dias completos
precio_dias = td.days * self.get_dia()
# calculo de la fraccion de dia
td = datetime.timedelta(seconds=td.seconds)
for precio, min0, min1 in self.tupla_tarifa():
if min0 <= td < min1:
return precio_dias + float(precio)
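    # Worked example (hypothetical tariff rows): with Tarifa(precio=1.50, hora=0:00)
    # and Tarifa(precio=10.00, hora=2:00), tupla_tarifa() yields
    # [[1.50, 0:00, 2:00], [10.00, 2:00, 24:00]], get_dia() returns 10.0, and
    # get_tarifa(timedelta(days=1, hours=1)) charges 1*10.0 + 1.50 = 11.50.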
def barreras_entrada(self):
return self.barrera_set.filter(entrada=True)
def barreras_salida(self):
return self.barrera_set.filter(entrada=False)
def nodos_remotos(self):
return self.nodoremoto_set.all()
@property
def entrada_set(self):
Entrada = apps.get_model('tickets.Entrada')
return Entrada.objects.por_parking(self)
@property
def coches_hoy(self):
return self.entrada_set.de_hoy().count()
@property
def coches_dentro(self):
return self.entrada_set.de_hoy().dentro().count()
class Expendedor(models.Model):
parking = models.ForeignKey(Parking)
nombre = models.CharField(max_length=40)
mac = models.CharField(max_length=17)
camera_command = models.CharField(max_length=255, blank=True, null=True, help_text="Comando para la camara, "
"con {} donde queramos poner el output filename")
def saca_foto(self):
contenido = None
if self.camera_command:
filename = tempfile.mktemp()
ret = os.system(self.camera_command.format(filename))
if ret == 0:
contenido = open(filename).read()
if os.path.isfile(filename):
os.unlink(filename)
return contenido
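    # Example camera_command (assumed tooling, not shipped with the model):
    # "fswebcam -r 640x480 {}" — saca_foto() substitutes {} with a temp filename,
    # runs the command, and returns the captured bytes when it exits with 0.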
def __unicode__(self):
return "{} de {}".format(self.nombre, self.parking.nombre)
class Meta:
verbose_name = 'expendedor'
verbose_name_plural = 'expendedores'
class Barrera(models.Model):
parking = models.ForeignKey(Parking)
nombre = models.CharField(max_length=40)
slug = models.CharField(max_length=40, unique=True)
entrada = models.BooleanField()
abre_url = models.URLField(max_length=100, blank=True, null=True, help_text="si hay url es que esta activo")
abre_post = models.CharField(max_length=100, blank=True, null=True, help_text="post data en formato json")
abresiempre_url = models.URLField(max_length=100, blank=True, null=True, help_text="si hay url es que esta activo")
abresiempre_post = models.CharField(max_length=100, blank=True, null=True, help_text="post data en formato json")
cierra_url = models.URLField(max_length=100, blank=True, null=True, help_text="si hay url es que esta activo")
cierra_post = models.CharField(max_length=100, blank=True, null=True, help_text="post data en formato json")
def abre(self):
if self.abre_post:
r = requests.post(self.abre_url, data=json.loads(self.abre_post))
else:
r = requests.get(self.abre_url)
return r.status_code == 200
def abresiempre(self):
if self.abresiempre_post:
r = requests.post(self.abresiempre_url, data=json.loads(self.abresiempre_post))
else:
r = requests.get(self.abresiempre_url)
return r.status_code == 200
def cierra(self):
if self.cierra_post:
r = requests.post(self.cierra_url, data=json.loads(self.cierra_post))
else:
r = requests.get(self.cierra_url)
return r.status_code == 200
def __unicode__(self):
return "{} ({} de {})".format(self.slug, "entrada" if self.entrada else "salida", self.parking.nombre)
class Meta:
verbose_name = 'barrera'
verbose_name_plural = 'barreras'
class Tarifa(models.Model):
parking = models.ForeignKey(Parking)
precio = models.DecimalField(max_digits=5, decimal_places=2)
hora = models.DurationField(help_text="hora a partir de la cual aplica este precio")
def __unicode__(self):
return "{} = {:.2f} €".format(self.hora, self.precio)
class Meta:
ordering = ('hora', )
class NodoRemoto(models.Model):
parking = models.ForeignKey(Parking)
host_name = models.CharField(max_length = 100, blank = True, null = True, help_text = 'Nombre del Host')
url = models.CharField(max_length = 100, blank=True, null=True, help_text = ' url del demonio nameko' )
nombre = models.CharField(max_length=100, blank=True, null=True, help_text = 'Nombre del demonio nameko')
def __unicode__(self):
return "{} [{}]".format(self.nombre, self.url)
def comandos(self):
return self.comandoremoto_set.all()
class Meta:
verbose_name = 'Nodo Remoto'
verbose_name_plural = 'Nodos Remotos'
class ComandoRemoto(models.Model):
nombre = models.CharField(max_length = 100, blank=True, null=True, help_text = 'nombre del comando')
comando = models.CharField(max_length = 100, blank=True, null=True, help_text= 'comando')
nodoremoto = models.ForeignKey(NodoRemoto)
def __unicode__(self):
return "{}: {}.{}()".format(self.nombre, self.nodoremoto, self.comando)
class Meta:
verbose_name = 'comando Remoto'
verbose_name_plural = 'Comandos Remotos'
# from django.db.models.signals import pre_save
# from django.dispatch import receiver
# @receiver(pre_save, sender=Tarifa)
# def anula_date(sender, instance, using, **kwargs):
# if isinstance(instance, datetime.datetime):
# instance.hora = instance.hora.replace(year=1970, month=1, day=1)
class Visor(models.Model):
url = models.URLField(default="http://192.168.1.1:8000")
descripcion = models.CharField(default="visor colocado en ...", max_length=200)
parking = models.ForeignKey(Parking)
    def mostrar_importe(self, importe):
        importe_str = "{:.2f}".format(importe)
        # print("importe " + importe_str)
        try:
            r = requests.post(self.url, json={"importe": importe})
        except requests.RequestException:
            return False
        return r.status_code == 200
def __str__(self):
return self.descripcion
class Meta:
verbose_name_plural = 'Visores'
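# Hypothetical usage sketch (editor's addition, not part of the original module):
#     visor = Visor.objects.first()
#     visor.mostrar_importe(12.50)   # POSTs {"importe": 12.5} to visor.url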
| amd77/parker | inventario/models.py | Python | gpl-2.0 | 7,559 |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 17:38:50 2015
@author: deep
"""
from binaryTree import BTree, generateRandomTree, inorder
def largestBST(root):
    if root.left is None and root.right is None:
        return True, 1, root.value, root.value
    if root.left:
        isBSTL, sizeL, minL, maxL = largestBST(root.left)
    else:
        isBSTL = True
        sizeL = 0
        # empty left subtree: its min is the root's own value and its
        # max can never violate the ordering check below
        minL = root.value
        maxL = -float('inf')
    if root.right:
        isBSTR, sizeR, minR, maxR = largestBST(root.right)
    else:
        isBSTR = True
        sizeR = 0
        # empty right subtree: mirror of the case above
        maxR = root.value
        minR = float('inf')
    if isBSTL and isBSTR:
        if maxL <= root.value <= minR:
            return True, sizeL + sizeR + 1, minL, maxR
    size = max(sizeL, sizeR)
    return False, size, None, None
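# Worked example (editor's sketch): for the tree
#       2
#      / \
#     1   3
# largestBST returns (True, 3, 1, 3) -- the whole tree is a BST of size 3
# spanning values 1..3. A non-BST root yields (False, size, None, None),
# where size is the largest BST found in either subtree.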
root1 = BTree()
root1.value = 0
root2 = BTree()
root2.value = 0
generateRandomTree(root2,2)
generateRandomTree(root1,2)
root1.left.left.left = root2
inorder(root1)
print largestBST(root1)
| ddeepak6992/Algorithms | Binary-Tree/largest_BST_in_a_binary_tree.py | Python | gpl-2.0 | 953 |
from collections import OrderedDict
from rest_framework import pagination
from rest_framework.response import Response
__author__ = 'alexandreferreira'
class DetailPagination(pagination.PageNumberPagination):
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.page.paginator.count),
('next', self.get_next_page_number()),
('has_next', self.page.has_next()),
('previous', self.get_previous_page_number()),
('has_previous', self.page.has_previous()),
('current', self.page.number),
('results', data)
]))
def get_next_page_number(self):
if not self.page.has_next():
return self.page.number
return self.page.next_page_number()
def get_previous_page_number(self):
if not self.page.has_previous():
return 1
        return self.page.previous_page_number()
| alexandreferreira/namesearch-example | namesearch/pagination.py | Python | gpl-2.0 | 948 |
##
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for a GCC+CUDA compiler toolchain.
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.compiler.cuda import Cuda
from easybuild.toolchains.gcc import GccToolchain
class GccCUDA(GccToolchain, Cuda):
"""Compiler toolchain with GCC and CUDA."""
NAME = 'gcccuda'
COMPILER_MODULE_NAME = ['GCC', 'CUDA']
SUBTOOLCHAIN = GccToolchain.NAME
| pescobar/easybuild-framework | easybuild/toolchains/gcccuda.py | Python | gpl-2.0 | 1,443 |
#!/usr/bin/python3.3
import unittest
import sys
sys.path.append("/home/hazel/Documents/new_linux_paradise/paradise_office_site/sandbox_v1.0/cygnet_maker/cy_data_validation")
from datetime import time
from time_conv import Time
class TimeTestCase(unittest.TestCase):
    ''' Tests with numbered degrees of bad or good input data, on a scale from 0 (worst) to 10 (best) '''
def setUp(self):
self.time = Time.check_time("")
# A string
def test_vbad0(self):
self.time = Time.check_time("iutoeht")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# An out of range whole number
def test_bad1(self):
self.time = Time.check_time("52304")
correct_time = time(00, 00, 00)
self.assertEqual( correct_time, self.time)
# Two out of range whole numbers
def test_bad2(self):
self.time = Time.check_time("70 80")
correct_time = time(23, 59, 00)
self.assertEqual( correct_time, self.time)
# hours, minutes and seconds formatted crap
def test_middle3(self):
self.time = Time.check_time("03 - 32/74")
correct_time = time(3, 32, 59)
self.assertEqual( correct_time, self.time)
# : in between hours and minutes
def test_good4(self):
self.time = Time.check_time("03:32:50")
correct_time = time(3, 32, 50)
self.assertEqual( correct_time, self.time)
# removing am or pm
def test_vgood5(self):
self.time = Time.check_time("3:35pm")
correct_time = time(15, 35, 00)
self.assertEqual( correct_time, self.time)
def tearDown(self):
self.time = ""
correct_time = ""
if __name__ == '__main__':
unittest.main()
| paradiseOffice/sandbox_API_v1.0 | paradise_office_site/sandbox_v1.0/cygnet_maker/cy_tests/test_time.py | Python | gpl-2.0 | 1,622 |
#!/usr/bin/python3
import argparse
import traceback
import sys
import netaddr
import requests
from flask import Flask, request
from jinja2 import Environment, FileSystemLoader, TemplateNotFound
endpoints = "read/networks read/oplog read/snmp read/switches-management public/distro-tree public/config public/dhcp public/dhcp-summary public/ping public/switches public/switch-state".split()
objects = {}
def getEndpoint(endpoint):
r = requests.get("http://localhost:80/api/{}".format(endpoint))
if r.status_code != 200:
raise Exception("Bad status code for endpoint {}: {}".format(endpoint, r.status_code))
return r.json()
def updateData():
for a in endpoints:
objects[a] = getEndpoint(a)
env = Environment(loader=FileSystemLoader([]), trim_blocks=True)
env.filters["netmask"] = lambda ip: netaddr.IPNetwork(ip).netmask
env.filters["cidr"] = lambda ip: netaddr.IPNetwork(ip).prefixlen
env.filters["networkId"] = lambda ip: netaddr.IPNetwork(ip).ip
env.filters["getFirstDhcpIp"] = lambda ip: netaddr.IPNetwork(ip)[3]
env.filters["getLastDhcpIp"] = lambda ip: netaddr.IPNetwork(ip)[-1]
env.filters["agentDistro"] = lambda src: src.split(":")[0]
env.filters["agentPort"] = lambda src: src.split(":")[1]
env.filters["getFirstFapIP"] = lambda ip: netaddr.IPNetwork(ip)[netaddr.IPNetwork(ip).size / 2]
app = Flask(__name__)
@app.after_request
def add_header(response):
if response.status_code == 200:
response.cache_control.max_age = 5
response.cache_control.s_maxage = 1
return response
@app.route("/<path>", methods=["GET"])
def root_get(path):
updateData()
try:
template = env.get_template(path)
body = template.render(objects=objects, options=request.args)
except TemplateNotFound:
return 'Template "{}" not found\n'.format(path), 404
except Exception as err:
return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400
return body, 200
@app.route("/<path>", methods=["POST"])
def root_post(path):
updateData()
try:
content = request.stream.read(int(request.headers["Content-Length"]))
template = env.from_string(content.decode("utf-8"))
body = template.render(objects=objects, options=request.args)
except Exception as err:
return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400
return body, 200
parser = argparse.ArgumentParser(description="Process templates for gondul.", add_help=False)
parser.add_argument("-t", "--templates", type=str, nargs="+", help="location of templates")
parser.add_argument("-h", "--host", type=str, default="127.0.0.1", help="host address")
parser.add_argument("-p", "--port", type=int, default=8080, help="host port")
parser.add_argument("-d", "--debug", action="store_true", help="enable debug mode")
args = parser.parse_args()
env.loader.searchpath = args.templates
if not sys.argv[1:]:
parser.print_help()
app.run(host=args.host, port=args.port, debug=args.debug)
| tech-server/gondul | templating/templating.py | Python | gpl-2.0 | 3,215 |
from datetime import datetime
from grazyna.utils import register
@register(cmd='weekend')
def weekend(bot):
"""
Answer to timeless question - are we at .weekend, yet?
"""
current_date = datetime.now()
day = current_date.weekday()
nick = bot.user.nick
    if day in (5, 6):
        # "Of course %s - it's the weekend. We only discuss light topics, ok?"
        answer = "Oczywiście %s - jest weekend. Omawiamy tylko lajtowe tematy, ok?" % nick
    else:
        str_day = datetime.strftime(current_date, "%A")
        # "%s - it's only %s, you still have to wait..."
        answer = "%s - dopiero %s, musisz jeszcze poczekać..." % (nick, str_day)
bot.reply(answer)
| firemark/grazyna | grazyna/plugins/weekend.py | Python | gpl-2.0 | 562 |
from kvmap.code.projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
try:
from pyproj import Proj
from xml.etree import ElementTree as ET
except:
pass
class WMSOverlayServer(object):
cache = {}
available_maptype = dict(roadmap='Roadmap') # default
type = "wms"
'''Generic WMS server'''
def __init__(self, progress_callback=None):
self.progress_callback = progress_callback
def setProgressCallback(self, progress_callback):
self.progress_callback = progress_callback
def getInfo(self, lat, lon, epsilon):
return None
def get(self, parent, width, height):
self.bl = parent.bottom_left
self.tr = parent.top_right
self.zoom = parent.zoom
url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1], self.zoom, width, height)
if not url:
return None
key = hashlib.md5(url).hexdigest()
if key in self.cache:
return self.cache[key]
        try:
            image = Loader.image('http://' + self.provider_host + url, progress_callback=self.progress_callback)
            self.cache[key] = image
        except Exception, e:
            Logger.error('OverlayServer could not find (or read) image %s [%s]' % (url, e))
            image = None
        return image
def getLegendGraphic(self):
if self.legend is None and not self.triedlegend:
self.triedlegend = True
layer = self.layer
if "," in layer:
layer = layer[layer.rindex(",") + 1:]
if self.legendlayer:
layer = self.legendlayer
url = self.baseurl + "?REQUEST=GetLegendGraphic&VERSION=1.0.0&FORMAT=image/png&LAYER=%s&ext=.png" % (layer)
try:
print 'http://' + self.provider_host + url
image = Loader.image('http://' + self.provider_host + url)
self.legend = image
except Exception, e:
Logger.error('OverlayServer could not find LEGENDGRAPHICS for %s %s' % (self.baseurl, layer))
return self.legend
def xy_to_co(self, lat, lon):
if self.customBounds:
x, y = latlon_to_custom(lat, lon, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
x, y = lon, lat
elif self.isPGoogle: # patch for android - does not require pyproj library
x, y = latlon_to_google (lat, lon)
else:
x, y = transform(pLatlon, self.projection, lon, lat)
return x, y
    def co_to_ll(self, x, y):
        if self.customBounds:
            # invert xy_to_co: map the given x, y back through the custom bounds
            u, v = custom_to_unit(x, y, self.bounds)
            l, m = unit_to_latlon(u, v)
elif self.isPLatLon: # patch for android - does not require pyproj library
l, m = y, x
elif self.isPGoogle: # patch for android - does not require pyproj library
l, m = google_to_latlon (y, x)
else:
l, m = transform(self.projection, pLatlon, y, x)
return l, m
def geturl(self, lat1, lon1, lat2, lon2, zoom, w, h):
try:
x1, y1 = self.xy_to_co(lat1, lon1)
x2, y2 = self.xy_to_co(lat2, lon2)
return self.url + "&BBOX=%f,%f,%f,%f&WIDTH=%i&HEIGHT=%i&ext=.png" % (x1, y1, x2, y2, w, h)
except RuntimeError, e:
return None
def parseLayer(self, layer, data):
try:
name = layer.find("Name").text
except:
name = None
srss = layer.findall("SRS")
if name: # and srss:
data[name] = map(lambda x:x.text, srss)
if self.debug:
print "Provider %s provides layer %s in projections %s" % (self.provider_host, name, data[name])
subs = layer.findall("Layer")
for sub in subs:
self.parseLayer(sub, data)
def initFromGetCapabilities(self, host, baseurl, layer=None, index=0, srs=None):
self.debug = (layer == None) and (index == 0)
# GetCapabilities (Layers + SRS)
if layer is None or srs is None:
capabilities = urlopen(host + baseurl + "?SERVICE=WMS&VERSION=1.1.1&Request=GetCapabilities").read().strip()
try:
tree = ET.fromstring(capabilities)
if self.debug:
ET.dump(tree)
layers = tree.findall("Capability/Layer") # TODO: proper parsing of cascading layers and their SRS
data = {}
for l in layers:
self.parseLayer(l, data)
# Choose Layer and SRS by (alphabetical) index
if layer is None:
layer = sorted(data.keys())[index]
if srs is None:
srs = sorted(data[layer])[0]
except:
pass
print "Displaying from %s/%s: layer %s in SRS %s." % (host, baseurl, layer, srs)
# generate tile URL and init projection by EPSG code
self.layer = layer
self.baseurl = baseurl
self.url = baseurl + "?LAYERS=%s&SRS=%s&FORMAT=image/png&TRANSPARENT=TRUE&SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap&STYLES=" % (layer, srs)
self.isPGoogle = False
self.isPLatLon = False
self.legend = None
self.legendlayer = None
self.triedlegend = False
if srs == "EPSG:4326":
self.isPLatLon = True
elif srs == "EPSG:900913" or srs == "EPSG:3857":
self.isPGoogle = True
try:
self.projection = pGoogle
except:
pass
else:
try:
self.projection = Proj(init=srs)
except:
pass
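# Hypothetical usage sketch (editor's addition; host and path are assumptions):
#     overlay = WMSOverlayServer()
#     overlay.provider_host = 'wms.example.org'
#     overlay.initFromGetCapabilities('http://wms.example.org', '/wms')
#     legend = overlay.getLegendGraphic()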
| jchome/LocalGuide-Mobile | kvmap/overlays/WMSOverlayServer.py | Python | gpl-2.0 | 5,614 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Luis López <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import unittest
from ldotcommons import config
class TestRecord(unittest.TestCase):
def setUp(self):
pass
def test_init_with_args(self):
a = config.Record({'foo': 1, 'bar': 'x'})
self.assertEqual(a.get('foo'), 1)
b = config.Record()
b.set('foo', 1)
b.set('bar', 'x')
self.assertEqual(a, b)
def test_setget(self):
s = config.Record()
s.set('foo', 1)
s.set('bar', 'x')
s.set('x.y', [])
self.assertEqual(s.get('foo'), 1)
self.assertEqual(s.get('bar'), 'x')
self.assertEqual(s.get('x.y'), [])
def test_nonexistent_key(self):
s = config.Record()
with self.assertRaises(KeyError):
s.get('foo')
def test_delete(self):
s = config.Record()
s.set('foo', 1)
s.set('foo.bar', 2)
s.delete('foo')
with self.assertRaises(KeyError):
s.get('foo.bar')
with self.assertRaises(KeyError):
s.get('foo')
def test_eq(self):
data = {
'foo': 1,
'x.y': 'z',
'dict': {'a': 'b'}
}
a = config.Record(**data.copy())
b = config.Record(**data.copy())
self.assertEqual(a, b)
def test_sub(self):
x = config.Record({
'foo': 1,
'bar.x': 'x',
'bar.y': 'y',
})
y = config.Record({
'x': 'x',
'y': 'y',
})
self.assertEqual(x.sub('bar'), y)
def test_children(self):
x = config.Record({
'foo': 1,
'bar.x': 'x',
'bar.y': 'y',
})
self.assertEqual(set(x.children('bar')), set(['x', 'y']))
class TestRecordAttr(unittest.TestCase):
def test_getset(self):
x = config.RecordAttr({'foo': 1, 'bar': 'x', 'a.b': 2})
self.assertEqual(x.foo, 1)
self.assertEqual(x.a.b, 2)
if __name__ == '__main__':
unittest.main()
| ldotlopez/appkit | tests/config.py | Python | gpl-2.0 | 2,801 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bugman.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| aliasav/Bugman | bugman/manage.py | Python | gpl-2.0 | 249 |
import ctypes.wintypes as ctypes
import braille
import brailleInput
import globalPluginHandler
import scriptHandler
import inputCore
import api
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
MAPVK_VK_TO_VSC = 0
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENT_SCANCODE = 0x0008
KEYEVENTF_UNICODE = 0x0004
class MOUSEINPUT(ctypes.Structure):
_fields_ = (
('dx', ctypes.c_long),
('dy', ctypes.c_long),
('mouseData', ctypes.DWORD),
('dwFlags', ctypes.DWORD),
('time', ctypes.DWORD),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)),
)
class KEYBDINPUT(ctypes.Structure):
_fields_ = (
('wVk', ctypes.WORD),
('wScan', ctypes.WORD),
('dwFlags', ctypes.DWORD),
('time', ctypes.DWORD),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)),
)
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (
('uMsg', ctypes.DWORD),
('wParamL', ctypes.WORD),
('wParamH', ctypes.WORD),
)
class INPUTUnion(ctypes.Union):
_fields_ = (
('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT),
)
class INPUT(ctypes.Structure):
_fields_ = (
('type', ctypes.DWORD),
('union', INPUTUnion))
class BrailleInputGesture(braille.BrailleDisplayGesture, brailleInput.BrailleInputGesture):
def __init__(self, **kwargs):
super(BrailleInputGesture, self).__init__()
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.source="remote{}{}".format(self.source[0].upper(),self.source[1:])
self.scriptPath=getattr(self,"scriptPath",None)
self.script=self.findScript() if self.scriptPath else None
def findScript(self):
if not (isinstance(self.scriptPath,list) and len(self.scriptPath)==3):
return None
module,cls,scriptName=self.scriptPath
focus = api.getFocusObject()
if not focus:
return None
if scriptName.startswith("kb:"):
# Emulate a key press.
return scriptHandler._makeKbEmulateScript(scriptName)
import globalCommands
# Global plugin level.
if cls=='GlobalPlugin':
for plugin in globalPluginHandler.runningPlugins:
if module==plugin.__module__:
func = getattr(plugin, "script_%s" % scriptName, None)
if func:
return func
# App module level.
app = focus.appModule
if app and cls=='AppModule' and module==app.__module__:
func = getattr(app, "script_%s" % scriptName, None)
if func:
return func
# Tree interceptor level.
treeInterceptor = focus.treeInterceptor
if treeInterceptor and treeInterceptor.isReady:
func = getattr(treeInterceptor , "script_%s" % scriptName, None)
			# We are not keyboard input
return func
# NVDAObject level.
func = getattr(focus, "script_%s" % scriptName, None)
if func:
return func
for obj in reversed(api.getFocusAncestors()):
func = getattr(obj, "script_%s" % scriptName, None)
if func and getattr(func, 'canPropagate', False):
return func
# Global commands.
func = getattr(globalCommands.commands, "script_%s" % scriptName, None)
if func:
return func
return None
def send_key(vk=None, scan=None, extended=False, pressed=True):
i = INPUT()
i.union.ki.wVk = vk
if scan:
i.union.ki.wScan = scan
else: #No scancode provided, try to get one
i.union.ki.wScan = ctypes.windll.user32.MapVirtualKeyW(vk, MAPVK_VK_TO_VSC)
if not pressed:
i.union.ki.dwFlags |= KEYEVENTF_KEYUP
if extended:
i.union.ki.dwFlags |= KEYEVENTF_EXTENDEDKEY
i.type = INPUT_KEYBOARD
ctypes.windll.user32.SendInput(1, ctypes.byref(i), ctypes.sizeof(INPUT))
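# Hypothetical usage sketch (editor's addition, not part of the original module):
# emulate pressing and releasing the Return key.
#     VK_RETURN = 0x0D
#     send_key(vk=VK_RETURN, pressed=True)   # key down
#     send_key(vk=VK_RETURN, pressed=False)  # key up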
| nishimotz/NVDARemote | addon/globalPlugins/remoteClient/input.py | Python | gpl-2.0 | 3,588 |
# -*- coding: utf-8 -*-
from imio.history.config import HISTORY_COMMENT_NOT_VIEWABLE
from imio.history.interfaces import IImioHistory
from imio.history.testing import IntegrationTestCase
from plone import api
from plone.memoize.instance import Memojito
from Products.Five.browser import BrowserView
from zope.component import getAdapter
from zope.component import getMultiAdapter
from zope.viewlet.interfaces import IViewletManager
class TestDocumentByLineViewlet(IntegrationTestCase):
def setUp(self):
super(TestDocumentByLineViewlet, self).setUp()
# get the viewlet
doc = api.content.create(type='Document',
id='doc',
container=self.portal)
view = BrowserView(doc, self.portal.REQUEST)
manager = getMultiAdapter(
(doc, self.portal.REQUEST, view),
IViewletManager,
'plone.belowcontenttitle')
manager.update()
self.viewlet = manager.get(u'imio.history.documentbyline')
def test_show_history(self):
"""Test the show_history method.
The history is shown in every case except if 'ajax_load' is found in the REQUEST."""
self.assertTrue(self.viewlet.show_history())
# show_history is False if displayed in a popup, aka 'ajax_load' in the REQUEST
self.portal.REQUEST.set('ajax_load', True)
self.assertFalse(self.viewlet.show_history())
def test_highlight_history_link(self):
"""Test the highlight_history_link method.
History link will be highlighted if last event had a comment and
if that comment is not an ignorable comment."""
adapter = getAdapter(self.portal.doc, IImioHistory, 'workflow')
# not highlighted because '' is an ignored comment
history = adapter.getHistory()
self.assertFalse(history[-1]['comments'])
self.assertFalse(self.viewlet.highlight_history_link())
# now 'publish' the doc and add a comment, last event has a comment
self.wft.doActionFor(self.portal.doc, 'publish', comment='my publish comment')
# clean memoize
getattr(adapter, Memojito.propname).clear()
history = adapter.getHistory()
self.assertTrue(self.viewlet.highlight_history_link())
self.assertFalse(history[-1]['comments'] in adapter.ignorableHistoryComments())
        # now test that the 'you can not access this comment' message is ignored
self.wft.doActionFor(self.portal.doc, 'retract', comment=HISTORY_COMMENT_NOT_VIEWABLE)
getattr(adapter, Memojito.propname).clear()
history = adapter.getHistory()
self.assertFalse(self.viewlet.highlight_history_link())
self.assertTrue(history[-1]['comments'] in adapter.ignorableHistoryComments())
        # test that it works if there is no history
        # this is the case if the workflow in use was changed
self.wft.setChainForPortalTypes(('Document', ), ('intranet_workflow',))
getattr(adapter, Memojito.propname).clear()
history = adapter.getHistory()
self.assertFalse(self.viewlet.highlight_history_link())
self.assertTrue(history == [])
| IMIO/imio.history | src/imio/history/tests/test_documentbylineviewlet.py | Python | gpl-2.0 | 3,189 |
from django import template
from django.template.loader_tags import BaseIncludeNode
from django.template import Template
from django.conf import settings
from pages.plugins import html_to_template_text, SearchBoxNode
from pages.plugins import LinkNode, EmbedCodeNode
from pages import models
from django.utils.text import unescape_string_literal
from pages.models import Page, slugify
from django.core.urlresolvers import reverse
register = template.Library()
@register.filter
def name_to_url(value):
return models.name_to_url(value)
name_to_url.is_safe = True
class PageContentNode(BaseIncludeNode):
def __init__(self, html_var, render_plugins=True, *args, **kwargs):
super(PageContentNode, self).__init__(*args, **kwargs)
self.html_var = template.Variable(html_var)
self.render_plugins = render_plugins
def render(self, context):
try:
html = unicode(self.html_var.resolve(context))
t = Template(html_to_template_text(html, context,
self.render_plugins))
return self.render_template(t, context)
except:
if settings.TEMPLATE_DEBUG:
raise
return ''
class IncludeContentNode(BaseIncludeNode):
"""
Base class for including some named content inside a other content.
Subclass and override get_content() and get_title() to return HTML or None.
The name of the content to include is stored in self.name
All other parameters are stored in self.args, without quotes (if any).
"""
def __init__(self, parser, token, *args, **kwargs):
super(IncludeContentNode, self).__init__(*args, **kwargs)
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError, ('%r tag requires at least one'
' argument' % token.contents.split()[0])
self.args = []
for b in bits[1:]:
if is_quoted(b):
b = unescape_string_literal(b)
self.args.append(b)
self.name = self.args.pop(0)
def get_content(self, context):
""" Override this to return content to be included. """
return None
def get_title(self, context):
""" Override this to return a title or None to omit it. """
return self.name
def render(self, context):
try:
template_text = ''
if 'showtitle' in self.args:
title = self.get_title(context)
if title:
template_text += '<h2>%s</h2>' % title
template_text += self.get_content(context)
template = Template(template_text)
return self.render_template(template, context)
except:
if settings.TEMPLATE_DEBUG:
raise
return ''
class IncludePageNode(IncludeContentNode):
def __init__(self, *args, **kwargs):
super(IncludePageNode, self).__init__(*args, **kwargs)
try:
self.page = Page.objects.get(slug__exact=slugify(self.name))
except Page.DoesNotExist:
self.page = None
def get_title(self, context):
if not self.page:
return None
return ('<a href="%s">%s</a>'
% (self.get_page_url(), self.page.name))
def get_page_url(self):
if self.page:
slug = self.page.pretty_slug
else:
slug = name_to_url(self.name)
return reverse('pages:show', args=[slug])
def get_content(self, context):
if not self.page:
return ('<p class="plugin includepage">Unable to include '
'<a href="%s" class="missing_link">%s</a></p>'
% (self.get_page_url(), self.name))
# prevent endless loops
context_page = context['page']
include_stack = context.get('_include_stack', [])
include_stack.append(context_page.name)
if self.page.name in include_stack:
return ('<p class="plugin includepage">Unable to'
' include <a href="%s">%s</a>: endless include'
' loop.</p>' % (self.get_page_url(),
self.page.name))
context['_include_stack'] = include_stack
context['page'] = self.page
template_text = html_to_template_text(self.page.content, context)
# restore context
context['_include_stack'].pop()
context['page'] = context_page
return template_text
@register.tag(name='render_plugins')
def do_render_plugins(parser, token, render_plugins=True):
"""
Render tags and plugins
"""
try:
tag, html_var = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, ("%r tag requires one argument" %
token.contents.split()[0])
return PageContentNode(html_var, render_plugins)
@register.tag(name='render_tags')
def do_render_tags(parser, token):
"""
Render tags only, does not render plugins
"""
return do_render_plugins(parser, token, render_plugins=False)
@register.tag(name='include_page')
def do_include_page(parser, token):
return IncludePageNode(parser, token)
def is_quoted(text):
return text[0] == text[-1] and text[0] in ('"', "'")
@register.tag(name='embed_code')
def do_embed_code(parser, token):
nodelist = parser.parse(('endembed_code',))
parser.delete_first_token()
return EmbedCodeNode(nodelist)
@register.tag(name='searchbox')
def do_searchbox(parser, token):
try:
tag, query = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('%r tag requires one argument' %
token.contents.split()[0])
if not is_quoted(query):
raise template.TemplateSyntaxError(
"%r tag's argument should be in quotes" %
token.contents.split()[0])
return SearchBoxNode(query=unescape_string_literal(query))
@register.tag(name='link')
def do_link(parser, token):
try:
tag, href = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires one argument" %
token.contents.split()[0])
if not is_quoted(href):
raise template.TemplateSyntaxError(
"%r tag's argument should be in quotes" %
token.contents.split()[0])
nodelist = parser.parse(('endlink',))
parser.delete_first_token()
return LinkNode(unescape_string_literal(href), nodelist)
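# Hypothetical template usage (editor's sketch; page name and href are assumptions):
#     {% load pages_tags %}
#     {% render_plugins page.content %}
#     {% include_page "Front Page" showtitle %}
#     {% link "/front-page" %}Front Page{% endlink %}
#     {% searchbox "Search this site" %}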
| mivanov/editkit | editkit/pages/templatetags/pages_tags.py | Python | gpl-2.0 | 6,764 |
from opencvBuilder import exists, generate
| bverhagen/openCV-sconsbuilder | opencvBuilder/__init__.py | Python | gpl-2.0 | 42 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "umiss_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| CadeiraCuidadora/UMISS-backend | umiss_project/manage.py | Python | gpl-3.0 | 811 |
#!C:\Python27\
"""th_logger.py holds logging handler and config for the Regression test"""
import logging
from testProperty import TEST_OUTPUT_PATH
test_logger = logging.getLogger('TEST_HARNESS')
handler = logging.FileHandler(TEST_OUTPUT_PATH + 'runTest.log')
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-6s %(message)s')
handler.setFormatter(formatter)
test_logger.addHandler(handler)
test_logger.setLevel(logging.DEBUG)
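# Hypothetical usage sketch (editor's addition, not part of the original module):
#     from th_logger import test_logger
#     test_logger.info("regression run started")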
| roy-boy/python_scripts | th_logger.py | Python | gpl-3.0 | 447 |
'''
*******************************************************************************
* ButtonEvent.py is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ButtonEvent.py is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ButtonEvent.py. If not, see <http://www.gnu.org/licenses/>.
********************************************************************************
Created on Jan 5, 2010
@author: iocanto
'''
BUTTON_SELECT = 257
BUTTON_HOTKEY_1 = 258;
BUTTON_HOTKEY_2 = 259;
BUTTON_HOTKEY_3 = 260;
BUTTON_HOTKEY_4 = 261;
BUTTON_RIGHT = 262;
BUTTON_LEFT = 263;
BUTTON_UP = 264;
BUTTON_DOWN = 265;
KEY_UP = 0
KEY_DOWN = 1
class ButtonEvent():
# Constructor
def __init__(self, button = BUTTON_HOTKEY_1, action = KEY_UP ):
self.__button = button
self.__action = action
def __str__ (self):
return "ButtonEvent [__button %i]" % self.__button
def getAction(self):
return self.__action
def getButton(self):
return self.__button
def getButtonName(self):
return { 257 : "BUTTON_SELECT" ,
258 : "BUTTON_HOTKEY_1",
259 : "BUTTON_HOTKEY_2",
260 : "BUTTON_HOTKEY_3",
261 : "BUTTON_HOTKEY_4",
262 : "BUTTON_RIGHT" ,
263 : "BUTTON_LEFT" ,
264 : "BUTTON_UP" ,
265 : "BUTTON_DOWN" ,
}[self.__button]
def setAction(self, action):
self.__action = action
def setButton(self, button):
self.__button = button
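# Hypothetical usage sketch (editor's addition, not part of the original module):
#     event = ButtonEvent(BUTTON_UP, KEY_DOWN)
#     print event.getButtonName()  # -> "BUTTON_UP"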
| iocanto/bug-python-libraries | ButtonEvent.py | Python | gpl-3.0 | 2,246 |
#!/usr/bin/env python
# coding=utf-8
"""
Distinct powers
Problem 29
Consider all integer combinations of a^b for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
If they are then placed in numerical order, with any repeats removed, we get
the following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a^b for 2 ≤ a ≤ 100
and 2 ≤ b ≤ 100?
"""
from __future__ import print_function
def power_combinations(a, b):
for i in range(2, a):
for j in range(2, b):
yield i ** j
if __name__ == '__main__':
print(len(set(power_combinations(101, 101)))) # 9183
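    # Sanity check (editor's note): without deduplication there would be
    # 99 * 99 = 9801 terms; removing the 618 repeats leaves 9183.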
| openqt/algorithms | projecteuler/ac/old/pe029_distinct_powers.py | Python | gpl-3.0 | 823 |
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
from copy import deepcopy
import h5py
from .util import Signal
from .util.ImgCorrection import CbnCorrection, ObliqueAngleDetectorAbsorptionCorrection
from .util import Pattern
from .util.calc import convert_units
from . import ImgModel, CalibrationModel, MaskModel, PatternModel, BatchModel
from .CalibrationModel import DetectorModes
class Configuration(object):
"""
The configuration class contains a working combination of an ImgModel, PatternModel, MaskModel and CalibrationModel.
    It handles the core data manipulation of Dioptas.
The management of multiple Configurations is done by the DioptasModel.
"""
def __init__(self, working_directories=None):
super(Configuration, self).__init__()
self.img_model = ImgModel()
self.mask_model = MaskModel()
self.calibration_model = CalibrationModel(self.img_model)
self.batch_model = BatchModel(self.calibration_model, self.mask_model)
self.pattern_model = PatternModel()
if working_directories is None:
self.working_directories = {'calibration': '', 'mask': '', 'image': os.path.expanduser("~"), 'pattern': '',
'overlay': '', 'phase': '', 'batch': os.path.expanduser("~")}
else:
self.working_directories = working_directories
self.use_mask = False
self.transparent_mask = False
self._integration_rad_points = None
self._integration_unit = '2th_deg'
self._oned_azimuth_range = None
self._cake_azimuth_points = 360
self._cake_azimuth_range = None
self._auto_integrate_pattern = True
self._auto_integrate_cake = False
self.auto_save_integrated_pattern = False
self.integrated_patterns_file_formats = ['.xy']
self.cake_changed = Signal()
self._connect_signals()
def _connect_signals(self):
"""
Connects the img_changed signal to responding functions.
"""
self.img_model.img_changed.connect(self.update_mask_dimension)
self.img_model.img_changed.connect(self.integrate_image_1d)
def integrate_image_1d(self):
"""
        Integrates the image in the ImageModel to a Pattern. Will also automatically save the integrated pattern if
        auto_save_integrated_pattern is True.
"""
if self.calibration_model.is_calibrated:
if self.use_mask:
mask = self.mask_model.get_mask()
elif self.mask_model.roi is not None:
mask = self.mask_model.roi_mask
else:
mask = None
x, y = self.calibration_model.integrate_1d(azi_range=self.oned_azimuth_range, mask=mask, unit=self.integration_unit,
num_points=self.integration_rad_points)
            self.pattern_model.set_pattern(x, y, self.img_model.filename, unit=self.integration_unit)
if self.auto_save_integrated_pattern:
self._auto_save_patterns()
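    # Hypothetical usage sketch (editor's addition; file names and method names assumed):
    #     config = Configuration()
    #     config.calibration_model.load('calibration.poni')
    #     config.img_model.load('image.tif')   # img_changed triggers integrate_image_1d()
    #     x, y = config.pattern_model.pattern.data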
def integrate_image_2d(self):
"""
Integrates the image in the ImageModel to a Cake.
"""
if self.use_mask:
mask = self.mask_model.get_mask()
elif self.mask_model.roi is not None:
mask = self.mask_model.roi_mask
else:
mask = None
self.calibration_model.integrate_2d(mask=mask,
rad_points=self._integration_rad_points,
azimuth_points=self._cake_azimuth_points,
azimuth_range=self._cake_azimuth_range)
self.cake_changed.emit()
def save_pattern(self, filename=None, subtract_background=False):
"""
Saves the current integrated pattern. The format depends on the file ending. Possible file formats:
[*.xy, *.chi, *.dat, *.fxye]
:param filename: where to save the file
        :param subtract_background: flag indicating whether the pattern should be saved with or without the background subtracted
"""
if filename is None:
filename = self.img_model.filename
if filename.endswith('.xy'):
self.pattern_model.save_pattern(filename, header=self._create_xy_header(),
subtract_background=subtract_background)
elif filename.endswith('.fxye'):
self.pattern_model.save_pattern(filename, header=self._create_fxye_header(filename),
subtract_background=subtract_background)
else:
self.pattern_model.save_pattern(filename, subtract_background=subtract_background)
def save_background_pattern(self, filename=None):
"""
Saves the current fit background as a pattern. The format depends on the file ending. Possible file formats:
[*.xy, *.chi, *.dat, *.fxye]
"""
if filename is None:
filename = self.img_model.filename
if filename.endswith('.xy'):
self.pattern_model.save_auto_background_as_pattern(filename, header=self._create_xy_header())
elif filename.endswith('.fxye'):
self.pattern_model.save_auto_background_as_pattern(filename, header=self._create_fxye_header(filename))
else:
self.pattern_model.save_pattern(filename)
def _create_xy_header(self):
"""
Creates the header for the xy file format (contains information about calibration parameters).
:return: header string
"""
header = self.calibration_model.create_file_header()
header = header.replace('\r\n', '\n')
header = header + '\n#\n# ' + self._integration_unit + '\t I'
return header
def _create_fxye_header(self, filename):
"""
Creates the header for the fxye file format (used by GSAS and GSAS-II) containing the calibration information
:return: header string
"""
header = 'Generated file ' + filename + ' using DIOPTAS\n'
header = header + self.calibration_model.create_file_header()
unit = self._integration_unit
lam = self.calibration_model.wavelength
if unit == 'q_A^-1':
con = 'CONQ'
else:
con = 'CONS'
header = header + '\nBANK\t1\tNUM_POINTS\tNUM_POINTS ' + con + '\tMIN_X_VAL\tSTEP_X_VAL ' + \
'{0:.5g}'.format(lam * 1e10) + ' 0.0 FXYE'
return header
def _auto_save_patterns(self):
"""
        Saves the current pattern in the pattern working directory (specified in
        self.working_directories['pattern']). When background subtraction is enabled in the pattern model, the
        pattern is saved both without background subtraction and, in a 'bkg_subtracted' sub-folder, with it.
"""
for file_ending in self.integrated_patterns_file_formats:
filename = os.path.join(
self.working_directories['pattern'],
os.path.basename(str(self.img_model.filename)).split('.')[:-1][0] + file_ending)
filename = filename.replace('\\', '/')
self.save_pattern(filename)
if self.pattern_model.pattern.has_background():
for file_ending in self.integrated_patterns_file_formats:
directory = os.path.join(self.working_directories['pattern'], 'bkg_subtracted')
if not os.path.exists(directory):
os.mkdir(directory)
filename = os.path.join(directory, self.pattern_model.pattern.name + file_ending)
filename = filename.replace('\\', '/')
self.save_pattern(filename, subtract_background=True)
def update_mask_dimension(self):
"""
Updates the shape of the mask in the MaskModel to the shape of the image in the ImageModel.
"""
self.mask_model.set_dimension(self.img_model._img_data.shape)
@property
def integration_rad_points(self):
return self._integration_rad_points
@integration_rad_points.setter
def integration_rad_points(self, new_value):
self._integration_rad_points = new_value
self.integrate_image_1d()
if self.auto_integrate_cake:
self.integrate_image_2d()
@property
def cake_azimuth_points(self):
return self._cake_azimuth_points
@cake_azimuth_points.setter
def cake_azimuth_points(self, new_value):
self._cake_azimuth_points = new_value
if self.auto_integrate_cake:
self.integrate_image_2d()
@property
def cake_azimuth_range(self):
return self._cake_azimuth_range
@cake_azimuth_range.setter
def cake_azimuth_range(self, new_value):
self._cake_azimuth_range = new_value
if self.auto_integrate_cake:
self.integrate_image_2d()
@property
def oned_azimuth_range(self):
return self._oned_azimuth_range
@oned_azimuth_range.setter
def oned_azimuth_range(self, new_value):
self._oned_azimuth_range = new_value
if self.auto_integrate_pattern:
self.integrate_image_1d()
@property
def integration_unit(self):
return self._integration_unit
@integration_unit.setter
def integration_unit(self, new_unit):
old_unit = self.integration_unit
self._integration_unit = new_unit
auto_bg_subtraction = self.pattern_model.pattern.auto_background_subtraction
if auto_bg_subtraction:
self.pattern_model.pattern.auto_background_subtraction = False
self.integrate_image_1d()
self.update_auto_background_parameters_unit(old_unit, new_unit)
if auto_bg_subtraction:
self.pattern_model.pattern.auto_background_subtraction = True
self.pattern_model.pattern.recalculate_pattern()
self.pattern_model.pattern_changed.emit()
@property
def correct_solid_angle(self):
return self.calibration_model.correct_solid_angle
@correct_solid_angle.setter
def correct_solid_angle(self, new_val):
self.calibration_model.correct_solid_angle = new_val
if self.auto_integrate_pattern:
self.integrate_image_1d()
if self._auto_integrate_cake:
self.integrate_image_2d()
def update_auto_background_parameters_unit(self, old_unit, new_unit):
"""
This handles the changes for the auto background subtraction parameters in the PatternModel when the integration
unit is changed.
:param old_unit: possible values are '2th_deg', 'q_A^-1', 'd_A'
:param new_unit: possible values are '2th_deg', 'q_A^-1', 'd_A'
"""
par_0 = convert_units(self.pattern_model.pattern.auto_background_subtraction_parameters[0],
self.calibration_model.wavelength,
old_unit,
new_unit)
        # A value of 0.1 lets the background subtraction algorithm run without crashing.
if np.isnan(par_0):
par_0 = 0.1
self.pattern_model.pattern.auto_background_subtraction_parameters = \
par_0, \
self.pattern_model.pattern.auto_background_subtraction_parameters[1], \
self.pattern_model.pattern.auto_background_subtraction_parameters[2]
if self.pattern_model.pattern.auto_background_subtraction_roi is not None:
self.pattern_model.pattern.auto_background_subtraction_roi = \
convert_units(self.pattern_model.pattern.auto_background_subtraction_roi[0],
self.calibration_model.wavelength,
old_unit,
new_unit), \
convert_units(self.pattern_model.pattern.auto_background_subtraction_roi[1],
self.calibration_model.wavelength,
old_unit,
new_unit)
@property
def auto_integrate_cake(self):
return self._auto_integrate_cake
@auto_integrate_cake.setter
def auto_integrate_cake(self, new_value):
if self._auto_integrate_cake == new_value:
return
self._auto_integrate_cake = new_value
if new_value:
self.img_model.img_changed.connect(self.integrate_image_2d)
else:
self.img_model.img_changed.disconnect(self.integrate_image_2d)
@property
def auto_integrate_pattern(self):
return self._auto_integrate_pattern
@auto_integrate_pattern.setter
def auto_integrate_pattern(self, new_value):
if self._auto_integrate_pattern == new_value:
return
self._auto_integrate_pattern = new_value
if new_value:
self.img_model.img_changed.connect(self.integrate_image_1d)
else:
self.img_model.img_changed.disconnect(self.integrate_image_1d)
@property
def cake_img(self):
return self.calibration_model.cake_img
@property
def roi(self):
return self.mask_model.roi
@roi.setter
def roi(self, new_val):
self.mask_model.roi = new_val
self.integrate_image_1d()
def copy(self):
"""
        Creates a copy of the current configuration
:return: copied configuration
:rtype: Configuration
"""
new_configuration = Configuration(self.working_directories)
new_configuration.img_model._img_data = self.img_model._img_data
new_configuration.img_model.img_transformations = deepcopy(self.img_model.img_transformations)
new_configuration.calibration_model.set_pyFAI(self.calibration_model.get_calibration_parameter()[0])
new_configuration.integrate_image_1d()
return new_configuration
def save_in_hdf5(self, hdf5_group):
"""
Saves the configuration group in the given hdf5_group.
:type hdf5_group: h5py.Group
"""
f = hdf5_group
# save general information
general_information = f.create_group('general_information')
# integration parameters:
general_information.attrs['integration_unit'] = self.integration_unit
if self.integration_rad_points:
general_information.attrs['integration_num_points'] = self.integration_rad_points
else:
general_information.attrs['integration_num_points'] = 0
# cake parameters:
general_information.attrs['auto_integrate_cake'] = self.auto_integrate_cake
general_information.attrs['cake_azimuth_points'] = self.cake_azimuth_points
if self.cake_azimuth_range is None:
general_information.attrs['cake_azimuth_range'] = "None"
else:
general_information.attrs['cake_azimuth_range'] = self.cake_azimuth_range
# mask parameters
general_information.attrs['use_mask'] = self.use_mask
general_information.attrs['transparent_mask'] = self.transparent_mask
# auto save parameters
general_information.attrs['auto_save_integrated_pattern'] = self.auto_save_integrated_pattern
formats = [n.encode('ascii', 'ignore') for n in self.integrated_patterns_file_formats]
general_information.create_dataset('integrated_patterns_file_formats', (len(formats), 1), 'S10', formats)
# save working directories
working_directories_gp = f.create_group('working_directories')
try:
for key in self.working_directories:
working_directories_gp.attrs[key] = self.working_directories[key]
except TypeError:
self.working_directories = {'calibration': '', 'mask': '', 'image': '', 'pattern': '', 'overlay': '',
'phase': '', 'batch': ''}
for key in self.working_directories:
working_directories_gp.attrs[key] = self.working_directories[key]
# save image model
image_group = f.create_group('image_model')
image_group.attrs['auto_process'] = self.img_model.autoprocess
image_group.attrs['factor'] = self.img_model.factor
image_group.attrs['has_background'] = self.img_model.has_background()
image_group.attrs['background_filename'] = self.img_model.background_filename
image_group.attrs['background_offset'] = self.img_model.background_offset
image_group.attrs['background_scaling'] = self.img_model.background_scaling
if self.img_model.has_background():
background_data = self.img_model.untransformed_background_data
image_group.create_dataset('background_data', background_data.shape, 'f', background_data)
image_group.attrs['series_max'] = self.img_model.series_max
image_group.attrs['series_pos'] = self.img_model.series_pos
# image corrections
corrections_group = image_group.create_group('corrections')
corrections_group.attrs['has_corrections'] = self.img_model.has_corrections()
for correction, correction_object in self.img_model.img_corrections.corrections.items():
if correction in ['cbn', 'oiadac']:
correction_data = correction_object.get_data()
imcd = corrections_group.create_dataset(correction, correction_data.shape, 'f', correction_data)
for param, value in correction_object.get_params().items():
imcd.attrs[param] = value
elif correction == 'transfer':
params = correction_object.get_params()
transfer_group = corrections_group.create_group('transfer')
original_data = params['original_data']
response_data = params['response_data']
original_ds = transfer_group.create_dataset('original_data', original_data.shape, 'f', original_data)
original_ds.attrs['filename'] = params['original_filename']
response_ds = transfer_group.create_dataset('response_data', response_data.shape, 'f', response_data)
response_ds.attrs['filename'] = params['response_filename']
# the actual image
image_group.attrs['filename'] = self.img_model.filename
current_raw_image = self.img_model.untransformed_raw_img_data
raw_image_data = image_group.create_dataset('raw_image_data', current_raw_image.shape, dtype='f')
raw_image_data[...] = current_raw_image
# image transformations
transformations_group = image_group.create_group('image_transformations')
for ind, transformation in enumerate(self.img_model.get_transformations_string_list()):
transformations_group.attrs[str(ind)] = transformation
# save roi data
if self.roi is not None:
image_group.attrs['has_roi'] = True
image_group.create_dataset('roi', (4,), 'i8', tuple(self.roi))
else:
image_group.attrs['has_roi'] = False
# save mask model
mask_group = f.create_group('mask')
current_mask = self.mask_model.get_mask()
mask_data = mask_group.create_dataset('data', current_mask.shape, dtype=bool)
mask_data[...] = current_mask
# save detector information
detector_group = f.create_group('detector')
detector_mode = self.calibration_model.detector_mode
detector_group.attrs['detector_mode'] = detector_mode.value
if detector_mode == DetectorModes.PREDEFINED:
detector_group.attrs['detector_name'] = self.calibration_model.detector.name
elif detector_mode == DetectorModes.NEXUS:
            detector_group.attrs['nexus_filename'] = self.calibration_model.detector.filename
# save calibration model
calibration_group = f.create_group('calibration_model')
calibration_filename = self.calibration_model.filename
if calibration_filename.endswith('.poni'):
base_filename, ext = self.calibration_model.filename.rsplit('.', 1)
else:
base_filename = self.calibration_model.filename
ext = 'poni'
calibration_group.attrs['calibration_filename'] = base_filename + '.' + ext
pyfai_param, fit2d_param = self.calibration_model.get_calibration_parameter()
pfp = calibration_group.create_group('pyfai_parameters')
for key in pyfai_param:
try:
pfp.attrs[key] = pyfai_param[key]
except TypeError:
pfp.attrs[key] = ''
calibration_group.attrs['correct_solid_angle'] = self.correct_solid_angle
if self.calibration_model.distortion_spline_filename is not None:
calibration_group.attrs['distortion_spline_filename'] = self.calibration_model.distortion_spline_filename
# save background pattern and pattern model
background_pattern_group = f.create_group('background_pattern')
try:
background_pattern_x = self.pattern_model.background_pattern.original_x
background_pattern_y = self.pattern_model.background_pattern.original_y
except (TypeError, AttributeError):
background_pattern_x = None
background_pattern_y = None
if background_pattern_x is not None and background_pattern_y is not None:
background_pattern_group.attrs['has_background_pattern'] = True
bgx = background_pattern_group.create_dataset('x', background_pattern_x.shape, dtype='f')
bgy = background_pattern_group.create_dataset('y', background_pattern_y.shape, dtype='f')
bgx[...] = background_pattern_x
bgy[...] = background_pattern_y
else:
background_pattern_group.attrs['has_background_pattern'] = False
pattern_group = f.create_group('pattern')
try:
pattern_x = self.pattern_model.pattern.original_x
pattern_y = self.pattern_model.pattern.original_y
except (TypeError, AttributeError):
pattern_x = None
pattern_y = None
if pattern_x is not None and pattern_y is not None:
px = pattern_group.create_dataset('x', pattern_x.shape, dtype='f')
py = pattern_group.create_dataset('y', pattern_y.shape, dtype='f')
px[...] = pattern_x
py[...] = pattern_y
pattern_group.attrs['pattern_filename'] = self.pattern_model.pattern_filename
pattern_group.attrs['unit'] = self.pattern_model.unit
pattern_group.attrs['file_iteration_mode'] = self.pattern_model.file_iteration_mode
if self.pattern_model.pattern.auto_background_subtraction:
pattern_group.attrs['auto_background_subtraction'] = True
auto_background_group = pattern_group.create_group('auto_background_settings')
auto_background_group.attrs['smoothing'] = \
self.pattern_model.pattern.auto_background_subtraction_parameters[0]
auto_background_group.attrs['iterations'] = \
self.pattern_model.pattern.auto_background_subtraction_parameters[1]
auto_background_group.attrs['poly_order'] = \
self.pattern_model.pattern.auto_background_subtraction_parameters[2]
auto_background_group.attrs['x_start'] = self.pattern_model.pattern.auto_background_subtraction_roi[0]
auto_background_group.attrs['x_end'] = self.pattern_model.pattern.auto_background_subtraction_roi[1]
else:
pattern_group.attrs['auto_background_subtraction'] = False
def load_from_hdf5(self, hdf5_group):
"""
Loads a configuration from the specified hdf5_group.
:type hdf5_group: h5py.Group
"""
f = hdf5_group
# disable all automatic functions
self.auto_integrate_pattern = False
self.auto_integrate_cake = False
self.auto_save_integrated_pattern = False
# get working directories
working_directories = {}
for key, value in f.get('working_directories').attrs.items():
if os.path.isdir(value):
working_directories[key] = value
else:
working_directories[key] = ''
self.working_directories = working_directories
# load pyFAI parameters
pyfai_parameters = {}
for key, value in f.get('calibration_model').get('pyfai_parameters').attrs.items():
pyfai_parameters[key] = value
try:
self.calibration_model.set_pyFAI(pyfai_parameters)
filename = f.get('calibration_model').attrs['calibration_filename']
(file_path, base_name) = os.path.split(filename)
self.calibration_model.filename = filename
self.calibration_model.calibration_name = base_name
except (KeyError, ValueError):
print('Problem with saved pyFAI calibration parameters')
pass
try:
self.correct_solid_angle = f.get('calibration_model').attrs['correct_solid_angle']
except KeyError:
pass
try:
distortion_spline_filename = f.get('calibration_model').attrs['distortion_spline_filename']
self.calibration_model.load_distortion(distortion_spline_filename)
except KeyError:
pass
# load detector definition
try:
detector_mode = f.get('detector').attrs['detector_mode']
if detector_mode == DetectorModes.PREDEFINED.value:
detector_name = f.get('detector').attrs['detector_name']
self.calibration_model.load_detector(detector_name)
elif detector_mode == DetectorModes.NEXUS.value:
nexus_filename = f.get('detector').attrs['nexus_filename']
self.calibration_model.load_detector_from_file(nexus_filename)
except AttributeError: # to ensure backwards compatibility
pass
# load img_model
self.img_model._img_data = np.copy(f.get('image_model').get('raw_image_data')[...])
filename = f.get('image_model').attrs['filename']
self.img_model.filename = filename
try:
self.img_model.file_name_iterator.update_filename(filename)
self.img_model._directory_watcher.path = os.path.dirname(filename)
except EnvironmentError:
pass
self.img_model.autoprocess = f.get('image_model').attrs['auto_process']
self.img_model.autoprocess_changed.emit()
self.img_model.factor = f.get('image_model').attrs['factor']
try:
self.img_model.series_max = f.get('image_model').attrs['series_max']
self.img_model.series_pos = f.get('image_model').attrs['series_pos']
except KeyError:
pass
if f.get('image_model').attrs['has_background']:
self.img_model.background_data = np.copy(f.get('image_model').get('background_data')[...])
self.img_model.background_filename = f.get('image_model').attrs['background_filename']
self.img_model.background_scaling = f.get('image_model').attrs['background_scaling']
self.img_model.background_offset = f.get('image_model').attrs['background_offset']
# load image transformations
transformation_group = f.get('image_model').get('image_transformations')
transformation_list = []
for key, transformation in transformation_group.attrs.items():
transformation_list.append(transformation)
self.calibration_model.load_transformations_string_list(transformation_list)
self.img_model.load_transformations_string_list(transformation_list)
# load roi data
if f.get('image_model').attrs['has_roi']:
self.roi = tuple(f.get('image_model').get('roi')[...])
# load mask model
self.mask_model.set_mask(np.copy(f.get('mask').get('data')[...]))
# load pattern model
        if f.get('pattern').get('x') is not None and f.get('pattern').get('y') is not None:
self.pattern_model.set_pattern(f.get('pattern').get('x')[...],
f.get('pattern').get('y')[...],
f.get('pattern').attrs['pattern_filename'],
f.get('pattern').attrs['unit'])
self.pattern_model.file_iteration_mode = f.get('pattern').attrs['file_iteration_mode']
self.integration_unit = f.get('general_information').attrs['integration_unit']
if f.get('background_pattern').attrs['has_background_pattern']:
self.pattern_model.background_pattern = Pattern(f.get('background_pattern').get('x')[...],
f.get('background_pattern').get('y')[...],
'background_pattern')
if f.get('pattern').attrs['auto_background_subtraction']:
bg_params = []
bg_roi = []
bg_params.append(f.get('pattern').get('auto_background_settings').attrs['smoothing'])
bg_params.append(f.get('pattern').get('auto_background_settings').attrs['iterations'])
bg_params.append(f.get('pattern').get('auto_background_settings').attrs['poly_order'])
bg_roi.append(f.get('pattern').get('auto_background_settings').attrs['x_start'])
bg_roi.append(f.get('pattern').get('auto_background_settings').attrs['x_end'])
self.pattern_model.pattern.set_auto_background_subtraction(bg_params, bg_roi,
recalc_pattern=False)
# load general configuration
if f.get('general_information').attrs['integration_num_points']:
self.integration_rad_points = f.get('general_information').attrs['integration_num_points']
# cake parameters:
self.auto_integrate_cake = f.get('general_information').attrs['auto_integrate_cake']
try:
self.cake_azimuth_points = f.get('general_information').attrs['cake_azimuth_points']
        except KeyError:
pass
try:
if f.get('general_information').attrs['cake_azimuth_range'] == "None":
self.cake_azimuth_range = None
else:
self.cake_azimuth_range = f.get('general_information').attrs['cake_azimuth_range']
        except KeyError:
pass
# mask parameters
self.use_mask = f.get('general_information').attrs['use_mask']
self.transparent_mask = f.get('general_information').attrs['transparent_mask']
# corrections
if f.get('image_model').get('corrections').attrs['has_corrections']:
for name, correction_group in f.get('image_model').get('corrections').items():
params = {}
for param, val in correction_group.attrs.items():
params[param] = val
if name == 'cbn':
tth_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.ttha
azi_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.chia
cbn_correction = CbnCorrection(tth_array=tth_array, azi_array=azi_array)
cbn_correction.set_params(params)
cbn_correction.update()
self.img_model.add_img_correction(cbn_correction, name)
elif name == 'oiadac':
tth_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.ttha
azi_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.chia
oiadac = ObliqueAngleDetectorAbsorptionCorrection(tth_array=tth_array, azi_array=azi_array)
oiadac.set_params(params)
oiadac.update()
self.img_model.add_img_correction(oiadac, name)
elif name == 'transfer':
params = {
'original_data': correction_group.get('original_data')[...],
'original_filename': correction_group.get('original_data').attrs['filename'],
'response_data': correction_group.get('response_data')[...],
'response_filename': correction_group.get('response_data').attrs['filename']
}
self.img_model.transfer_correction.set_params(params)
self.img_model.enable_transfer_function()
# autosave parameters
self.auto_save_integrated_pattern = f.get('general_information').attrs['auto_save_integrated_pattern']
self.integrated_patterns_file_formats = []
for file_format in f.get('general_information').get('integrated_patterns_file_formats'):
self.integrated_patterns_file_formats.append(file_format[0].decode('utf-8'))
if self.calibration_model.is_calibrated:
self.integrate_image_1d()
else:
self.pattern_model.pattern.recalculate_pattern()
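# Illustrative round-trip sketch (added for clarity; not part of the original
# module). It assumes the enclosing class is Configuration, that it can be
# constructed without arguments here, and that the saving counterpart of
# load_from_hdf5 is named save_in_hdf5:
#
#   import h5py
#   config = Configuration()
#   with h5py.File('session.h5', 'w') as f:
#       config.save_in_hdf5(f)      # writes the groups read back below
#   restored = Configuration()
#   with h5py.File('session.h5', 'r') as f:
#       restored.load_from_hdf5(f)  # re-creates the models from the saved groups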
| Dioptas/Dioptas | dioptas/model/Configuration.py | Python | gpl-3.0 | 34,175 |
import time
import json
import pytz
from datetime import datetime, timedelta
from django.utils import timezone
from django.conf import settings
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from ..models.security import (
SecurityLoginAttemptIncorrect, SecurityLoginAttemptCorrect
)
@api_view(['GET'])
@permission_classes((IsAdminUser, ))
def correctlogins_data(request):
date_start_raw = request.GET.get('date_start')
date_end_raw = request.GET.get('date_end')
date_start_tz = None
date_end_tz = None
if not date_start_raw or not date_end_raw:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
else:
date_start = datetime.fromtimestamp(int(date_start_raw))
date_start_tz = pytz.timezone(settings.TIME_ZONE).localize(date_start, is_dst=None)
date_end = datetime.fromtimestamp(int(date_end_raw))
date_end_tz = pytz.timezone(settings.TIME_ZONE).localize(date_end, is_dst=None)
if date_start_tz == date_end_tz:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
count_hosts = []
temp_hosts = {}
temp_users = {}
dates = []
values = SecurityLoginAttemptCorrect.objects.filter(time__range=[date_start_tz, date_end_tz])
count_correct_attempt = 0
for p in values:
value = json.loads(p.value)
attempt_count = 0
for host, v in value.get("hosts", {}).items():
            attempt_count += v.get("count", 0)
            raw_date = v.get("last_date")
            date_tz = None
            if raw_date:
                date = datetime.fromtimestamp(int(raw_date))
                date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
            # guard against records without a 'last_date' entry
            last_date_str = date_tz.strftime("%b %d %H:%M") if date_tz else ""
            if host in temp_hosts:
                temp_hosts[host]["count"] = temp_hosts[host]["count"] + v.get("count", 0)
                temp_hosts[host]["last_date"] = last_date_str
            else:
                temp_hosts[host] = {
                    "host": host,
                    "count": v.get("count", 0),
                    "last_date": last_date_str
                }
for username, v in value.get("users", {}).items():
attempt_count += v.get("count", 0)
            raw_date = v.get("last_date")
            date_tz = None
            if raw_date:
                date = datetime.fromtimestamp(int(raw_date))
                date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
            last_date_str = date_tz.strftime("%b %d %H:%M") if date_tz else ""
            if username in temp_users:
                temp_users[username]["count"] = temp_users[username]["count"] + v.get("count", 0)
                temp_users[username]["last_date"] = last_date_str
            else:
                temp_users[username] = {
                    "username": username,
                    "count": v.get("count", 0),
                    "last_date": last_date_str
                }
        count_correct_attempt += attempt_count  # accumulate once per record, after both loops
        count_hosts.append(attempt_count)
dates.append(timezone.localtime(p.time).strftime("%b %d %H:%M"))
    hosts = list(temp_hosts.values())
if hosts:
hosts.sort(key=lambda x: x["count"], reverse=True)
hosts = hosts[:100]
    users = list(temp_users.values())
if users:
users.sort(key=lambda x: x["count"], reverse=True)
users = users[:100]
date_range = {
"start": time.mktime(timezone.localtime(date_start_tz).timetuple()), # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"start_date": time.mktime(timezone.localtime(date_start_tz).timetuple()) + 10, # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"end_date": time.mktime(timezone.localtime(date_end_tz).timetuple()),
}
if values:
date_range["start"] = time.mktime(timezone.localtime(values[0].time).timetuple())
start_obj = SecurityLoginAttemptCorrect.objects.all().first()
if start_obj:
date_range["start_date"] = time.mktime(timezone.localtime(start_obj.time).timetuple())
if date_range["start_date"] == date_range["end_date"]:
date_range["end_date"] += 10
return Response({
"values": [{
"data": count_hosts,
"label": 'Number of login'
}],
"dates": dates,
"date_range": date_range,
"count_correct_attempt": count_correct_attempt,
"hosts": hosts,
"users": users
}, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes((IsAdminUser, ))
def incorrectlogins_data(request):
date_start_raw = request.GET.get('date_start')
date_end_raw = request.GET.get('date_end')
date_start_tz = None
date_end_tz = None
if not date_start_raw or not date_end_raw:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
else:
date_start = datetime.fromtimestamp(int(date_start_raw))
date_start_tz = pytz.timezone(settings.TIME_ZONE).localize(date_start, is_dst=None)
date_end = datetime.fromtimestamp(int(date_end_raw))
date_end_tz = pytz.timezone(settings.TIME_ZONE).localize(date_end, is_dst=None)
if date_start_tz == date_end_tz:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
    count_incorrect_attempt = 0
count_hosts = []
temp_hosts = {}
temp_users = {}
dates = []
values = SecurityLoginAttemptIncorrect.objects.filter(time__range=[date_start_tz, date_end_tz])
for p in values:
value = json.loads(p.value)
attempt_count = 0
for host, v in value.get("hosts", {}).items():
attempt_count += v.get("count", 0)
            raw_date = v.get("last_date")
            date_tz = None
            if raw_date:
                date = datetime.fromtimestamp(int(raw_date))
                date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
            # guard against records without a 'last_date' entry
            last_date_str = date_tz.strftime("%b %d %H:%M") if date_tz else ""
            if host in temp_hosts:
                temp_hosts[host]["count"] = temp_hosts[host]["count"] + v.get("count", 0)
                temp_hosts[host]["last_date"] = last_date_str
            else:
                temp_hosts[host] = {
                    "host": host,
                    "count": v.get("count", 0),
                    "last_date": last_date_str
                }
for user, v in value.get("users", {}).items():
            attempt_count += v.get("count", 0)
            raw_date = v.get("last_date")
            date_tz = None
            if raw_date:
                date = datetime.fromtimestamp(int(raw_date))
                date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
            last_date_str = date_tz.strftime("%b %d %H:%M") if date_tz else ""
            if user in temp_users:
                temp_users[user]["count"] = temp_users[user]["count"] + v.get("count", 0)
                temp_users[user]["last_date"] = last_date_str
            else:
                temp_users[user] = {
                    "username": user,
                    "count": v.get("count", 0),
                    "last_date": last_date_str
                }
        count_incorrect_attempt += attempt_count
count_hosts.append(attempt_count)
dates.append(timezone.localtime(p.time).strftime("%b %d %H:%M"))
    hosts = list(temp_hosts.values())
if hosts:
hosts.sort(key=lambda x: x["count"], reverse=True)
hosts = hosts[:100]
    users = list(temp_users.values())
if users:
users.sort(key=lambda x: x["count"], reverse=True)
users = users[:100]
date_range = {
"start": time.mktime(timezone.localtime(date_start_tz).timetuple()), # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"start_date": time.mktime(timezone.localtime(date_start_tz).timetuple()) + 10, # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"end_date": time.mktime(timezone.localtime(date_end_tz).timetuple()),
}
if values:
date_range["start"] = time.mktime(timezone.localtime(values[0].time).timetuple())
start_obj = SecurityLoginAttemptIncorrect.objects.all().first()
if start_obj:
date_range["start_date"] = time.mktime(timezone.localtime(start_obj.time).timetuple())
if date_range["start_date"] == date_range["end_date"]:
date_range["end_date"] += 10
return Response({
"values": [{
"data": count_hosts,
"label": 'Number of attempt incorrect login'
}],
"dates": dates,
"date_range": date_range,
"count_incorrect_attepmt": count_incorrect_attepmt,
"hosts": hosts,
"users": users
}, status=status.HTTP_200_OK)
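# Hypothetical URL wiring for the two endpoints above (illustrative only; the
# actual route names and prefix live in the project's urls.py, which is not
# shown here):
#
#   from django.conf.urls import url
#   from .views import correctlogins_data, incorrectlogins_data
#
#   urlpatterns = [
#       url(r'^security/correctlogins/$', correctlogins_data),
#       url(r'^security/incorrectlogins/$', incorrectlogins_data),
#   ]
#
# Both views accept optional 'date_start'/'date_end' GET parameters as Unix
# timestamps and fall back to the last 24 hours when they are absent or equal.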
| dspichkin/djangodashpanel | djangodashpanel/security/views.py | Python | gpl-3.0 | 9,113 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-10 04:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0002_auto_20160810_0134'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='content_1',
),
migrations.AddField(
model_name='article',
name='content',
field=models.CharField(default=0, max_length=10000, verbose_name='内容'),
preserve_default=False,
),
migrations.AlterField(
model_name='article',
name='title',
field=models.CharField(max_length=100, verbose_name='标题'),
),
]
| zhangvs1988/zhangyl-Djangodemo | article/migrations/0003_auto_20160810_1219.py | Python | gpl-3.0 | 806 |
# -*- coding: utf-8 -*-
#
# Copyright © 2011 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""IPython v0.11+ Plugin"""
from spyderlib.qt.QtGui import QHBoxLayout
# Local imports
from spyderlib.widgets.ipython import create_widget
from spyderlib.plugins import SpyderPluginWidget
class IPythonPlugin(SpyderPluginWidget):
"""Find in files DockWidget"""
CONF_SECTION = 'ipython'
def __init__(self, parent, args, kernel_widget, kernel_name):
super(IPythonPlugin, self).__init__(parent)
self.kernel_widget = kernel_widget
self.kernel_name = kernel_name
self.ipython_widget = create_widget(argv=args.split())
layout = QHBoxLayout()
layout.addWidget(self.ipython_widget)
self.setLayout(layout)
# Initialize plugin
self.initialize_plugin()
def toggle(self, state):
"""Toggle widget visibility"""
if self.dockwidget:
self.dockwidget.setVisible(state)
#------ SpyderPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""Return widget title"""
return "IPython (%s) - Experimental!" % self.kernel_name
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
return self.ipython_widget._control
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
return []
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.add_dockwidget(self)
def refresh_plugin(self):
"""Refresh widget"""
pass
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed"""
return True
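# Hypothetical wiring sketch (not from the original source): Spyder's main
# window would construct and register the plugin roughly like this; the
# argument values are placeholders.
#
#   plugin = IPythonPlugin(main_window, "--pylab=inline",
#                          kernel_widget, "python")
#   plugin.register_plugin()   # adds the dockwidget to the main window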
| jromang/retina-old | distinclude/spyderlib/plugins/ipython.py | Python | gpl-3.0 | 2,012 |
import numpy as np
STR_NOBOND = """AU
3 1 2 1
1 0.00000000 0.00000000 0.00000000 -0.66387672 0.00000000 -0.00000000 0.34509720 3.78326969 -0.00000000 -0.00000000 3.96610412 0.00000000 3.52668267 0.00000000 -0.00000000 -2.98430053 0.00000000 -0.00000000 0.00000000 -0.00000000 1.26744725 -0.00000000 2.16730601
1 1.43043000 0.00000000 1.10716000 0.33193836 -0.16057903 -0.00000000 -0.11299312 1.55235099 -0.00000000 1.15495299 0.60859677 -0.00000000 1.21104235 -4.46820475 0.00000000 -4.55909022 -0.05601735 0.00000000 -3.72029878 -0.00000000 0.46039909 -0.00000000 -2.40410436
1 -1.43043000 0.00000000 1.10716000 0.33193836 0.16057903 -0.00000000 -0.11299312 1.55235099 -0.00000000 -1.15495299 0.60859677 0.00000000 1.21104235 4.46820475 -0.00000000 -4.55909022 0.05601735 0.00000000 3.72029878 -0.00000000 0.46039909 -0.00000000 -2.40410436
Time used in Loprop : 0.45 (cpu) 0.11 (wall)
"""
STR_BOND = """AU
5 1 22 1
1 0.00000000 0.00000000 0.00000000 -0.66387672 0.00000000 -0.00000000 0.41788500 1.19165567 0.00000000 0.00000000 2.74891057 0.00000000 1.33653383 0.00000000 0.00000000 4.18425484 0.00000000 -0.00000000 -0.00000000 -0.00000000 0.19037387 0.00000000 5.96033807
1 0.71521500 0.00000000 0.55358000 0.00000000 -0.06567795 -0.00000000 -0.07278780 2.59161403 -0.00000000 1.21719355 1.98015668 -0.00000000 2.19014883 -7.24839104 0.00000000 -7.16855538 0.59534043 0.00000000 -5.74640170 -0.00000000 1.07707338 -0.00000000 -3.79303206
1 1.43043000 0.00000000 1.10716000 0.33193836 -0.12774005 0.00000000 -0.07659922 0.25654398 0.00000000 0.16487465 -0.00000000 -0.00000000 0.11596794 -0.84400923 0.00000000 -0.97481253 -0.35368757 -0.00000000 -0.84709793 0.00000000 -0.07813759 0.00000000 -0.50758833
1 -0.71521500 0.00000000 0.55358000 0.00000000 0.06567795 -0.00000000 -0.07278780 2.59161403 -0.00000000 1.21719355 -1.98015668 0.00000000 2.19014883 7.24839104 -0.00000000 -7.16855538 -0.59534043 0.00000000 5.74640170 -0.00000000 1.07707338 -0.00000000 -3.79303206
1 -1.43043000 0.00000000 1.10716000 0.33193836 0.12774005 0.00000000 -0.07659922 0.25654398 -0.00000000 -0.16487465 0.00000000 0.00000000 0.11596794 0.84400923 -0.00000000 -0.97481253 0.35368757 0.00000000 0.84709793 -0.00000000 -0.07813759 -0.00000000 -0.50758833
Time used in Loprop : 0.45 (cpu) 0.11 (wall)
"""
class TestBondH2O:
"""H2O tests bonded versus non-bonden results"""
def setup(self):
        # Read in the string for the bonded (-b) output
lines = [line for line in STR_BOND.split("\n") if len(line.split()) > 10]
a0 = 1.0
self.n_bond = np.array([8.0, 0.0, 1.0, 0.0, 1.0], dtype=float)
self.r_bond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_bond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_bond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_bond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_bond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_bond = np.einsum("ij,i", self.r_bond, self.n_bond) / self.n_bond.sum()
        # Read in the string for the non-bonded output
lines = [line for line in STR_NOBOND.split("\n") if len(line.split()) > 10]
self.n_nobond = np.array([8.0, 1.0, 1.0], dtype=float)
self.r_nobond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_nobond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_nobond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_nobond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_nobond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_nobond = (
np.einsum("ij,i", self.r_nobond, self.n_nobond) / self.n_nobond.sum()
)
def test_bond_nobond_properties(self):
"""Center-of-charge equality"""
np.testing.assert_allclose(self.coc_bond, self.coc_nobond)
def test_a(self):
"""Polarizability equality"""
a_tot_bond = np.sum(self.a_bond)
a_tot_nobond = np.sum(self.a_nobond)
np.testing.assert_allclose(a_tot_bond, a_tot_nobond)
def test_b(self):
"""Hyperpolarizability equality"""
b_tot_bond = np.sum(self.b_bond)
b_tot_nobond = np.sum(self.b_nobond)
np.testing.assert_allclose(b_tot_bond, b_tot_nobond)
def test_dip(self):
"""Dipole equality"""
dip_bond = np.einsum(
"ij,i", (self.r_bond - self.coc_bond), self.q_bond
) + self.d_bond.sum(axis=0)
dip_nobond = np.einsum(
"ij,i", (self.r_nobond - self.coc_nobond), self.q_nobond
) + self.d_nobond.sum(axis=0)
np.testing.assert_allclose(dip_bond, dip_nobond)
class TestBondH2S:
"""H2O tests bonded versus non-bonden results"""
def setup(self):
        # Read in the string for the bonded (-b) output
lines = [line for line in STR_BOND.split("\n") if len(line.split()) > 10]
a0 = 1.0
self.n_bond = np.array([16.0, 0.0, 1.0, 0.0, 1.0], dtype=float)
self.r_bond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_bond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_bond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_bond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_bond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_bond = np.einsum("ij,i", self.r_bond, self.n_bond) / self.n_bond.sum()
        # Read in the string for the non-bonded output
lines = [line for line in STR_NOBOND.split("\n") if len(line.split()) > 10]
self.n_nobond = np.array([16.0, 1.0, 1.0], dtype=float)
self.r_nobond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_nobond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_nobond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_nobond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_nobond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_nobond = (
np.einsum("ij,i", self.r_nobond, self.n_nobond) / self.n_nobond.sum()
)
def test_bond_nobond_properties(self):
"""Center-of-charge equality"""
np.testing.assert_allclose(self.coc_bond, self.coc_nobond)
def test_a(self):
"""Polarizability equality"""
a_tot_bond = np.sum(self.a_bond)
a_tot_nobond = np.sum(self.a_nobond)
np.testing.assert_allclose(a_tot_bond, a_tot_nobond)
def test_b(self):
"""Hyperpolarizability equality"""
b_tot_bond = np.sum(self.b_bond)
b_tot_nobond = np.sum(self.b_nobond)
np.testing.assert_allclose(b_tot_bond, b_tot_nobond)
def test_dip(self):
"""Dipole equality"""
dip_bond = np.einsum(
"ij,i", (self.r_bond - self.coc_bond), self.q_bond
) + self.d_bond.sum(axis=0)
dip_nobond = np.einsum(
"ij,i", (self.r_nobond - self.coc_nobond), self.q_nobond
) + self.d_nobond.sum(axis=0)
np.testing.assert_allclose(dip_bond, dip_nobond)
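# Quick numeric illustration (added; mirrors the dipole formula used in the
# test_dip methods above). The geometry and charges are made up; with zero
# local dipoles the total dipole reduces to the point charges weighted by
# their offsets from the centre of charge.
if __name__ == "__main__":
    r = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 1.0], [-1.0, 0.0, 1.0]])
    q = np.array([-0.6, 0.3, 0.3])
    d = np.zeros((3, 3))
    n = np.array([8.0, 1.0, 1.0])
    coc = np.einsum("ij,i", r, n) / n.sum()
    dip = np.einsum("ij,i", r - coc, q) + d.sum(axis=0)
    print(dip)  # [0. 0. 0.6] for this symmetric made-up geometry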
| vahtras/loprop | tests/test_bond.py | Python | gpl-3.0 | 7,956 |
# coding: utf-8
import os
import urllib.request
import numpy as np
import pickle
from Experiment import Experiment
ROOT_PATH = './full_dataset/article_4_data/grouped_ephys'
ZIPFILE_PATH = './full_dataset/article_4_data'
EXPM_PATH = './results/experiments/'
URL = 'http://microcircuits.epfl.ch/data/released_data/'
if not os.path.exists(EXPM_PATH):
os.makedirs(EXPM_PATH)
if not os.path.exists(ROOT_PATH):
print('It seems that the directory of the raw data does not exist. It is expected to be at: ' + ROOT_PATH)
if not os.path.exists(ZIPFILE_PATH):
print('It seems that the directory with the zip files does not exist. It is expected to be at: ' + ZIPFILE_PATH)
# ==============================================================================
# General io function
# ==============================================================================
def download_info_from_url(url):
"""
Download content from url and return it.
"""
r = urllib.request.urlopen(url)
data = r.read()
data = data.decode(encoding='UTF-8')
return data
def get_genetic_cell_infos(filepath):
"""
Downloads genetic information from cells in the directory at filepath.
"""
filelist = os.listdir(filepath)
raw_names = [name[0:-4] for name in filelist]
cell_names = []
for name in raw_names:
# if name.rfind('ET') == -1:
cell_names.append(name)
infos = {}
for cell in cell_names:
url_complete = URL + cell + '.txt'
try:
infos[cell] = download_info_from_url(url_complete)
        except Exception:
            continue
return infos
def save_filtered_cell_infos(filtername, criterion1='SOM:1', criterion2='PV:0', criterion3='VIP:0'):
"""
    Gets genetic information from all cells in ZIPFILE_PATH directory, filters them by the given
    criteria and saves the filtered list with pickle.
"""
infos = get_genetic_cell_infos(ZIPFILE_PATH)
desired_cells = {}
for cell in infos.keys():
if criterion1 in infos[cell] and criterion2 in infos[cell] and criterion3 in infos[cell]:
desired_cells[cell] = infos[cell]
with open(filtername + '_infos.pkl', 'wb') as f:
pickle.dump(desired_cells, f)
def save_all_cell_infos(filepath):
"""
Saves genetic information from all cells in ZIPFILE_PATH directory in one list with pickle.
"""
infos = get_genetic_cell_infos(filepath)
with open('cell_infos_full.pkl', 'wb') as f:
pickle.dump(infos, f)
def open_filtered_cell_info_list(filtername):
"""
Opens the list that was saved with save_filtered_cell_infos with the given filtername.
"""
with open(filtername + '_infos.pkl', 'rb') as f:
filtered_list = pickle.load(f)
return filtered_list
def create_experiments_from_list(cells, cell_type, verbose=True):
"""
Creates Experiment objects for cells in cells, adds all existing traces and saves them.
Params:
    - cells: List with cell names or dictionary where the keys are the cell names.
"""
if type(cells) is dict:
cell_names = list(cells.keys())
else:
cell_names = cells
ncells = len(cell_names)
for i in range(ncells):
PATH = os.path.join(ROOT_PATH, cell_names[i])
animal_files = sorted(os.listdir(PATH))
ntraces = int(len(animal_files) / 2)
current_exp = Experiment('Cell_' + cell_names[i] + '_single_traces', cell_type=cell_type)
exp_merged_traces = Experiment('Cell_' + cell_names[i] + '_merged_idrest_traces', cell_type=cell_type)
nincluded_idrest_traces = 0
for j in np.arange(ntraces):
# files end with 'recordingType_recordingNumber.ibw'
file_split = str.split(animal_files[j][0:-4], '_')
file_identifier = file_split[-2] + '_' + file_split[-1] + '.ibw'
current_recording_type = file_split[-2]
            # find indices of matching files in folder (current file always comes first because it's always Ch0)
file_idc = [i for i, elem in enumerate(animal_files) if file_identifier in elem]
current_file = animal_files[file_idc[0]]
voltage_file = animal_files[file_idc[1]]
current_exp.add_trainingset_trace(os.path.join(PATH, voltage_file), 10 ** -3,
os.path.join(PATH, current_file), 10 ** -12, FILETYPE='Igor',
verbose=verbose)
tr = current_exp.trainingset_traces[j]
tr.recording_type = current_recording_type
tr.estimate_input_amp()
if current_recording_type == 'IDRest':
exp_merged_traces.add_trainingset_trace(os.path.join(PATH, voltage_file), 10 ** -3,
os.path.join(PATH, current_file), 10 ** -12, FILETYPE='Igor',
verbose=verbose)
                tr = exp_merged_traces.trainingset_traces[nincluded_idrest_traces]
tr.recording_type = current_recording_type
tr.estimate_input_amp()
nincluded_idrest_traces += 1
        if len(exp_merged_traces.trainingset_traces) >= 3:
exp_merged_traces.mergeTrainingTraces()
exp_merged_traces.save(os.path.join(EXPM_PATH), verbose=verbose)
current_exp.save(os.path.join(EXPM_PATH), verbose=verbose)
def load_merged_traces_experiments_from_list(cells, verbose=True):
"""
Load experiments where IDRest traces have been merged.
This function will try to load an experiment with merged IDRest traces for all cells
in the list and just skip the ones for which it is not found. If no experiments were
found, None is returned.
Params:
    - cells: List with cell names or dictionary where the keys are the cell names.
See also:
load_single_traces_experiments_from_list()
"""
if type(cells) is dict:
cell_names = list(cells.keys())
else:
cell_names = cells
expms = []
for i in range(len(cell_names)):
current_expm_name = 'Experiment_Cell_' + cell_names[i] + '_merged_idrest_traces.pkl'
current_expm_path = os.path.join(EXPM_PATH, current_expm_name)
try:
current_expm = Experiment.load(current_expm_path, verbose=verbose)
expms.append(current_expm)
except:
pass
if not len(expms) == 0:
return expms
else:
return None
def load_single_traces_experiments_from_list(cells, verbose=True):
"""
Load experiments where traces have been added separately.
Params:
    - cells: List with cell names or dictionary where the keys are the cell names.
See also:
load_merged_traces_experiments_from_list()
"""
if type(cells) is dict:
cell_names = list(cells.keys())
else:
cell_names = cells
expms = []
for i in range(len(cell_names)):
current_expm_name = 'Experiment_Cell_' + cell_names[i] + '_single_traces.pkl'
current_expm_path = os.path.join(EXPM_PATH, current_expm_name)
try:
current_expm = Experiment.load(current_expm_path, verbose=verbose)
expms.append(current_expm)
except:
pass
if not len(expms) == 0:
return expms
else:
return None
# ==============================================================================
# From here on it's interneuron-specific functions
# ==============================================================================
def create_interneuron_specific_experiments(verbose=True):
"""
Filters cell infos for SOM, PV and VIP neurons, loads them and creates
Experiment objects.
"""
# create and save filtered info lists for SOM, PV and VIP neurons
save_filtered_cell_infos('som_cells', criterion1='SOM:1', criterion2='PV:0', criterion3='VIP:0')
save_filtered_cell_infos('pv_cells', criterion1='SOM:0', criterion2='PV:1', criterion3='VIP:0')
save_filtered_cell_infos('vip_cells', criterion1='SOM:0', criterion2='PV:0', criterion3='VIP:1')
# get saved lists
som_dict = open_filtered_cell_info_list('som_cells')
vip_dict = open_filtered_cell_info_list('vip_cells')
pv_dict = open_filtered_cell_info_list('pv_cells')
# create experiment objects
create_experiments_from_list(vip_dict, cell_type='vip', verbose=verbose)
create_experiments_from_list(som_dict, cell_type='som', verbose=verbose)
create_experiments_from_list(pv_dict, cell_type='pv', verbose=verbose)
def get_som_expms(merged=False, verbose=True):
som_dict = open_filtered_cell_info_list('som_cells')
if merged:
return load_merged_traces_experiments_from_list(som_dict, verbose=verbose)
else:
return load_single_traces_experiments_from_list(som_dict, verbose=verbose)
def get_pv_expms(merged=False, verbose=True):
pv_dict = open_filtered_cell_info_list('pv_cells')
if merged:
return load_merged_traces_experiments_from_list(pv_dict, verbose=verbose)
else:
return load_single_traces_experiments_from_list(pv_dict, verbose=verbose)
def get_vip_expms(merged=False, verbose=True):
vip_dict = open_filtered_cell_info_list('vip_cells')
if merged:
return load_merged_traces_experiments_from_list(vip_dict, verbose=verbose)
else:
return load_single_traces_experiments_from_list(vip_dict, verbose=verbose)
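# Illustrative workflow sketch (added; the function names are defined above,
# but the raw data must already be unpacked under ROOT_PATH for it to work):
#
#   create_interneuron_specific_experiments(verbose=False)  # build and save Experiment objects
#   som_expms = get_som_expms(merged=True, verbose=False)   # load merged-IDRest experiments
#   if som_expms is not None:
#       print(len(som_expms), 'SOM experiments loaded')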
| awakenting/gif_fitting | bbp_analysis/bluebrain_data_io.py | Python | gpl-3.0 | 9,533 |
#
# controller.py
#
# Copyright (C) 2013-2014 Ashwin Menon <[email protected]>
# Copyright (C) 2015-2018 Track Master Steve <[email protected]>
#
# Alienfx is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Alienfx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with alienfx. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
""" Base classes for AlienFX controller chips. These must be subclassed for
specific controllers.
This module provides the following classes:
AlienFXController: base class for AlienFX controller chips
"""
from builtins import hex
from builtins import object
import logging
import alienfx.core.usbdriver as alienfx_usbdriver
import alienfx.core.cmdpacket as alienfx_cmdpacket
from alienfx.core.themefile import AlienFXThemeFile
from functools import reduce
class AlienFXController(object):
""" Provides facilities to communicate with an AlienFX controller.
This class provides methods to send commands to an AlienFX controller, and
receive status from the controller. It must be overridden to provide
behaviour specific to a particular AlienFX controller.
"""
# List of all subclasses of this class. Subclasses must add instances of
# themselves to this list. See README for details.
supported_controllers = []
# Zone names
ZONE_LEFT_KEYBOARD = "Left Keyboard"
ZONE_MIDDLE_LEFT_KEYBOARD = "Middle-left Keyboard"
ZONE_MIDDLE_RIGHT_KEYBOARD = "Middle-right Keyboard"
ZONE_RIGHT_KEYBOARD = "Right Keyboard"
ZONE_RIGHT_SPEAKER = "Right Speaker"
ZONE_LEFT_SPEAKER = "Left Speaker"
ZONE_ALIEN_HEAD = "Alien Head"
ZONE_LOGO = "Logo"
ZONE_TOUCH_PAD = "Touchpad"
ZONE_MEDIA_BAR = "Media Bar"
ZONE_STATUS_LEDS = "Status LEDs"
ZONE_POWER_BUTTON = "Power Button"
ZONE_HDD_LEDS = "HDD LEDs"
ZONE_RIGHT_DISPLAY = "Right Display" # LED-bar display right side, as built in the AW17R4
ZONE_LEFT_DISPLAY = "Left Display" # LED-bar display left side, as built in the AW17R4
# State names
STATE_BOOT = "Boot"
STATE_AC_SLEEP = "AC Sleep"
STATE_AC_CHARGED = "AC Charged"
STATE_AC_CHARGING = "AC Charging"
STATE_BATTERY_SLEEP = "Battery Sleep"
STATE_BATTERY_ON = "Battery On"
STATE_BATTERY_CRITICAL = "Battery Critical"
    ALIENFX_CONTROLLER_TYPE = "old"  # Default controller type is "old"; modern controllers use 8 bits per colour channel, older ones just 4
def __init__(self, conrev=1): # conrev defaulting to 1 to maintain compatibility with old definitions
# conrev=1 -> old controllers (DEFAULT)
# conrev=2 -> newer controllers (17R4 ...)
self.zone_map = {}
self.power_zones = []
self.reset_types = {}
self.state_map = {}
self.vendor_id = 0
self.product_id = 0
self.cmd_packet = alienfx_cmdpacket.AlienFXCmdPacket(conrev) # Loads the cmdpacket.
self._driver = alienfx_usbdriver.AlienFXUSBDriver(self)
def get_zone_name(self, pkt):
""" Given 3 bytes of a command packet, return a string zone
name corresponding to it
"""
zone_mask = (pkt[0] << 16) + (pkt[1] << 8) + pkt[2]
zone_name = ""
for zone in self.zone_map:
bit_mask = self.zone_map[zone]
if zone_mask & bit_mask:
if zone_name != "":
zone_name += ","
zone_name += zone
zone_mask &= ~bit_mask
if zone_mask != 0:
if zone_name != "":
zone_name += ","
zone_name += "UNKNOWN({})".format(hex(zone_mask))
return zone_name
def get_state_name(self, state):
""" Given a state number, return a string state name """
for state_name in self.state_map:
if self.state_map[state_name] == state:
return state_name
return "UNKNOWN"
def get_reset_type_name(self, num):
""" Given a reset number, return a string reset name """
if num in list(self.reset_types.keys()):
return self.reset_types[num]
else:
return "UNKNOWN"
def _ping(self):
""" Send a get-status command to the controller."""
pkt = self.cmd_packet.make_cmd_get_status()
logging.debug("SENDING: {}".format(self.pkt_to_string(pkt)))
self._driver.write_packet(pkt)
self._driver.read_packet()
def _reset(self, reset_type):
""" Send a "reset" packet to the AlienFX controller."""
reset_code = self._get_reset_code(reset_type)
pkt = self.cmd_packet.make_cmd_reset(reset_code)
logging.debug("SENDING: {}".format(self.pkt_to_string(pkt)))
self._driver.write_packet(pkt)
def _wait_controller_ready(self):
""" Keep sending a "get status" packet to the AlienFX controller and
return only when the controller is ready
"""
ready = False
errcount=0
while not ready:
pkt = self.cmd_packet.make_cmd_get_status()
logging.debug("SENDING: {}".format(self.pkt_to_string(pkt)))
self._driver.write_packet(pkt)
try:
resp = self._driver.read_packet()
ready = (resp[0] == self.cmd_packet.STATUS_READY)
except TypeError:
errcount += 1
logging.debug("No Status received yet... Failed tries=" + str(errcount))
if errcount > 50:
logging.error("Controller status could not be retrieved. Is the device already in use?")
quit(-99)
def pkt_to_string(self, pkt_bytes):
""" Return a human readable string representation of an AlienFX
command packet.
"""
return self.cmd_packet.pkt_to_string(pkt_bytes, self)
def _get_no_zone_code(self):
""" Return a zone code corresponding to all non-visible zones."""
zone_codes = [self.zone_map[x] for x in self.zone_map]
return ~reduce(lambda x,y: x|y, zone_codes, 0)
def _get_zone_codes(self, zone_names):
""" Given zone names, return the zone codes they refer to.
"""
zones = 0
for zone in zone_names:
if zone in self.zone_map:
zones |= self.zone_map[zone]
return zones
def _get_reset_code(self, reset_name):
""" Given the name of a reset action, return its code. """
for reset in self.reset_types:
if reset_name == self.reset_types[reset]:
return reset
logging.warning("Unknown reset type: {}".format(reset_name))
return 0
def _make_loop_cmds(self, themefile, zones, block, loop_items):
""" Given loop-items from the theme file, return a list of loop
commands.
"""
loop_cmds = []
pkt = self.cmd_packet
for item in loop_items:
item_type = themefile.get_action_type(item)
item_colours = themefile.get_action_colours(item)
if item_type == AlienFXThemeFile.KW_ACTION_TYPE_FIXED:
if len(item_colours) != 1:
logging.warning("fixed must have exactly one colour value")
continue
loop_cmds.append(
pkt.make_cmd_set_colour(block, zones, item_colours[0]))
elif item_type == AlienFXThemeFile.KW_ACTION_TYPE_BLINK:
if len(item_colours) != 1:
logging.warning("blink must have exactly one colour value")
continue
loop_cmds.append(
pkt.make_cmd_set_blink_colour(block, zones, item_colours[0]))
elif item_type == AlienFXThemeFile.KW_ACTION_TYPE_MORPH:
if len(item_colours) != 2:
logging.warning("morph must have exactly two colour values")
continue
loop_cmds.append(
pkt.make_cmd_set_morph_colour(
block, zones, item_colours[0], item_colours[1]))
else:
logging.warning("unknown loop item type: {}".format(item_type))
return loop_cmds
def _make_zone_cmds(self, themefile, state_name, boot=False):
""" Given a theme file, return a list of zone commands.
If 'boot' is True, then the colour commands created are not saved with
SAVE_NEXT commands. Also, the final command is one to set the colour
of all non-visible zones to black.
"""
zone_cmds = []
block = 1
pkt = self.cmd_packet
state = self.state_map[state_name]
state_items = themefile.get_state_items(state_name)
for item in state_items:
zone_codes = self._get_zone_codes(themefile.get_zone_names(item))
loop_items = themefile.get_loop_items(item)
loop_cmds = self._make_loop_cmds(
themefile, zone_codes, block, loop_items)
if (loop_cmds):
block += 1
for loop_cmd in loop_cmds:
if not boot:
zone_cmds.append(pkt.make_cmd_save_next(state))
zone_cmds.append(loop_cmd)
if not boot:
zone_cmds.append(pkt.make_cmd_save_next(state))
zone_cmds.append(pkt.make_cmd_loop_block_end())
if zone_cmds:
if not boot:
zone_cmds.append(pkt.make_cmd_save())
if boot:
zone_cmds.append(
pkt.make_cmd_set_colour(
block, self._get_no_zone_code(), (0,0,0)))
zone_cmds.append(pkt.make_cmd_loop_block_end())
return zone_cmds
def _send_cmds(self, cmds):
""" Send the given commands to the controller. """
for cmd in cmds:
logging.debug("SENDING: {}".format(self.pkt_to_string(cmd)))
self._driver.write_packet(cmd)
def set_theme(self, themefile):
""" Send the given theme settings to the controller. This should result
in the lights changing to the theme settings immediately.
"""
try:
self._driver.acquire()
cmds_boot = []
pkt = self.cmd_packet
# prepare the controller
self._ping()
self._reset("all-lights-on")
self._wait_controller_ready()
for state_name in self.state_map:
cmds = []
cmds = self._make_zone_cmds(themefile, state_name)
# Boot block commands are saved for sending again later.
# The second time, they are sent without SAVE_NEXT commands.
if (state_name == self.STATE_BOOT):
cmds_boot = self._make_zone_cmds(
themefile, state_name, boot=True)
self._send_cmds(cmds)
cmd = pkt.make_cmd_set_speed(themefile.get_speed())
self._send_cmds([cmd])
# send the boot block commands again
self._send_cmds(cmds_boot)
cmd = pkt.make_cmd_transmit_execute()
self._send_cmds([cmd])
finally:
self._driver.release()
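# Minimal subclass sketch (illustrative; the ids and zone/state codes are
# placeholders, not a real device description). As the comment on
# supported_controllers says, concrete controllers add an instance of
# themselves to that list:
#
#   class AlienFXControllerExample(AlienFXController):
#       def __init__(self):
#           super(AlienFXControllerExample, self).__init__(conrev=2)
#           self.vendor_id = 0x187c    # commonly the Alienware USB vendor id
#           self.product_id = 0x0000   # placeholder
#           self.zone_map = {self.ZONE_LOGO: 0x0001}
#           self.state_map = {self.STATE_BOOT: 0x01}
#           self.reset_types = {3: "all-lights-on"}
#
#   AlienFXController.supported_controllers.append(AlienFXControllerExample())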
| ashwinm76/alienfx | alienfx/core/controller.py | Python | gpl-3.0 | 11,875 |
from django.test import TestCase
from django.utils.timezone import now
from promises.models import Promise, Category
from popolo.models import Person
from taggit.models import Tag
from ..models import TagExtraCss
nownow = now()
class TagsExtraCssTestCase(TestCase):
def setUp(self):
self.person = Person.objects.create(name=u"A person")
self.category = Category.objects.create(name="Education")
self.promise = Promise.objects.create(name="this is a promise",\
description="this is a description",\
date = nownow,\
person = self.person,
category = self.category
)
def test_a_tag_can_have_extra_css(self):
'''A tag can have an extra css to display extra things'''
self.promise.tags.add("test")
tag = self.promise.tags.first()
extracss = TagExtraCss.objects.create(tag=tag, classes="extraclass")
self.assertTrue(extracss)
self.assertEquals(extracss.tag, tag)
self.assertEquals(extracss.classes, "extraclass")
def test_tag_css_unicode(self):
        '''A tag css has a unicode representation'''
self.promise.tags.add("test")
tag = self.promise.tags.first()
extracss = TagExtraCss.objects.create(tag=tag, classes="extraclass")
self.assertEquals(extracss.__unicode__(), u"extraclass for test")
def test_tag_related_name_(self):
        '''A tag has extracss entries'''
self.promise.tags.add("test")
tag = self.promise.tags.first()
extracss = TagExtraCss.objects.create(tag=tag, classes="extraclass")
self.assertIn(extracss, tag.extracss.all())
| ciudadanointeligente/check-it | promises_web/tests/tags_extra_css_tests.py | Python | gpl-3.0 | 1,786 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('cli-requirements.txt') as f:
cli_requirements = f.read().splitlines()
setuptools.setup(
name="uwg",
use_scm_version=True,
setup_requires=['setuptools_scm'],
author="Ladybug Tools",
author_email="[email protected]",
description="Python application for modeling the urban heat island effect.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ladybug-tools/uwg",
packages=setuptools.find_packages(exclude=["tests*", "resources*"]),
include_package_data=True,
install_requires=requirements,
extras_require={
'cli': cli_requirements
},
entry_points={
"console_scripts": ["uwg = uwg.cli:main"]
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent"
],
)
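# Usage sketch (assumed commands, run from the repository root):
#   pip install .          # library only
#   pip install .[cli]     # library plus the 'uwg' console script defined above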
| chriswmackey/UWG_Python | setup.py | Python | gpl-3.0 | 1,278 |
import re
import sys
import whoisSrvDict
import whoispy_sock
import parser_branch
OK = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def query(domainName):
rawMsg = ""
tldName = ""
whoisSrvAddr = ""
    regex = re.compile(r'.+\..+')
match = regex.search(domainName)
if not match:
# Invalid domain
_display_fail("Invalid domain format")
return None
    # Extract the TLD
    regex = re.compile(r'\..+')
match = regex.search(domainName)
if match:
tldName = match.group()
else:
_display_fail("Can not parse TLD")
return None
# Get TLD List
if not (tldName in whoisSrvDict.get_whoisSrvDict()):
_display_fail("Not Found TLD whois server")
return None
whoisSrvAddr = whoisSrvDict.get_whoisSrvDict().get(tldName)
    rawMsg = whoispy_sock.get_rawMsg(whoisSrvAddr, domainName, 43)
return parser_branch.get_parser(rawMsg, whoisSrvAddr)
# Display method
def _display_fail(msg):
sys.stdout.write( FAIL )
sys.stdout.write("%s\n" % msg)
sys.stdout.write( ENDC )
def _display_safe(msg):
sys.stdout.write( OK )
sys.stdout.write("%s\n" % msg)
sys.stdout.write( ENDC )
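# Usage sketch (illustrative; the import path is assumed from the repo layout):
# query() prints a coloured error and returns None on failure, otherwise it
# returns whatever parser_branch.get_parser() produces for the raw response.
#
#   from whoispy import whoispy
#   record = whoispy.query("example.com")
#   if record is not None:
#       print(record)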
| nemumu/whoispy | whoispy/whoispy.py | Python | gpl-3.0 | 1,198 |
from vectores_oo import Vector
x = input('vector U componente X= ')
y = input('vector U componente Y= ')
U = Vector(x,y)
m = input('vector V magnitud= ')
a = input('vector V angulo= ')
V = Vector(m=m, a=a)
E = input('Escalar= ')
print "U=%s" % U
print "V=%s" % V
print 'UxE=%s' % U.x_escalar(E)
print 'VxE=%s' % V.x_escalar(E)
print 'U+V=%s' % U.Suma(V)
print 'U.V=%s' % U.ProductoPunto(V)
print '|UxV|=%s' % U.Modulo_ProductoCruz(V)
| rgarcia-herrera/vectores | vectores.py | Python | gpl-3.0 | 446 |
"""
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
A sample template for RESTx components, written in Python.
"""
import urllib
import restx.components
import restx.settings as settings
from restx.platform_specifics import STORAGE_OBJECT
from restx.components.api import *
from org.mulesoft.restx.exception import *
class _ResourceCreateForm(BaseComponent):
# Name, description and doc string of the component as it should appear to the user.
NAME = "_ResourceCreateForm" # Names starting with a '_' are kept private
DESCRIPTION = "Allows creation of a new resource by displaying a resource creation form"
DOCUMENTATION = \
"""The resource gets the name of a component as parameter at run time.
It then reads information about the component and constructs a proper
HTML form suitable for resource creation.
The user submits the filled-out form and a new resource is created.
"""
PARAM_DEFINITION = {}
# A dictionary with information about each exposed service method (sub-resource).
SERVICES = {
"form" : {
"desc" : "Show the resource creation form",
"params" : {
"component_name" : ParameterDef(PARAM_STRING, "Name of the component", required=True),
"message" : ParameterDef(PARAM_STRING, "An error message", required=False, default=""),
"specialized" : ParameterDef(PARAM_BOOL, "Indicates if this is based on a specialized component", required=False, default=False),
},
"positional_params": [ "component_name" ]
},
}
def __create(self, input, component_name, specialized=False):
"""
Accept a resource creation form for a specified component.
"""
d = dict()
for name, value in input.items():
path_elems = name.split("__")
d2 = d
for i, pe in enumerate(path_elems):
if i < len(path_elems)-1:
# More elements to come later? We must create a dict
d2 = d2.setdefault(pe, dict())
else:
if value:
d2[pe] = value
try:
return (True, makeResource(component_name, d, specialized), d)
except RestxException, e:
return (False, e.msg, d)
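    # Worked example (illustrative, added for clarity): __create folds form
    # field names containing '__' into nested dicts. For instance, POSTed
    # input of
    #     {"params__api_key": "x",
    #      "resource_creation_params__suggested_name": "n"}
    # becomes
    #     {"params": {"api_key": "x"},
    #      "resource_creation_params": {"suggested_name": "n"}}
    # before being handed to makeResource().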
def form(self, method, input, component_name, message="", specialized=False):
"""
Display a resource creation form for a specified component.
@param method: The HTTP request method.
@type method: string
@param input: Any data that came in the body of the request.
@type input: string
@param component_name: Name of the component for which to create the resource.
@type component_name: string
@param message: An error message to be displayed above the form.
@type message: string
@return: The output data of this service.
@rtype: Result
"""
input_params = dict()
input_rctp = dict()
        if input and method == HttpMethod.POST:
flag, msg, input = self.__create(input, component_name, specialized)
if not flag:
message = msg
else:
return Result.created(msg['uri'], msg)
if input:
if type(input) is dict:
# We receive a dict of values if the 'create' method discovered an
# error. In that case, the values should be used to pre-populate
# the fields when the form is re-displayed (with the error messsage
# on top).
input_rctp = input.get('resource_creation_params', dict()) # Resource creation time parameters
input_params = input.get('params', dict()) # Other parameters
if specialized:
# Need to read the definition of the partial resource and get the
# component name from there.
specialized_code_name = component_name
specialized_def = STORAGE_OBJECT.loadResourceFromStorage(specialized_code_name, True)
component_uri = specialized_def['private']['code_uri']
elems = component_uri.split("/")
component_name = elems[len(elems)-1]
# Take the parameter map from the component
comp = restx.components.make_component(component_name)
if not comp:
return Result.notFound("Cannot find component '%s'" % component_name)
header = settings.HTML_HEADER
# Assemble the form elements for the parameters
params = dict()
params.update(comp.getParams()) # In case this is a Java component, we get a Python dict this way
if specialized:
fname = specialized_def['public']['name']
fdesc = specialized_def['public']['desc']
# Remove all parameters that have been specified in the specialized component resource
# definition already
spec_params = specialized_def['private'].get('params')
if spec_params:
for name in spec_params:
if name in params:
del params[name]
else:
fname = comp.getName()
fdesc = comp.getDesc()
param_fields_html = ""
if params:
param_field_names = params.keys()
param_field_names.sort()
for pname in param_field_names:
pdef = params[pname]
if not pdef.required:
opt_str = "<br>optional, default: %s" % pdef.getDefaultVal()
else:
opt_str = ""
values = input_params.get(pname)
if type(values) is not list and pdef.isList():
if values is None:
values = []
else:
values = [ values ]
param_fields_html += \
"""<tr>
<td valign=top id="%s_name">%s<br><small>(%s%s)</small></td>
<td valign=top>%s</td>
</tr>""" % (pname, pname, pdef.desc, opt_str, pdef.html_type("params__"+pname, values))
if message:
msg = "<b><i><font color=red>%s</font></i></b><br><p>" % message
else:
msg = ""
body = """
<h3>Resource creation form for: %s</h3>
<p><i>"%s"</i></p>
<hr>
Please enter the resource configuration...<br><p>
%s
<form id="resource_form" name="input" action="%s" method="POST">
<table>""" % (fname, fdesc, msg, "%s%s/form/%s%s" % (settings.DOCUMENT_ROOT, self.getMyResourceUri(),
component_name if not specialized else specialized_code_name, "?specialized=y" if specialized else ""))
# Gather any initial values of the resource creation time form fields
suggested_name_value = input_rctp.get("suggested_name", "")
if suggested_name_value:
suggested_name_value = 'value="%s" ' % suggested_name_value
desc_value = input_rctp.get("desc", "")
if desc_value:
desc_value = 'value="%s" ' % desc_value
specialized_value = "checked " if input_rctp.get("specialized") in [ "on", "ON" ] else " "
if not specialized:
body += """
<tr>
<td id="Make_this_a_specialized_component_name">Make this a specialized component:</td>
<td><input type="checkbox" %s id="resource_creation_params__specialized" name="resource_creation_params__specialized" /><label for=resource_creation_params__specialized><small>Can only be used as basis for other resources</small></label></td>
</tr>
""" % specialized_value
body += """
<tr>
<td id="Resource_name_name">Resource name:</td>
<td><input type="text" %sname="resource_creation_params__suggested_name" id="resource_creation_params__suggested_name" /></td>
</tr>
<tr>
<td id="Description_name">Description:<br><small>(optional)</small></td>
<td><input type="text" %sname="resource_creation_params__desc" id="resource_creation_params__desc" /></td>
</tr>
%s
<tr><td colspan=2 align=center><input id="submit_button" type="submit" value="Submit" /></tr>
</table>
</form>""" % (suggested_name_value, desc_value, param_fields_html)
footer = settings.HTML_FOOTER
return Result.ok(header + body + footer).addHeader("Content-type", "text/html; charset=UTF-8")
| jbrendel/RESTx | src/python/restx/components/_ResourceCreateForm.py | Python | gpl-3.0 | 9,666 |
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2010
# Copyright (C) Matthias Dieter Wallnoefer 2009
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <[email protected]> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Convenience functions for using the SAM."""
import samba
import ldb
import time
import base64
from samba import dsdb
from samba.ndr import ndr_unpack, ndr_pack
from samba.dcerpc import drsblobs, misc
__docformat__ = "restructuredText"
class SamDB(samba.Ldb):
"""The SAM database."""
hash_oid_name = {}
def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
credentials=None, flags=0, options=None, global_schema=True,
auto_connect=True, am_rodc=None):
self.lp = lp
if not auto_connect:
url = None
elif url is None and lp is not None:
url = lp.samdb_url()
super(SamDB, self).__init__(url=url, lp=lp, modules_dir=modules_dir,
session_info=session_info, credentials=credentials, flags=flags,
options=options)
if global_schema:
dsdb._dsdb_set_global_schema(self)
if am_rodc is not None:
dsdb._dsdb_set_am_rodc(self, am_rodc)
def connect(self, url=None, flags=0, options=None):
if self.lp is not None:
url = self.lp.private_path(url)
super(SamDB, self).connect(url=url, flags=flags,
options=options)
def am_rodc(self):
return dsdb._am_rodc(self)
def domain_dn(self):
return str(self.get_default_basedn())
def enable_account(self, search_filter):
"""Enables an account
:param search_filter: LDAP filter to find the user (eg
samccountname=name)
"""
flags = samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_PASSWD_NOTREQD
self.toggle_userAccountFlags(search_filter, flags, on=False)
def toggle_userAccountFlags(self, search_filter, flags, on=True, strict=False):
"""toggle_userAccountFlags
:param search_filter: LDAP filter to find the user (eg
samccountname=name)
:flags: samba.dsdb.UF_* flags
:on: on=True (default) => set, on=False => unset
:strict: strict=False (default) ignore if no action is needed
strict=True raises an Exception if...
"""
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=["userAccountControl"])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % search_filter)
assert(len(res) == 1)
account_dn = res[0].dn
old_uac = int(res[0]["userAccountControl"][0])
if on:
if strict and (old_uac & flags):
error = 'userAccountFlags[%d:0x%08X] already contain 0x%X' % (old_uac, old_uac, flags)
raise Exception(error)
new_uac = old_uac | flags
else:
if strict and not (old_uac & flags):
error = 'userAccountFlags[%d:0x%08X] not contain 0x%X' % (old_uac, old_uac, flags)
raise Exception(error)
new_uac = old_uac & ~flags
if old_uac == new_uac:
return
mod = """
dn: %s
changetype: modify
delete: userAccountControl
userAccountControl: %u
add: userAccountControl
userAccountControl: %u
""" % (account_dn, old_uac, new_uac)
self.modify_ldif(mod)
def force_password_change_at_next_login(self, search_filter):
"""Forces a password change at next login
:param search_filter: LDAP filter to find the user (eg
samccountname=name)
"""
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=[])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % search_filter)
assert(len(res) == 1)
user_dn = res[0].dn
mod = """
dn: %s
changetype: modify
replace: pwdLastSet
pwdLastSet: 0
""" % (user_dn)
self.modify_ldif(mod)
def newgroup(self, groupname, groupou=None, grouptype=None,
description=None, mailaddress=None, notes=None, sd=None):
"""Adds a new group with additional parameters
:param groupname: Name of the new group
:param grouptype: Type of the new group
:param description: Description of the new group
:param mailaddress: Email address of the new group
:param notes: Notes of the new group
:param sd: security descriptor of the object
"""
group_dn = "CN=%s,%s,%s" % (groupname, (groupou or "CN=Users"), self.domain_dn())
        # The new group record. Note the reliance on the SAMLDB module which
        # fills in the default information
ldbmessage = {"dn": group_dn,
"sAMAccountName": groupname,
"objectClass": "group"}
if grouptype is not None:
ldbmessage["groupType"] = self.normalise_int32(grouptype)
if description is not None:
ldbmessage["description"] = description
if mailaddress is not None:
ldbmessage["mail"] = mailaddress
if notes is not None:
ldbmessage["info"] = notes
if sd is not None:
ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)
self.add(ldbmessage)
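    # Usage sketch (illustrative; the URL, loadparm and credential objects are
    # placeholders for whatever the caller already has):
    #
    #   samdb = SamDB(url="ldap://localhost", lp=lp,
    #                 session_info=session_info, credentials=creds)
    #   samdb.newgroup("engineers", description="Engineering staff")
    #   samdb.add_remove_group_members("engineers", "alice,bob")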
def deletegroup(self, groupname):
"""Deletes a group
:param groupname: Name of the target group
"""
groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (groupname, "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())
self.transaction_start()
try:
targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=groupfilter, attrs=[])
if len(targetgroup) == 0:
raise Exception('Unable to find group "%s"' % groupname)
assert(len(targetgroup) == 1)
self.delete(targetgroup[0].dn)
except Exception:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def add_remove_group_members(self, groupname, listofmembers,
add_members_operation=True):
"""Adds or removes group members
:param groupname: Name of the target group
:param listofmembers: Comma-separated list of group members
:param add_members_operation: Defines if its an add or remove
operation
"""
groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (groupname, "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())
groupmembers = listofmembers.split(',')
self.transaction_start()
try:
targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=groupfilter, attrs=['member'])
if len(targetgroup) == 0:
raise Exception('Unable to find group "%s"' % groupname)
assert(len(targetgroup) == 1)
modified = False
addtargettogroup = """
dn: %s
changetype: modify
""" % (str(targetgroup[0].dn))
for member in groupmembers:
targetmember = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression="(|(sAMAccountName=%s)(CN=%s))" % (member, member), attrs=[])
if len(targetmember) != 1:
continue
if add_members_operation is True and (targetgroup[0].get('member') is None or str(targetmember[0].dn) not in targetgroup[0]['member']):
modified = True
addtargettogroup += """add: member
member: %s
""" % (str(targetmember[0].dn))
elif add_members_operation is False and (targetgroup[0].get('member') is not None and str(targetmember[0].dn) in targetgroup[0]['member']):
modified = True
addtargettogroup += """delete: member
member: %s
""" % (str(targetmember[0].dn))
if modified is True:
self.modify_ldif(addtargettogroup)
except Exception:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def newuser(self, username, password,
force_password_change_at_next_login_req=False,
useusernameascn=False, userou=None, surname=None, givenname=None,
initials=None, profilepath=None, scriptpath=None, homedrive=None,
homedirectory=None, jobtitle=None, department=None, company=None,
description=None, mailaddress=None, internetaddress=None,
telephonenumber=None, physicaldeliveryoffice=None, sd=None,
setpassword=True):
"""Adds a new user with additional parameters
:param username: Name of the new user
:param password: Password for the new user
:param force_password_change_at_next_login_req: Force password change
        :param useusernameascn: Use username as cn rather than firstname +
            initials + lastname
:param userou: Object container (without domainDN postfix) for new user
:param surname: Surname of the new user
:param givenname: First name of the new user
:param initials: Initials of the new user
:param profilepath: Profile path of the new user
:param scriptpath: Logon script path of the new user
:param homedrive: Home drive of the new user
:param homedirectory: Home directory of the new user
:param jobtitle: Job title of the new user
:param department: Department of the new user
:param company: Company of the new user
        :param description: Description of the new user
:param mailaddress: Email address of the new user
:param internetaddress: Home page of the new user
:param telephonenumber: Phone number of the new user
:param physicaldeliveryoffice: Office location of the new user
:param sd: security descriptor of the object
:param setpassword: optionally disable password reset
"""
displayname = ""
if givenname is not None:
displayname += givenname
if initials is not None:
displayname += ' %s.' % initials
if surname is not None:
displayname += ' %s' % surname
cn = username
        if not useusernameascn and displayname != "":
cn = displayname
user_dn = "CN=%s,%s,%s" % (cn, (userou or "CN=Users"), self.domain_dn())
dnsdomain = ldb.Dn(self, self.domain_dn()).canonical_str().replace("/", "")
user_principal_name = "%s@%s" % (username, dnsdomain)
        # The new user record. Note the reliance on the SAMLDB module which
        # fills in the default information
ldbmessage = {"dn": user_dn,
"sAMAccountName": username,
"userPrincipalName": user_principal_name,
"objectClass": "user"}
if surname is not None:
ldbmessage["sn"] = surname
if givenname is not None:
ldbmessage["givenName"] = givenname
        if displayname != "":
ldbmessage["displayName"] = displayname
ldbmessage["name"] = displayname
if initials is not None:
ldbmessage["initials"] = '%s.' % initials
if profilepath is not None:
ldbmessage["profilePath"] = profilepath
if scriptpath is not None:
ldbmessage["scriptPath"] = scriptpath
if homedrive is not None:
ldbmessage["homeDrive"] = homedrive
if homedirectory is not None:
ldbmessage["homeDirectory"] = homedirectory
if jobtitle is not None:
ldbmessage["title"] = jobtitle
if department is not None:
ldbmessage["department"] = department
if company is not None:
ldbmessage["company"] = company
if description is not None:
ldbmessage["description"] = description
if mailaddress is not None:
ldbmessage["mail"] = mailaddress
if internetaddress is not None:
ldbmessage["wWWHomePage"] = internetaddress
if telephonenumber is not None:
ldbmessage["telephoneNumber"] = telephonenumber
if physicaldeliveryoffice is not None:
ldbmessage["physicalDeliveryOfficeName"] = physicaldeliveryoffice
if sd is not None:
ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)
self.transaction_start()
try:
self.add(ldbmessage)
# Sets the password for it
if setpassword:
self.setpassword("(samAccountName=%s)" % username, password,
force_password_change_at_next_login_req)
except Exception:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def setpassword(self, search_filter, password,
force_change_at_next_login=False, username=None):
"""Sets the password for a user
        :param search_filter: LDAP filter to find the user (e.g.
            samaccountname=name)
:param password: Password for the user
:param force_change_at_next_login: Force password change
"""
self.transaction_start()
try:
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=[])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % (username or search_filter))
if len(res) > 1:
                raise Exception('Matched %u users with filter "%s"' % (len(res), search_filter))
user_dn = res[0].dn
setpw = """
dn: %s
changetype: modify
replace: unicodePwd
unicodePwd:: %s
""" % (user_dn, base64.b64encode(("\"" + password + "\"").encode('utf-16-le')))
self.modify_ldif(setpw)
if force_change_at_next_login:
self.force_password_change_at_next_login(
"(dn=" + str(user_dn) + ")")
# modify the userAccountControl to remove the disabled bit
self.enable_account(search_filter)
except Exception:
self.transaction_cancel()
raise
else:
self.transaction_commit()
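    # Illustrative note (not part of the original source): Active Directory
    # expects unicodePwd to be the literal password wrapped in double quotes,
    # encoded as UTF-16-LE and then base64-encoded for the LDIF value, which
    # is exactly what setpassword builds above. For example:
    #   base64.b64encode(('"' + 'Pa$$w0rd' + '"').encode('utf-16-le'))
    #   -> 'IgBQAGEAJAAkAHcAMAByAGQAIgA='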
def setexpiry(self, search_filter, expiry_seconds, no_expiry_req=False):
"""Sets the account expiry for a user
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
:param expiry_seconds: expiry time from now in seconds
:param no_expiry_req: if set, then don't expire password
"""
self.transaction_start()
try:
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter,
attrs=["userAccountControl", "accountExpires"])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % search_filter)
assert(len(res) == 1)
user_dn = res[0].dn
userAccountControl = int(res[0]["userAccountControl"][0])
accountExpires = int(res[0]["accountExpires"][0])
if no_expiry_req:
userAccountControl = userAccountControl | 0x10000
accountExpires = 0
else:
userAccountControl = userAccountControl & ~0x10000
accountExpires = samba.unix2nttime(expiry_seconds + int(time.time()))
setexp = """
dn: %s
changetype: modify
replace: userAccountControl
userAccountControl: %u
replace: accountExpires
accountExpires: %u
""" % (user_dn, userAccountControl, accountExpires)
self.modify_ldif(setexp)
except Exception:
self.transaction_cancel()
raise
else:
self.transaction_commit()
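    # Illustrative note (not part of the original source): 0x10000 is the
    # UF_DONT_EXPIRE_PASSWD bit of userAccountControl, and accountExpires
    # holds an NT timestamp (100 ns intervals since 1601-01-01), which is
    # what samba.unix2nttime builds above from a Unix epoch time.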
def set_domain_sid(self, sid):
"""Change the domain SID used by this LDB.
:param sid: The new domain sid to use.
"""
dsdb._samdb_set_domain_sid(self, sid)
def get_domain_sid(self):
"""Read the domain SID used by this LDB. """
return dsdb._samdb_get_domain_sid(self)
domain_sid = property(get_domain_sid, set_domain_sid,
"SID for the domain")
def set_invocation_id(self, invocation_id):
"""Set the invocation id for this SamDB handle.
:param invocation_id: GUID of the invocation id.
"""
dsdb._dsdb_set_ntds_invocation_id(self, invocation_id)
def get_invocation_id(self):
"""Get the invocation_id id"""
return dsdb._samdb_ntds_invocation_id(self)
invocation_id = property(get_invocation_id, set_invocation_id,
"Invocation ID GUID")
def get_oid_from_attid(self, attid):
return dsdb._dsdb_get_oid_from_attid(self, attid)
def get_attid_from_lDAPDisplayName(self, ldap_display_name,
is_schema_nc=False):
'''return the attribute ID for a LDAP attribute as an integer as found in DRSUAPI'''
return dsdb._dsdb_get_attid_from_lDAPDisplayName(self,
ldap_display_name, is_schema_nc)
def get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name):
'''return the syntax OID for a LDAP attribute as a string'''
return dsdb._dsdb_get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name)
def set_ntds_settings_dn(self, ntds_settings_dn):
"""Set the NTDS Settings DN, as would be returned on the dsServiceName
rootDSE attribute.
This allows the DN to be set before the database fully exists
:param ntds_settings_dn: The new DN to use
"""
dsdb._samdb_set_ntds_settings_dn(self, ntds_settings_dn)
def get_ntds_GUID(self):
"""Get the NTDS objectGUID"""
return dsdb._samdb_ntds_objectGUID(self)
def server_site_name(self):
"""Get the server site name"""
return dsdb._samdb_server_site_name(self)
def load_partition_usn(self, base_dn):
return dsdb._dsdb_load_partition_usn(self, base_dn)
def set_schema(self, schema):
self.set_schema_from_ldb(schema.ldb)
def set_schema_from_ldb(self, ldb_conn):
dsdb._dsdb_set_schema_from_ldb(self, ldb_conn)
def dsdb_DsReplicaAttribute(self, ldb, ldap_display_name, ldif_elements):
'''convert a list of attribute values to a DRSUAPI DsReplicaAttribute'''
return dsdb._dsdb_DsReplicaAttribute(ldb, ldap_display_name, ldif_elements)
def dsdb_normalise_attributes(self, ldb, ldap_display_name, ldif_elements):
'''normalise a list of attribute values'''
return dsdb._dsdb_normalise_attributes(ldb, ldap_display_name, ldif_elements)
def get_attribute_from_attid(self, attid):
""" Get from an attid the associated attribute
:param attid: The attribute id for searched attribute
:return: The name of the attribute associated with this id
"""
        if len(self.hash_oid_name.keys()) == 0:
            self._populate_oid_attid()
        oid = self.get_oid_from_attid(attid)
        if oid in self.hash_oid_name:
            return self.hash_oid_name[oid]
        else:
            return None
def _populate_oid_attid(self):
"""Populate the hash hash_oid_name.
This hash contains the oid of the attribute as a key and
its display name as a value
"""
self.hash_oid_name = {}
res = self.search(expression="objectClass=attributeSchema",
controls=["search_options:1:2"],
attrs=["attributeID",
"lDAPDisplayName"])
if len(res) > 0:
for e in res:
strDisplay = str(e.get("lDAPDisplayName"))
self.hash_oid_name[str(e.get("attributeID"))] = strDisplay
def get_attribute_replmetadata_version(self, dn, att):
"""Get the version field trom the replPropertyMetaData for
the given field
:param dn: The on which we want to get the version
:param att: The name of the attribute
:return: The value of the version field in the replPropertyMetaData
for the given attribute. None if the attribute is not replicated
"""
res = self.search(expression="dn=%s" % dn,
scope=ldb.SCOPE_SUBTREE,
controls=["search_options:1:2"],
attrs=["replPropertyMetaData"])
if len(res) == 0:
return None
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(res[0]["replPropertyMetaData"]))
ctr = repl.ctr
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
for o in ctr.array:
# Search for Description
att_oid = self.get_oid_from_attid(o.attid)
            if att_oid in self.hash_oid_name and\
               att.lower() == self.hash_oid_name[att_oid].lower():
return o.version
return None
def set_attribute_replmetadata_version(self, dn, att, value,
addifnotexist=False):
res = self.search(expression="dn=%s" % dn,
scope=ldb.SCOPE_SUBTREE,
controls=["search_options:1:2"],
attrs=["replPropertyMetaData"])
if len(res) == 0:
return None
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(res[0]["replPropertyMetaData"]))
ctr = repl.ctr
now = samba.unix2nttime(int(time.time()))
found = False
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
for o in ctr.array:
# Search for Description
att_oid = self.get_oid_from_attid(o.attid)
            if att_oid in self.hash_oid_name and\
               att.lower() == self.hash_oid_name[att_oid].lower():
found = True
seq = self.sequence_number(ldb.SEQ_NEXT)
o.version = value
o.originating_change_time = now
o.originating_invocation_id = misc.GUID(self.get_invocation_id())
o.originating_usn = seq
o.local_usn = seq
        if not found and addifnotexist and len(ctr.array) > 0:
o2 = drsblobs.replPropertyMetaData1()
o2.attid = 589914
att_oid = self.get_oid_from_attid(o2.attid)
seq = self.sequence_number(ldb.SEQ_NEXT)
o2.version = value
o2.originating_change_time = now
o2.originating_invocation_id = misc.GUID(self.get_invocation_id())
o2.originating_usn = seq
o2.local_usn = seq
found = True
tab = ctr.array
tab.append(o2)
ctr.count = ctr.count + 1
ctr.array = tab
        if found:
replBlob = ndr_pack(repl)
msg = ldb.Message()
msg.dn = res[0].dn
msg["replPropertyMetaData"] = ldb.MessageElement(replBlob,
ldb.FLAG_MOD_REPLACE,
"replPropertyMetaData")
self.modify(msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])
def write_prefixes_from_schema(self):
dsdb._dsdb_write_prefixes_from_schema_to_ldb(self)
def get_partitions_dn(self):
return dsdb._dsdb_get_partitions_dn(self)
def set_minPwdAge(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["minPwdAge"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdAge")
self.modify(m)
def get_minPwdAge(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdAge"])
if len(res) == 0:
return None
elif not "minPwdAge" in res[0]:
return None
else:
return res[0]["minPwdAge"][0]
def set_minPwdLength(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["minPwdLength"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdLength")
self.modify(m)
def get_minPwdLength(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdLength"])
if len(res) == 0:
return None
elif not "minPwdLength" in res[0]:
return None
else:
return res[0]["minPwdLength"][0]
def set_pwdProperties(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["pwdProperties"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "pwdProperties")
self.modify(m)
def get_pwdProperties(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["pwdProperties"])
if len(res) == 0:
return None
elif not "pwdProperties" in res[0]:
return None
else:
return res[0]["pwdProperties"][0]
def set_dsheuristics(self, dsheuristics):
m = ldb.Message()
m.dn = ldb.Dn(self, "CN=Directory Service,CN=Windows NT,CN=Services,%s"
% self.get_config_basedn().get_linearized())
if dsheuristics is not None:
m["dSHeuristics"] = ldb.MessageElement(dsheuristics,
ldb.FLAG_MOD_REPLACE, "dSHeuristics")
else:
m["dSHeuristics"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE,
"dSHeuristics")
self.modify(m)
def get_dsheuristics(self):
res = self.search("CN=Directory Service,CN=Windows NT,CN=Services,%s"
% self.get_config_basedn().get_linearized(),
scope=ldb.SCOPE_BASE, attrs=["dSHeuristics"])
if len(res) == 0:
dsheuristics = None
elif "dSHeuristics" in res[0]:
dsheuristics = res[0]["dSHeuristics"][0]
else:
dsheuristics = None
return dsheuristics
def create_ou(self, ou_dn, description=None, name=None, sd=None):
"""Creates an organizationalUnit object
:param ou_dn: dn of the new object
:param description: description attribute
:param name: name atttribute
:param sd: security descriptor of the object, can be
an SDDL string or security.descriptor type
"""
m = {"dn": ou_dn,
"objectClass": "organizationalUnit"}
if description:
m["description"] = description
if name:
m["name"] = name
if sd:
m["nTSecurityDescriptor"] = ndr_pack(sd)
self.add(m)
def normalise_int32(self, ivalue):
        '''normalise an LDAP integer to a signed 32-bit value'''
if int(ivalue) & 0x80000000:
return str(int(ivalue) - 0x100000000)
return str(ivalue)
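# Illustrative examples (not part of the original module): normalise_int32
# folds LDAP's unsigned 32-bit encoding into Python's signed range, so on a
# SamDB instance:
#   samdb.normalise_int32(0x7FFFFFFF) -> "2147483647"
#   samdb.normalise_int32(0x80000000) -> "-2147483648"
#   samdb.normalise_int32(0xFFFFFFFF) -> "-1"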
| gwr/samba | source4/scripting/python/samba/samdb.py | Python | gpl-3.0 | 28,037 |
#
# Copyright 2009-2010 Goran Sterjov
# This file is part of Myelin.
#
# Myelin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Myelin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Myelin. If not, see <http://www.gnu.org/licenses/>.
#
import ctypes
from type import Type
# get library
import myelin.library
_lib = myelin.library.get_library()
_types = []
def add_type (klass):
_types.append (klass)
def get_type (type):
for klass in _types:
if klass._class.get_type().get_atom() == type.get_atom():
return klass
return None
def get_types ():
return _types
class Value (object):
def __init__ (self, ptr = None):
if ptr is None:
ptr = _lib.myelin_value_new ()
self._ptr = ptr
def __del__ (self):
_lib.myelin_value_unref (self)
def __repr__ (self):
return ("<%s.%s object at %#x with an instance of type %s at %#x>" %
(self.__module__,
self.__class__.__name__,
id(self),
self.get_type().get_name(),
self.as_pointer()))
@classmethod
def from_pointer (cls, ptr):
if ptr is None:
raise ValueError ("Value pointer cannot be 'None'")
instance = cls (ptr)
_lib.myelin_value_ref (instance)
return instance
def from_param (self):
return self._ptr
def get (self):
# empty value
if self.is_empty(): return None
# get value type
type = self.get_type()
atom = type.get_atom()
# convert value types
if not type.is_pointer() and not type.is_reference():
# fundamental types
if atom == Type.type_bool (): return self.get_bool ()
elif atom == Type.type_char (): return self.get_char ()
elif atom == Type.type_uchar (): return self.get_uchar ()
elif atom == Type.type_int (): return self.get_int ()
elif atom == Type.type_uint (): return self.get_uint ()
elif atom == Type.type_long (): return self.get_long ()
elif atom == Type.type_ulong (): return self.get_ulong ()
elif atom == Type.type_int64 (): return self.get_int64 ()
elif atom == Type.type_uint64 (): return self.get_uint64 ()
elif atom == Type.type_float (): return self.get_float ()
elif atom == Type.type_double (): return self.get_double ()
# elif atom == Type.type_string (): return self.get_string ()
# convert value to meta class instance
class_type = get_type (type)
if class_type is not None:
return class_type (instance = self)
        # don't know how to convert the value so just return it as is
else:
return self
def set (self, value, atom = None):
from myelin.module import MetaObject
# convert python types
if type(value) is bool: self.set_bool (value)
# set the right integer type
elif type(value) is int or type(value) is long:
if atom is not None:
if atom == Type.type_char(): self.set_char (value)
elif atom == Type.type_uchar(): self.set_uchar (value)
elif atom == Type.type_int(): self.set_int (value)
elif atom == Type.type_uint(): self.set_uint (value)
elif atom == Type.type_long(): self.set_long (value)
elif atom == Type.type_ulong(): self.set_ulong (value)
# for long only
elif type(value) is long:
if atom == Type.type_int64(): self.set_int64 (value)
elif atom == Type.type_uint64(): self.set_uint64 (value)
else:
if type(value) is int: self.set_long (value)
else: self.set_int64 (value)
elif type(value) is float:
if atom is not None:
if atom == Type.type_float(): self.set_float (value)
elif atom == Type.type_double(): self.set_double (value)
else: self.set_double (value)
elif type(value) is str: self.set_string (value)
# set meta object instance
elif isinstance(value, MetaObject):
val = value._object.get_instance()
self.set_pointer (val.get_type(), val.as_pointer())
else:
raise TypeError ("Cannot determine an equivalent type for the " \
"value type '%s'. Conversion failed." %
type(value))
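    # Illustrative dispatch examples (not part of the original module):
    #   v.set(True)                   # -> set_bool
    #   v.set(3.14)                   # -> set_double (no atom given)
    #   v.set(7, Type.type_uchar())   # -> set_uchar (atom selects the width)
    #   v.set("hi")                   # -> set_string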
def get_type (self):
type = _lib.myelin_value_get_type (self)
return Type.from_pointer (type)
def is_empty (self):
return _lib.myelin_value_is_empty (self)
def clear (self):
_lib.myelin_value_clear (self)
def get_bool (self):
return _lib.myelin_value_get_bool (self)
def set_bool (self, value):
_lib.myelin_value_set_bool (self, value)
def get_char (self):
return _lib.myelin_value_get_char (self)
def set_char (self, value):
_lib.myelin_value_set_char (self, value)
def get_uchar (self):
return _lib.myelin_value_get_uchar (self)
def set_uchar (self, value):
_lib.myelin_value_set_uchar (self, value)
def get_int (self):
return _lib.myelin_value_get_int (self)
def set_int (self, value):
_lib.myelin_value_set_int (self, value)
def get_uint (self):
return _lib.myelin_value_get_uint (self)
def set_uint (self, value):
_lib.myelin_value_set_uint (self, value)
def get_long (self):
return _lib.myelin_value_get_long (self)
def set_long (self, value):
_lib.myelin_value_set_long (self, value)
def get_ulong (self):
return _lib.myelin_value_get_ulong (self)
def set_ulong (self, value):
_lib.myelin_value_set_ulong (self, value)
def get_int64 (self):
return _lib.myelin_value_get_int64 (self)
def set_int64 (self, value):
_lib.myelin_value_set_int64 (self, value)
def get_uint64 (self):
return _lib.myelin_value_get_uint64 (self)
def set_uint64 (self, value):
_lib.myelin_value_set_uint64 (self, value)
def get_float (self):
return _lib.myelin_value_get_float (self)
def set_float (self, value):
_lib.myelin_value_set_float (self, value)
def get_double (self):
return _lib.myelin_value_get_double (self)
def set_double (self, value):
_lib.myelin_value_set_double (self, value)
def get_string (self):
return _lib.myelin_value_get_string (self)
def set_string (self, value):
_lib.myelin_value_set_string (self, value)
def as_pointer (self):
return _lib.myelin_value_as_pointer (self)
def set_pointer (self, type, pointer):
_lib.myelin_value_set_pointer (self, type, pointer)
###############################################
# Prototypes #
###############################################
_lib.myelin_value_new.argtypes = None
_lib.myelin_value_new.restype = ctypes.c_void_p
_lib.myelin_value_ref.argtypes = [Value]
_lib.myelin_value_ref.restype = ctypes.c_void_p
_lib.myelin_value_unref.argtypes = [Value]
_lib.myelin_value_unref.restype = None
_lib.myelin_value_get_type.argtypes = [Value]
_lib.myelin_value_get_type.restype = ctypes.c_void_p
_lib.myelin_value_is_empty.argtypes = [Value]
_lib.myelin_value_is_empty.restype = ctypes.c_bool
_lib.myelin_value_clear.argtypes = [Value]
_lib.myelin_value_clear.restype = None
# boolean
_lib.myelin_value_get_bool.argtypes = [Value]
_lib.myelin_value_get_bool.restype = ctypes.c_bool
_lib.myelin_value_set_bool.argtypes = [Value, ctypes.c_bool]
_lib.myelin_value_set_bool.restype = None
# char
_lib.myelin_value_get_char.argtypes = [Value]
_lib.myelin_value_get_char.restype = ctypes.c_char
_lib.myelin_value_set_char.argtypes = [Value, ctypes.c_char]
_lib.myelin_value_set_char.restype = None
# uchar
_lib.myelin_value_get_uchar.argtypes = [Value]
_lib.myelin_value_get_uchar.restype = ctypes.c_ubyte
_lib.myelin_value_set_uchar.argtypes = [Value, ctypes.c_ubyte]
_lib.myelin_value_set_uchar.restype = None
# integer
_lib.myelin_value_get_int.argtypes = [Value]
_lib.myelin_value_get_int.restype = ctypes.c_int
_lib.myelin_value_set_int.argtypes = [Value, ctypes.c_int]
_lib.myelin_value_set_int.restype = None
# uint
_lib.myelin_value_get_uint.argtypes = [Value]
_lib.myelin_value_get_uint.restype = ctypes.c_uint
_lib.myelin_value_set_uint.argtypes = [Value, ctypes.c_uint]
_lib.myelin_value_set_uint.restype = None
# long
_lib.myelin_value_get_long.argtypes = [Value]
_lib.myelin_value_get_long.restype = ctypes.c_long
_lib.myelin_value_set_long.argtypes = [Value, ctypes.c_long]
_lib.myelin_value_set_long.restype = None
# ulong
_lib.myelin_value_get_ulong.argtypes = [Value]
_lib.myelin_value_get_ulong.restype = ctypes.c_ulong
_lib.myelin_value_set_ulong.argtypes = [Value, ctypes.c_ulong]
_lib.myelin_value_set_ulong.restype = None
# 64bit integer
_lib.myelin_value_get_int64.argtypes = [Value]
_lib.myelin_value_get_int64.restype = ctypes.c_int64
_lib.myelin_value_set_int64.argtypes = [Value, ctypes.c_int64]
_lib.myelin_value_set_int64.restype = None
# unsigned 64bit integer
_lib.myelin_value_get_uint64.argtypes = [Value]
_lib.myelin_value_get_uint64.restype = ctypes.c_uint64
_lib.myelin_value_set_uint64.argtypes = [Value, ctypes.c_uint64]
_lib.myelin_value_set_uint64.restype = None
# float
_lib.myelin_value_get_float.argtypes = [Value]
_lib.myelin_value_get_float.restype = ctypes.c_float
_lib.myelin_value_set_float.argtypes = [Value, ctypes.c_float]
_lib.myelin_value_set_float.restype = None
# double
_lib.myelin_value_get_double.argtypes = [Value]
_lib.myelin_value_get_double.restype = ctypes.c_double
_lib.myelin_value_set_double.argtypes = [Value, ctypes.c_double]
_lib.myelin_value_set_double.restype = None
# string
_lib.myelin_value_get_string.argtypes = [Value]
_lib.myelin_value_get_string.restype = ctypes.c_char_p
_lib.myelin_value_set_string.argtypes = [Value, ctypes.c_char_p]
_lib.myelin_value_set_string.restype = None
# pointer
_lib.myelin_value_as_pointer.argtypes = [Value]
_lib.myelin_value_as_pointer.restype = ctypes.c_void_p
_lib.myelin_value_set_pointer.argtypes = [Value, Type, ctypes.c_void_p]
_lib.myelin_value_set_pointer.restype = None
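# Illustrative usage sketch (not part of the original module); assumes the
# native Myelin library was located by myelin.library.get_library():
#
#   v = Value()        # wraps a fresh myelin_value_new() handle
#   v.set_int(42)      # stored through myelin_value_set_int
#   assert not v.is_empty()
#   v.get()            # dispatches on the value's Type atom and returns 42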
| gsterjov/Myelin | bindings/python/myelin/introspection/value.py | Python | gpl-3.0 | 11,453 |
from chiplotle.geometry.shapes.path import path
from chiplotle.geometry.transforms.perpendicular_displace \
import perpendicular_displace
def line_displaced(start_coord, end_coord, displacements):
'''Returns a Path defined as a line spanning points `start_coord` and
`end_coord`, displaced by scalars `displacements`.
    The number of points in the path is determined by the length of
`displacements`.
'''
p = path([start_coord, end_coord])
perpendicular_displace(p, displacements)
return p
if __name__ == '__main__':
from chiplotle import *
import math
disp = [math.sin(i**0.7 / 3.14159 * 2) * 100 for i in range(200)]
line = line_displaced(Coordinate(0, 0), Coordinate(1000, 1000), disp)
io.view(line)
| drepetto/chiplotle | chiplotle/geometry/shapes/line_displaced.py | Python | gpl-3.0 | 763 |
import unittest
from bolt.discord.permissions import Permission
class TestPermission(unittest.TestCase):
def test_permission_from_list_to_list(self):
expected = ['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS']
permission = Permission(['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS'])
actual = permission.to_list()
self.assertListEqual(sorted(actual), sorted(expected))
def test_permission_from_int_to_list(self):
expected = ['ADMINISTRATOR', 'SEND_MESSAGES']
permission = Permission(2056)
actual = permission.to_list()
self.assertListEqual(sorted(actual), sorted(expected))
def test_permission_in_permission(self):
self.assertTrue("ADMINISTRATOR" in Permission(2056))
def test_permissions_in_permission(self):
self.assertTrue(["ADMINISTRATOR", "SEND_MESSAGES"] in Permission(2056))
def test_permission_not_in_permission(self):
self.assertTrue("USE_VAD" not in Permission(2056))
def test_permissions_not_in_permission(self):
self.assertTrue(["SPEAK", "MANAGE_EMOJIS"] not in Permission(2056))
def test_permission_add(self):
permission = Permission(2056)
self.assertTrue(permission.allows("ADMINISTRATOR"))
self.assertFalse(permission.allows("MENTION_EVERYONE"))
permission.add("MENTION_EVERYONE")
self.assertTrue(permission.allows("MENTION_EVERYONE"))
def test_permission_remove(self):
permission = Permission(2056)
self.assertTrue(permission.allows("ADMINISTRATOR"))
self.assertTrue(permission.allows("SEND_MESSAGES"))
permission.remove("SEND_MESSAGES")
self.assertFalse(permission.allows("SEND_MESSAGES"))
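# Worked example (illustrative, not part of the original tests): the raw
# integer 2056 used above is 0x808, i.e. ADMINISTRATOR (0x008) bitwise-OR'd
# with SEND_MESSAGES (0x800), which is why exactly those two names come back
# from Permission(2056).to_list().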
| Arcbot-Org/Arcbot | tests/discord/test_permission.py | Python | gpl-3.0 | 1,719 |
import requests
import hashlib
import json
import random
import sys
class ApiItemAmount(object):
    # Factory class: __new__ returns a plain dict payload for the API
    # rather than an ApiItemAmount instance.
    def __new__(self, item_type, amount):
        return {"type": item_type, "amount": amount}
class SagaAPI(object):
secret = ""
episodeLengths = {}
apiUrl = ""
clientApi = ""
unlockLevelItemId = -1
unlockLevelImage = ""
debug = True
def __init__(self, session, userId):
self.session = session
self.userId = userId
def api_get(self, method, params):
response = requests.get(self.apiUrl + "/" + method, params=params)
if self.debug:
print self.apiUrl + "/" + method + "\n"
print "===============================\n"
print response.text
print "\n"
return response
def hand_out_winnings(self, item_type, amount):
item = [
ApiItemAmount(item_type, amount)
]
params = {
"_session": self.session,
"arg0": json.dumps(item),
"arg1": 1,
"arg2": 1,
"arg3": "hash",
}
return self.api_get("handOutItemWinnings", params)
# gets the balance of all the items that the player has
def get_balance(self):
params = {"_session": self.session}
return self.api_get("getBalance", params)
def get_gameInitLight(self):
params = {"_session": self.session}
return self.api_get("gameInitLight", params)
# full list with level details
def get_gameInit(self):
params = {"_session": self.session}
return self.api_get("gameInit", params)
def add_life(self):
params = {"_session": self.session}
return self.api_get("addLife", params)
def is_level_unlocked(self, episode, level):
params = {"_session": self.session, "arg0": episode, "arg1": level}
response = self.api_get("isLevelUnlocked", params)
return response.text == "true"
def poll_episodeChampions(self, episode):
params = {"_session": self.session, "arg0": episode}
return self.api_get("getEpisodeChampions", params)
def poll_levelScores(self, episode, level):
params = {"_session": self.session, "arg0": episode, "arg1": level}
return self.api_get("getLevelToplist", params)
def post_unlockLevel(self, episode, level):
params = {"_session": self.session}
placement = "Map,%s,%s" % (episode, level)
payload = [{
"method": "ProductApi.purchase",
"id": 0,
"params": [{
"imageUrl": self.unlockLevelImage,
"orderItems": [{
"productPackageType": self.unlockLevelItemId,
"receiverCoreUserId": self.userId
}],
"placement": placement,
"title": "Level Unlock",
"description": "Buy your way to the next level.",
"currency": "KHC"
}]
}]
unlockAttempt = requests.post(self.clientApi, verify=False, params=params, data=json.dumps(payload)).json()
if self.debug:
print json.dumps(unlockAttempt, sort_keys = False, indent = 4)
return unlockAttempt[0]["result"]["status"] == "ok"
def start_game(self, episode, level):
params = {"_session": self.session, "arg0": episode, "arg1": level}
return self.api_get("gameStart", params).json()["seed"]
def end_game(self, episode, level, seed, score=None):
if score is None:
score = random.randrange(3000, 6000) * 100
dic = {
"timeLeftPercent": -1,
"episodeId": episode,
"levelId": level,
"score": score,
"variant": 0,
"seed": seed,
"reason": 0,
"userId": self.userId,
"secret": self.secret
}
dic["cs"] = hashlib.md5("%(episodeId)s:%(levelId)s:%(score)s:%(timeLeftPercent)s:%(userId)s:%(seed)s:%(secret)s" % dic).hexdigest()[:6]
params = {"_session": self.session, "arg0": json.dumps(dic)}
return self.api_get("gameEnd", params)
def print_scores(self, episode, level):
scores = self.poll_levelScores(episode, level).json()
print json.dumps(scores.values()[0][0], sort_keys = False, indent = 4)
print json.dumps(scores.values()[0][1], sort_keys = False, indent = 4)
print json.dumps(scores.values()[0][2], sort_keys = False, indent = 4)
def print_status(self):
print json.dumps(self.poll_status().json(), sort_keys = False, indent = 4)
def complete_level(self, level):
targetEpisode, targetLevel = self.get_episode_level(level)
is_unlocked = self.is_level_unlocked(targetEpisode, targetLevel)
if not is_unlocked:
self.complete_level(level - 1)
response = self.play_game(targetEpisode, targetLevel).json()
if response["episodeId"] == -1:
needUnlock = False
for event in response["events"]:
if event["type"] == "LEVEL_LOCKED":
needUnlock = True
break
if needUnlock:
self.post_unlockLevel(targetEpisode, targetLevel)
self.complete_level(level)
print "Beat episode {0} level {1}".format(targetEpisode, targetLevel)
def get_episode_level(self, level):
if len(self.episodeLengths) == 0:
response = self.get_gameInit()
episodeDescriptions = response.json()["universeDescription"]["episodeDescriptions"]
for episode in episodeDescriptions:
self.episodeLengths[episode["episodeId"]] = len(episode["levelDescriptions"])
targetEpisode = -1
targetLevel = level
currentEpisode = 1
while targetEpisode == -1:
if targetLevel > self.episodeLengths[currentEpisode]:
targetLevel = targetLevel - self.episodeLengths[currentEpisode]
currentEpisode = currentEpisode + 1
else:
targetEpisode = currentEpisode
break
return targetEpisode, targetLevel
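    # Worked example (illustrative): with self.episodeLengths = {1: 10, 2: 15},
    # get_episode_level(12) walks past episode 1 (12 > 10, leaving
    # targetLevel = 2) and returns (2, 2), i.e. level 2 of episode 2.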
def play_gameAutoScore(self, episode, level, starProgressions=None):
if starProgressions is not None:
minPoints = starProgressions["universeDescription"]["episodeDescriptions"][episode-1]["levelDescriptions"][level-1]["starProgressions"][2]["points"]
randomScore = 1
while (randomScore % 2 != 0):
# generate a random number at most 50000 points over the min 3 star and keep trying until it is even
randomScore = random.randrange(minPoints/10, minPoints/10+5000)
myScore = randomScore * 10
# print "Score: %s out of %s" % (myScore, minPoints)
else:
# revert to pulling the top scores. This probably won't work if none of your friends have made it to that level
scoreList = self.poll_levelScores(episode, level).json()
# take the top score and add 5000 points
myScore = scoreList.values()[0][0]["value"] + 5000
return self.play_game(episode, level, myScore)
def play_gameLoop(self, episode, level):
# create a JSON file full of tons and tons of data but only call it once since it is so large
starProgressions = self.get_gameInit().json()
while True:
try:
result = self.play_gameAutoScore(episode, level, starProgressions).json()
try:
# This is not quite right but it works since LEVEL_GOLD_REWARD still has a episodeId and levelId like LEVEL_UNLOCKED
# This only beats new levels that reported back the new unlocked level
data = json.loads(result["events"][0].values()[2])
data["episodeId"]
data["levelId"]
level = level + 1
except KeyError:
print "Next level wasn't reported, Trying to unlock episode %s..." % (episode+1)
self.post_unlockLevel(episode, level-1)
episode = episode + 1
level = 1
except:
print sys.exc_info()[0]
break
except IndexError:
print "Next level wasn't reported, Trying to unlock episode %s..." % (episode+1)
self.post_unlockLevel(episode, level-1)
episode = episode + 1
level = 1
except:
print sys.exc_info()[0]
break
def play_game(self, episode, level, score=None):
seed = self.start_game(episode, level)
        return self.end_game(episode, level, seed, score)
| boskee/regicide | regicide.py | Python | gpl-3.0 | 8,891 |
#!/usr/bin/python3
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
video_dir = "/mnt/ams2/SD/"
def stack_stack(pic1, pic2):
frame_pil = Image.fromarray(pic1)
stacked_image = pic2
if stacked_image is None:
stacked_image = frame_pil
else:
stacked_image=ImageChops.lighter(stacked_image,frame_pil)
return(stacked_image)
def compute_straight_line(x1,y1,x2,y2,x3,y3):
    # Compare the slopes of (p1,p2) and (p1,p3); for three collinear points
    # the slopes match, so the absolute difference is close to zero.
    print ("COMP STRAIGHT", x1,y1,x2,y2,x3,y3)
    if x2 - x1 != 0:
        a = (y2 - y1) / (x2 - x1)
    else:
        a = 0
    if x3 - x1 != 0:
        b = (y3 - y1) / (x3 - x1)
    else:
        b = 0
    straight_line = abs(a - b)
    return(straight_line)
def crop_center(img,cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2) +12
starty = y//2-(cropy//2) + 4
return img[starty:starty+cropy,startx:startx+cropx]
def fig2data ( fig ):
    """
    @brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw ( )
    # Get the ARGB buffer from the figure; the buffer holds h rows of w pixels
    w,h = fig.canvas.get_width_height()
    buf = np.frombuffer ( fig.canvas.tostring_argb(), dtype=np.uint8 ).copy()
    buf.shape = ( h, w, 4 )
    # canvas.tostring_argb gives pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll ( buf, 3, axis = 2 )
    return buf
def kmeans_cluster(points, num_clusters):
points = np.array(points)
print(points)
clusters = []
cluster_points = []
colors = ('r', 'g', 'b')
est = KMeans(n_clusters=num_clusters)
est.fit(points)
print (est.labels_)
print (len(points))
for i in set(est.labels_):
index = est.labels_ == i
cluster_idx = np.where(est.labels_ == i)
for idxg in cluster_idx:
for idx in idxg:
idx = int(idx)
point = points[idx]
#print ("IDX:",i, idx, point)
cluster_points.append(point)
clusters.append(cluster_points)
cluster_points = []
#print(points[:,0])
#print(points[:,1])
int_lb = est.labels_.astype(float)
#fig = gcf()
fig = Figure()
canvas = FigureCanvas(fig)
plot = fig.add_subplot(1,1,1)
plot.scatter(points[:,0], points[:,1], c=[plt.cm.Spectral(float(i) / 10) for i in est.labels_])
for cluster in clusters:
cxs = []
cys = []
for cp in cluster:
x,y,w,h = cp
cxs.append(x)
cys.append(y)
if len(cxs) > 3:
plot.plot(np.unique(cxs), np.poly1d(np.polyfit(cxs, cys, 1))(np.unique(cxs)))
    plot.set_xlim(0,640)
    plot.set_ylim(0,480)
plot.invert_yaxis()
fig.canvas.draw()
fig.savefig("/tmp/plot.png", dpi=fig.dpi)
#plt.show()
return(clusters)
def calc_dist(x1,y1,x2,y2):
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
return dist
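# Worked example (illustrative): calc_dist(0, 0, 3, 4) returns 5.0,
# the Euclidean distance sqrt(3**2 + 4**2).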
def find_angle(x1,y1,x2,y2):
    # Angle in degrees of the segment from (x1,y1) to (x2,y2); every call
    # site passes coordinates in this x1,y1,x2,y2 order.
    if x2 - x1 != 0:
        a1 = (y2 - y1) / (x2 - x1)
    else:
        a1 = 0
    angle = math.atan(a1)
    angle = math.degrees(angle)
    return(angle)
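# Worked example (illustrative): find_angle(0, 0, 10, 10) is
# math.degrees(math.atan(1.0)) = 45.0; a horizontal segment such as
# find_angle(0, 5, 10, 5) returns 0.0.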
def closest_node(node, nodes):
return nodes[cdist([node], nodes).argmin()]
def find_objects(index, points):
apoints = []
unused_points = []
cl_sort = []
sorted_points = []
last_angle = None
objects = []
group_pts = []
line_segments = []
stars = []
obj_points = []
big_cnts = []
count = 0
x1,y1,w1,h1 = points[index]
print ("Total Points found in image: ", len(points))
used_pts = {}
for i in range(0,len(points)-1):
x1,y1,w1,h1 = points[i]
        for j in range(0,len(points)-1):
            x2,y2,w2,h2 = points[j]
key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
used_pts[key] = 0
key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
used_pts[key2] = 0
possible_stars = []
for i in range(0,len(points)-1):
closest = []
x1,y1,w1,h1 = points[i]
for j in range(0,len(points)-1):
x2,y2,w2,h2 = points[j]
key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
dist = calc_dist(x1,y1,x2,y2)
angle = find_angle(x1,y1,x2,y2)
if x1 != x2 and y1 != y2:
if used_pts[key] == 0 and used_pts[key2] == 0 :
#print("Closest Point:", (int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
closest.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
used_pts[key] = 1
used_pts[key2] = 1
#print("Key has been used:", key, key2)
#else:
# print("Key already used try another one:", key, key2)
#else:
# print ("this point has already been used")
count = count + 1
# of all the close points, make sure that at least 2 points < 25 px dist exist.
conf_closest = []
for cls in closest:
if cls[0] < 100:
conf_closest.append(cls)
if len(closest) > 0:
distsort = np.unique(closest, axis=0)
dist,angle,x1,y1,x2,y2 = distsort[0]
if dist < 50 and len(conf_closest) > 1:
line_segments.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
obj_points.append((int(x1),int(y1), int(w1), int(h1)))
else:
possible_stars.append((int(x1),int(y1),int(w1),int(h1)))
#print("CLOSEST LINE SEGMENT FOR PT: ", distsort[0])
#else:
#print("ERROR! no close points to this one!", x1,y1)
if w1 > 15 or h1 > 15:
# print ("BIG!!! We have a big object here likely containing many line segments.")
big_cnts.append((int(x1),int(y1),int(w1),int(h1)))
for star in possible_stars:
close = 0
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
star_dist = calc_dist(star[0], star[1], x1,y1)
#print ("STARDIST: ", star_dist, star[0], star[1], x1,y1)
if star_dist < 60:
close = 1
if close == 1:
obj_points.append(star)
else:
stars.append(star)
#print ("OBJECT POINTS")
if len(line_segments) > 0:
sorted_lines = sorted(line_segments, key=lambda x: x[2])
else:
sorted_lines = []
#print ("LINE SEGMENTS:")
#for line in sorted_lines:
# print (line)
last_ang = 0
last_dist = 0
line_groups = []
line_group = []
orphan_lines = []
if len(sorted_lines) > 0:
for segment in sorted_lines:
dist,angle,x1,y1,x2,y2 = segment
if last_ang != 0 and (angle -5 < last_ang < angle + 5) and dist < 100:
#print ("Line Segment Part of Existing Group: ", segment)
line_group.append((dist,angle,x1,y1,x2,y2))
else:
#print ("New Group Started!", last_ang, angle )
# print ("Line Segment Part of New Group: ", segment)
if len(line_group) >= 3:
line_groups.append(line_group)
else:
#print("Last line segment was too small to be part of a group! These are random points or stars. Skip for now.")
for line in line_group:
orphan_lines.append(line)
line_group = []
line_group.append((dist,angle,x1,y1,x2,y2))
last_ang = angle
if len(line_group) >= 2:
line_groups.append(line_group)
else:
for line in line_group:
orphan_lines.append(line)
# now make sure all of the line segments in the line group can connect to at least one of the other segments
#print ("Total Line Groups as of now:", len(line_groups))
#print ("Total Orphan Lines as of now:", len(orphan_lines))
#print ("Confirm the line segments are all part of the same group", len(line_groups))
#print ("TOTAL POINTS: ", len(points))
#print ("TOTAL LINE GROUPS: ", len(line_groups))
#print ("ORPHAN GROUPS: ", len(orphan_lines))
#for point in points:
#print ("POINT: ", point)
gc = 1
if len(line_groups) > 0:
for line_group in line_groups:
lc = 1
for line in line_group:
#print("LINE:", line)
dist,ang,x1,y1,x2,y2 = line
#confirm_angle = find_angle(x1,y1,x2,y2)
#print ("GROUP", gc, lc, line, ang, confirm_angle)
lc = lc + 1
gc = gc + 1
#else:
#make sure the obj points are not false positives, if so move to stars.
(line_groups, orphan_lines, stars, obj_points, big_cnts) = conf_objs(line_groups, orphan_lines, stars, obj_points, big_cnts)
return(line_groups, orphan_lines, stars, obj_points, big_cnts)
def conf_objs(line_groups, orphan_lines, stars, obj_points, big_cnts):
print ("CONF OBJS")
print ("LINE GROUPS", len(line_groups))
print ("OBJ POINTS", len(obj_points))
conf_line_groups = []
mx = []
my = []
mw = []
mh = []
#first lets check the line groups and make sure at least 3 points are straight
for line_group in line_groups:
mx = []
my = []
mw = []
mh = []
lgc = 0
for dist,ang,x1,y1,x2,y2 in line_group:
mx.append(x1)
my.append(y1)
print (dist, ang, x1,y1,x2,y2)
print (lgc, "adding MX", x1, mx)
print (lgc, "adding MYs", y1, my)
#mx.append(x2)
#my.append(y2)
lgc = lgc + 1
if len(mx) > 2:
print ("MXs", mx)
print ("MYs", my)
st = compute_straight_line(mx[0],my[0],mx[1],my[1],mx[2],my[2])
else:
st = 100
if st <= 1:
print ("This group is straight")
conf_line_groups.append(line_group)
else:
print ("This group is NOT straight")
orphan_lines.append(line_group)
cc = 0
mx = []
my = []
mw = []
mh = []
    for x,y,w,h in obj_points:
mx.append(x)
my.append(y)
mw.append(w)
mh.append(h)
cc = cc + 1
if len(mx) > 2:
st = compute_straight_line(mx[0],my[0],mx[1],my[1],mx[2],my[2])
else:
st = 100
if st <= 1:
print ("At least 3 of these are straight, we can continue.", st)
else:
print ("These 3 objects are not straight, and thus false!", st)
        for x,y,w,h in obj_points:
            stars.append((x,y,w,h))
obj_points = []
return(line_groups, orphan_lines, stars, obj_points, big_cnts)
def clean_line_groups(line_groups, orphan_lines):
cleaned_line_groups = []
cleaned_line_group = []
for line_group in line_groups:
if len(line_group) == 2:
# make sure these two groups are close enough to each other to be grouped.
(dist,angle,x1,y1,x2,y2) = line_group[0]
(xdist,xangle,xx1,xy1,xx2,xy2) = line_group[1]
group_dist = calc_dist(x1,y1,xx1,xy1)
            if group_dist > 50 or not (angle - 5 < xangle < angle + 5):
                orphan_lines.append(line_group[0])
                orphan_lines.append(line_group[1])
            else:
                cleaned_line_groups.append(line_group)
else:
cleaned_line_groups.append(line_group)
line_groups = cleaned_line_groups
print("CLG:", line_groups)
return(cleaned_line_groups, orphan_lines)
def confirm_cnts(crop):
crop = cv2.GaussianBlur(crop, (5, 5), 0)
avg_flux = np.average(crop)
max_flux = np.amax(crop)
thresh_limit = avg_flux / 2
_, crop_thresh = cv2.threshold(crop, thresh_limit, 255, cv2.THRESH_BINARY)
#(_, cnts, xx) = cv2.findContours(crop_thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#if np.sum(crop_thresh) > (255 * 2):
#print ("CONFIRM:", max_flux, avg_flux, thresh_limit, np.sum(crop_thresh))
#cv2.imshow('pepe', crop_thresh)
#else:
# print ("FAILED:", max_flux, avg_flux, thresh_limit, np.sum(crop_thresh))
#cv2.imshow('pepe', crop)
#cv2.waitKey(100)
return(np.sum(crop_thresh))
def find_best_thresh(image, thresh_limit, type):
go = 1
while go == 1:
_, thresh = cv2.threshold(image, thresh_limit, 255, cv2.THRESH_BINARY)
(_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if type == 0:
cap = 80
else:
cap = 100
if len(cnts) > cap:
thresh_limit = thresh_limit + 1
else:
bad = 0
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if w == image.shape[1]:
bad = 1
if type == 0 and (w >= 10 or h > 10):
bad = 1
if bad == 0:
go = 0
else:
thresh_limit = thresh_limit + 1
#print ("CNTs, BEST THRESH:", str(len(cnts)), thresh_limit)
return(thresh_limit)
def find_objects2(timage, tag, current_image, filename):
stars = []
big_cnts = []
obj_points = []
image = timage
thresh_limit = 10
thresh_limit = find_best_thresh(image, thresh_limit, 0)
# find best thresh limit code here!
line_objects = []
points = []
orphan_lines = []
_, thresh = cv2.threshold(image, thresh_limit, 255, cv2.THRESH_BINARY)
(_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#print ("CNTS:", len(cnts))
hit = 0
objects = []
if len(cnts) < 500:
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if w > 1 and h > 1:
if (w < 10 and h <10):
nothing = 0
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
#cv2.circle(image, (x,y), 20, (120), 1)
#if w != h:
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
else:
#cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
# Convert big object into points and add each one to the points array.
crop = timage[y:y+h,x:x+w]
points.append((x,y,w,h))
if w < 600 and h < 400:
crop_points = find_points_in_crop(crop,x,y,w,h)
for x,y,w,h in crop_points:
print("adding some points",x,y,w,h)
points.append((x,y,w,h))
points.append((x,y,w,h))
#objects.append((x,y,w,h))
else:
image[y:y+h,x:x+w] = [0]
else:
print ("WAY TO MANY CNTS:", len(cnts))
thresh_limit = thresh_limit + 5
return(points)
# find line objects
if (len(objects) + len(points)) > 0:
        line_groups, orphan_lines, stars, obj_points, big_cnts = find_objects(0, points)
else:
line_groups = []
final_group = []
final_groups = []
reject_group = []
reject_groups = []
line_segments = flatten_line_groups(line_groups)
line_segments = sorted(line_segments, key = lambda x: (x[0],x[1]))
if len(line_segments) > 0:
final_group, reject_group = regroup_lines(line_segments)
print ("MIKE!:", len(final_group))
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
if len(reject_group) > 3:
print (len(reject_group), "rejects left. do it again.")
reject_group = sorted(reject_group, key = lambda x: (x[1],x[0]))
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
print (len(reject_group), "rejects left after 2nd try")
if len(reject_group) > 3:
print (len(reject_group), "rejects left. do it again.")
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
print (len(reject_group), "rejects left after 3rd try")
# try to adopt the orphans!
if len(orphan_lines) >= 1:
print (orphan_lines)
final_group, reject_group = regroup_lines(orphan_lines)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
if len(orphan_lines) >= 1:
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
if len(orphan_lines) >= 1:
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
final_groups, orphan_lines = clean_line_groups(final_groups, orphan_lines)
clusters= []
clusters_ab= []
last_x = None
last_y = None
last_ang = None
ang = None
if len(points) > 3:
num_clusters = int(len(points)/3)
clusters = kmeans_cluster(points, num_clusters)
#print ("MIKE CLUSTERS", len(clusters))
for cluster in clusters:
cxs = []
cys = []
for cp in cluster:
x,y,w,h = cp
cxs.append(x)
cys.append(y)
if last_x is not None:
ang = find_angle(x,y,last_x,last_y)
print ("CLUSTER ANGLE:", x,y,last_x,last_y,ang)
if last_ang is not None:
if ang - 5 < last_ang < ang + 5:
cv2.line(image, (x,y), (last_x,last_y), (200), 4)
last_x = x
last_y = y
last_ang = ang
a, b = best_fit (cxs,cys)
mnx = min(cxs)
mny = min(cys)
mmx = max(cxs)
mmy = max(cys)
cv2.rectangle(image, (mnx,mny), (mmx, mmy), (255),1)
#print ("MIKE MIKE XS,", cxs)
#print ("MIKE MIKE YS,", cys)
clusters_ab.append((a,b))
print ("MIKE AB,", a,b)
print ("FINAL ANALYSIS")
print (final_groups)
print ("--------------")
print ("File Name: ", filename)
print ("Total Points:", len(points))
print ("Total Line Segments:", len(line_segments))
print ("Total Final Line Groups:", len(final_groups))
print ("Total Clusters:", len(clusters))
cl =0
for a,b in clusters_ab:
print ("Cluster " + str(cl + 1) + " " + str(len(clusters[cl])) + " points")
print ("LINE AB " + str(a) + " " + str(b))
cl = cl + 1
#print (final_groups)
print ("Total Rejected Lines:", len(reject_group))
gc = 1
    xs = []
    ys = []
for line_group in final_groups:
lc = 1
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
xs.append(x1)
xs.append(x2)
ys.append(y1)
ys.append(y2)
#print (gc, lc, line)
lc = lc + 1
gc = gc + 1
if len(xs) > 0 and len(ys) > 0:
mnx = min(xs)
mxx = max(xs)
mny = min(ys)
mxy = max(ys)
cv2.rectangle(image, (mnx,mny), (mxx, mxy), (255),1)
print ("Total Orphaned Lines:", len(orphan_lines))
if len(line_groups) > 0:
line_segments = flatten_line_groups(line_groups)
find_line_nodes(line_segments)
gc = 1
for line_group in line_groups:
lc = 1
line_group = sorted(line_group, key = lambda x: (x[2],x[3]))
dist,angle,sx1,sy1,sx2,sy2 = line_group[0]
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
#s_ang = find_angle(sx1,sy1,x1,y1)
#if angle - 5 < s_ang < angle + 5:
# print("FINAL GROUP:", gc,lc,line, angle, s_ang)
# final_group.append((dist,angle,x1,y1,x2,y2))
#else:
# print("REJECT GROUP:", gc,lc,line, angle, s_ang)
# reject_group.append((dist,angle,x1,y1,x2,y2))
#seg_dist = find_closest_segment(line, line_group)
cv2.line(image, (x1,y1), (x2,y2), (255), 2)
cv2.putText(image, "L " + str(lc), (x1+25,y1+10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
lc = lc + 1
if len(line_group) > 0:
cv2.putText(image, "LG " + str(gc), (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
gc = gc + 1
for line in orphan_lines:
#print("ORPHAN:", line)
dist,angle,x1,y1,x2,y2 = line
cv2.line(image, (x1,y1), (x2,y2), (255), 1)
cv2.putText(image, "Orph" , (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
#cv2.ellipse(image,(ax,ay),(dist_x,dist_y),elp_ang,elp_ang,180,255,-1)
#a,b = best_fit(lxs, lys)
#plt.scatter(lxs,lys)
#plt.xlim(0,640)
#plt.ylim(0,480)
#yfit = [a + b * xi for xi in lxs]
#plt.plot(lxs,yfit)
#cv2.imshow('pepe', image)
#cv2.waitKey(1)
#plt.gca().invert_yaxis()
#plt.show()
#for x,y,w,h in points:
# if w > 25 or h > 25:
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
# else:
# cv2.circle(image, (x,y), 20, (120), 1)
edges = cv2.Canny(image.copy(),thresh_limit,255)
el = filename.split("/");
fn = el[-1]
cv2.putText(current_image, "File Name: " + fn, (10,440), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
cv2.putText(current_image, str(tag), (10,450), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
cv2.putText(current_image, "Points: " + str(len(points)), (10,460), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
cv2.putText(current_image, "Line Groups: " + str(len(final_groups)), (10,470), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
blend = cv2.addWeighted(image, .2, current_image, .8,0)
np_plt = cv2.imread("/tmp/plot.png")
np_plt = cv2.cvtColor(np_plt, cv2.COLOR_BGR2GRAY)
hh, ww = np_plt.shape
crop = cv2.resize(np_plt, (0,0), fx=1.1, fy=1.1)
crop = crop_center(crop, 640,480)
#blend = cv2.addWeighted(blend, .5, crop, .5,0)
#for x,y in stars:
# cv2.circle(blend, (x,y), 5, (255), 1)
#exit()
return(line_groups, points, clusters)
def regroup_lines(line_segments):
final_group = []
reject_group = []
sangles = []
dist,angle,sx1,sy1,sx2,sy2 = line_segments[0]
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
s_ang = find_angle(sx1,sy1,x1,y1)
sangles.append(s_ang)
mean_angle = np.median(np.array(sangles))
if len(line_segments ) > 0:
dist,angle,sx1,sy1,sx2,sy2 = line_segments[0]
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
s_ang = find_angle(sx1,sy1,x1,y1)
if mean_angle - 10 <= s_ang <= mean_angle + 10:
#print("FINAL GROUP:", line, angle, s_ang, mean_angle)
found = 0
for (dd,aa,ax1,ay1,ax2,ay2) in final_group:
if ax1 == x1 and ay1 == y1:
found = 1
if found == 0:
final_group.append((dist,angle,x1,y1,x2,y2))
else:
#print("REJECT GROUP:",line, angle, s_ang, mean_angle)
reject_group.append((dist,angle,x1,y1,x2,y2))
if len(line_segments ) > 0:
sdist,sangle,sx1,sy1,sx2,sy2 = line_segments[0]
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
s_ang = find_angle(sx1,sy1,x1,y1)
tdist = calc_dist(x1,y1,sx1,sy1)
if sangle - 10 <= angle <= sangle + 10 and tdist < 20:
found = 0
for (dd,aa,ax1,ay1,ax2,ay2) in final_group:
if ax1 == x1 and ay1 == y1:
found = 1
if found == 0:
print("FINAL GROUP:", line, angle, s_ang, mean_angle)
final_group.append((dist,angle,x1,y1,x2,y2))
else:
#print("REJECT GROUP:",line, angle, s_ang, mean_angle)
reject_group.append((dist,angle,x1,y1,x2,y2))
return(final_group, reject_group)
def flatten_line_groups(line_groups):
line_segments = []
for line_group in line_groups:
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
line_segments.append((dist,angle,x1,y1,x2,y2))
return(line_segments)
def log_node(nodes, line, closest):
if len(nodes) == 0:
nodes.append((line,closest))
return(nodes)
def find_line_nodes(line_segments):
nodes = []
seg_list = []
rest = line_segments
for line in line_segments:
#print("LENLINE", len(line))
#print(line)
dist,angle,x1,y1,x2,y2 = line
closest, rest = sort_segs(x1,y1,rest)
#nodes = log_node(nodes, line, closest)
def sort_segs(x,y,seg_dist):
sorted_lines = sorted(seg_dist, key=lambda x: x[0])
#for line in sorted_lines:
# print ("SORTED LINE", line)
closest = []
rest = []
already_found = 0
for line in sorted_lines:
if len(line) == 6:
dist,angle,x1,y1,x2,y2 = line
else:
print("WTF!:", line)
seg_dist = calc_dist(x,y,x1,y1)
if seg_dist != 0 and already_found != 1:
closest.append((dist,angle,x1,y1,x2,y2))
else:
rest.append((dist,angle,x1,y1,x2,y2))
return(closest, rest)
def find_closest_segment(this_line,line_group):
seg_dist = []
dist, angle, x1,y1,x2,y2 = this_line
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
for line in line_group:
xdist, xangle, xx1,xy1,xx2,xy2 = line
xcx = (xx1 + xx2) / 2
xcy = (xy1 + xy2) / 2
dist = calc_dist(cx,cy,xcx,xcy)
if dist > 0:
seg_dist.append((dist, x1,y1,x2,y2))
sorted_lines = sorted(seg_dist, key=lambda x: x[0])
#for line in sorted_lines:
# print("CLOSEST SEGMENTS:", line)
def find_points_in_crop(crop,x,y,w,h):
print ("cropping")
go = 1
cnt_pts = []
thresh_limit = 250
canvas = np.zeros([480,640], dtype=crop.dtype)
canvas[y:y+h,x:x+w] = crop
for i in range(x,x+w):
        for j in range(y,y+h):
if i % 5 == 0:
canvas[0:480,i:i+3] = 0
if j % 5 == 0:
canvas[j:j+3,0:640] = 0
#print ("CROP", crop.shape[0])
#if crop.shape[0] > 25:
#cv2.imshow('pepe', canvas)
#cv2.waitKey(1000)
last_cnts = []
while go == 1:
_, thresh = cv2.threshold(canvas, thresh_limit, 255, cv2.THRESH_BINARY)
(_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt_limit = int((w + h) / 20)
if cnt_limit < 5:
cnt_limit = 5
if cnt_limit > 25:
cnt_limit = 25
#print ("CNTS at thresh:", len(cnts), thresh_limit)
thresh_limit = thresh_limit - 2
if len(cnts) >= cnt_limit:
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if w > 1 and h > 1:
cnt_pts.append((x,y,w,h))
if len(last_cnts) >= len(cnt_pts) and len(last_cnts) > cnt_limit:
#cnt_pts = last_cnts
go = 0
if thresh_limit < 5:
cnt_pts = last_cnts
go = 0
if len(cnts) > 70:
go = 0
#print ("CNTS: ", len(cnts))
#print ("LAST CNTS: ", len(last_cnts))
#print ("THRESH LIMIT: ", thresh_limit)
#cv2.imshow('pepe', thresh)
#cv2.waitKey(100)
last_cnts = cnt_pts
return(cnt_pts)
def best_fit(X, Y):
xbar = sum(X)/len(X)
ybar = sum(Y)/len(Y)
n = len(X) # or len(Y)
numer = sum([xi*yi for xi,yi in zip(X, Y)]) - n * xbar * ybar
denum = sum([xi**2 for xi in X]) - n * xbar**2
b = numer / denum
a = ybar - b * xbar
print('best fit line:\ny = {:.2f} + {:.2f}x'.format(a, b))
return a, b
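# Worked example (illustrative): best_fit([0, 1, 2], [1, 3, 5]) gives
# a = 1.0, b = 2.0, i.e. the least-squares line y = 1 + 2x.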
def diff_all(med_stack_all, background, median, before_image, current_image, after_image,filename ):
before_diff = cv2.absdiff(current_image.astype(current_image.dtype), before_image,)
after_diff = cv2.absdiff(current_image.astype(current_image.dtype), after_image,)
before_after_diff = cv2.absdiff(before_image.astype(current_image.dtype), after_image,)
median_three = np.median(np.array((before_image, after_image, current_image)), axis=0)
median = np.uint8(median)
median_sum = np.sum(median)
median_diff = cv2.absdiff(median_three.astype(current_image.dtype), median,)
blur_med = cv2.GaussianBlur(median, (5, 5), 0)
# find bright areas in median and mask them out of the current image
tm = find_best_thresh(blur_med, 30, 1)
_, median_thresh = cv2.threshold(blur_med, tm, 255, cv2.THRESH_BINARY)
#cv2.imshow('pepe', median_thresh)
#cv2.waitKey(1000)
(_, cnts, xx) = cv2.findContours(median_thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
hit = 0
real_cnts = []
print ("CNTS: ", len(cnts))
if len(cnts) < 1000:
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if True:
w = w + 20
h = h + 20
x = x - 20
y = y - 20
if x < 0:
x = 0
if y < 0:
y = 0
if x+w > current_image.shape[1]:
x = current_image.shape[1]-1
if y+h > current_image.shape[0]:
y = current_image.shape[0]-1
if w > 0 and h > 0:
mask = current_image[y:y+h, x:x+w]
#cv2.rectangle(current_image, (x,y), (x+w+5, y+h+5), (255),1)
for xx in range(0, mask.shape[1]):
for yy in range(0, mask.shape[0]):
mask[yy,xx] = randint(0,6)
blur_mask = cv2.GaussianBlur(mask, (5, 5), 0)
current_image[y:y+h,x:x+w] = blur_mask
median[y:y+h,x:x+w] =blur_mask
# find the diff between the masked median and the masked current image
blur_cur = cv2.GaussianBlur(current_image, (5, 5), 0)
blur_med = cv2.GaussianBlur(median, (5, 5), 0)
cur_med_diff = cv2.absdiff(blur_cur.astype(blur_cur.dtype), blur_med,)
blend = cv2.addWeighted(current_image, .5, cur_med_diff, .5,0)
    cur_med_diff -= median  # original read "=- median", which silently assigned -median
#line_groups, points, clusters = find_objects2(blend, "Current Median Diff Blend", current_image, filename)
return(blend, current_image, filename)
#return(line_groups, points)
def inspect_image(med_stack_all, background, median, before_image, current_image, after_image, avg_cnt,avg_tot,avg_pts,filename):
rois = []
big_cnts = []
line_groups = []
orphan_lines = []
obj_points = []
stars = []
image_diff = cv2.absdiff(current_image.astype(current_image.dtype), background,)
orig_image = current_image
current_image = image_diff
blend, current_image, filename = diff_all(med_stack_all, background, median, before_image, current_image, after_image,filename)
points = find_objects2(blend, "Current Median Diff Blend", current_image, filename)
if len(points) > 2:
line_groups, orphan_lines, stars, obj_points, big_cnts = find_objects(0, points)
if len(obj_points) > 2:
line_groups, orphan_lines, stars2, obj_points, big_cnts = find_objects(0, obj_points)
stars = stars + stars2
print ("---FINAL ANALYSIS---")
print ("File: ", filename)
print ("Total Points: ", len(points))
print ("Line Groups: ", len(line_groups))
lg_points = 0
lg = 1
for line in line_groups:
print (" Group " + str(lg) + ": " + str(len(line)))
lg = lg + 1
lg_points = lg_points + len(line)
print ("Total Line Group Points: ", lg_points)
print ("Orphan Lines: ", len(line_groups))
print ("Stars: ", len(stars))
print ("Obj Points: ", len(obj_points))
print ("Big CNTS: ", len(big_cnts))
for x,y,w,h in big_cnts:
cv2.rectangle(blend, (x,y), (x+w+5, y+h+5), (255),1)
#for x,y,w,h in obj_points:
# if w > 25 or h > 25:
# cv2.rectangle(blend, (x,y), (x+w+5, y+h+5), (255),1)
# else:
# cv2.circle(blend, (x,y), 20, (120), 1)
#for x,y,w,h in stars:
# if w > 25 or h > 25:
# cv2.rectangle(blend, (x,y), (x+w+5, y+h+5), (255),1)
# else:
# cv2.circle(blend, (x,y), 5, (120), 1)
return(blend, points, line_groups, stars, obj_points, big_cnts)
def parse_file_date(orig_video_file):
#print(orig_video_file)
if ".mp4" in orig_video_file:
stacked_image_fn = orig_video_file.replace(".mp4", "-stack.jpg")
star_image_fn = orig_video_file.replace(".mp4", "-stars.jpg")
report_fn = orig_video_file.replace(".mp4", "-stack-report.txt")
video_report = orig_video_file.replace(".mp4", "-report.txt")
trim_file = orig_video_file.replace(".mp4", "-trim.mp4")
else:
stacked_image_fn = orig_video_file.replace(".avi", "-stack.jpg")
trim_file = orig_video_file.replace(".avi", "-trim.avi")
star_image_fn = orig_video_file.replace(".avi", "-stars.jpg")
report_fn = orig_video_file.replace(".avi", "-stack-report.txt")
el = orig_video_file.split("/")
file_name = el[-1]
file_name = file_name.replace("_", "-")
file_name = file_name.replace(".", "-")
#print ("FN", file_name)
xyear, xmonth, xday, xhour, xmin, xsec, xcam_num, ftype, xext = file_name.split("-")
cam_num = xcam_num.replace("cam", "")
date_str = xyear + "-" + xmonth + "-" + xday + " " + xhour + ":" + xmin + ":" + xsec
capture_date = date_str
return(capture_date)
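# Example (illustrative): a file named
# "2017-05-20_06-07-08-cam1-stacked.jpg" parses to "2017-05-20 06:07:08".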
def day_or_night(config, capture_date):
obs = ephem.Observer()
obs.pressure = 0
obs.horizon = '-0:34'
obs.lat = config['device_lat']
obs.lon = config['device_lng']
obs.date = capture_date
sun = ephem.Sun()
sun.compute(obs)
(sun_alt, x,y) = str(sun.alt).split(":")
saz = str(sun.az)
(sun_az, x,y) = saz.split(":")
#print ("SUN", sun_alt)
if int(sun_alt) < -1:
sun_status = "night"
else:
sun_status = "day"
return(sun_status, sun_alt)
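# Note: str(sun.alt) looks like "-10:30:45.1", so sun_alt is the whole-degree
# part as a string (e.g. "-10"); the function returns e.g. ("night", "-10").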
def diff_stills(sdate, cam_num):
med_last_objects = []
last_objects = deque(maxlen=5)
diffed_files = []
config = read_config("conf/config-1.txt")
video_dir = "/mnt/ams2/SD/"
images = []
images_orig = []
images_blend = []
images_info = []
count = 0
last_image = None
last_thresh_sum = 0
hits = 0
avg_cnt = 0
avg_tot = 0
avg_pts = 0
count = 0
glob_dir = video_dir + "proc/" + sdate + "/" + "*cam" + str(cam_num) + "-stacked.jpg"
report_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-report.txt"
master_stack_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-master_stack.jpg"
#cv2.namedWindow('pepe')
mask_file = "conf/mask-" + str(cam_num) + ".txt"
file_exists = Path(mask_file)
mask_exists = 0
still_mask = [0,0,0,0]
if (file_exists.is_file()):
print("File found.")
ms = open(mask_file)
for lines in ms:
line, jk = lines.split("\n")
exec(line)
ms.close()
mask_exists = 1
(sm_min_x, sm_max_x, sm_min_y, sm_max_y) = still_mask
diffs = 0
image_list = []
file_list = []
sorted_list = []
print ("Loading still images from ", glob_dir)
fp = open(report_file, "w")
for filename in (glob.glob(glob_dir)):
capture_date = parse_file_date(filename)
sun_status, sun_alt = day_or_night(config, capture_date)
if sun_status != 'day' and int(sun_alt) <= -5:
#print("NIGHTTIME", capture_date, filename, sun_status)
file_list.append(filename)
else:
print ("This is a daytime or dusk file")
sorted_list = sorted(file_list)
for filename in sorted_list:
open_cv_image = cv2.imread(filename,0)
orig_image = open_cv_image
images_orig.append(orig_image)
print(filename)
open_cv_image[440:480, 0:640] = [0]
if mask_exists == 1:
open_cv_image[sm_min_y:sm_max_y, sm_min_x:sm_max_x] = [0]
images.append(open_cv_image)
#exit()
#time.sleep(5)
height , width = open_cv_image.shape
master_stack = None
# Define the codec and create VideoWriter object
#fourcc = cv2.VideoWriter_fourcc(*'H264')
#out = cv2.VideoWriter(outfile,fourcc, 5, (width,height),1)
#med_stack_all = np.median(np.array(images[50:150]), axis=0)
med_stack_all = np.median(np.array(images), axis=0)
#cv2.imshow('pepe', cv2.convertScaleAbs(med_stack_all))
#cv2.waitKey(1000)
objects = None
last_line_groups = []
last_points = []
for filename in sorted_list:
hit = 0
detect = 0
el = filename.split("/")
fn = el[-1]
#this_image = cv2.imread(filename,1)
this_image = images[count]
if count >= 1:
before_image = images[count-1]
else:
before_image = images[count+2]
if count >= len(file_list)-1:
after_image = images[count-2]
else:
after_image = images[count+1]
if count < 25:
median = np.median(np.array(images[0:count+25]), axis=0)
elif len(images) - count < 25:
median = np.median(np.array(images[count-25:count]), axis=0)
else:
median = np.median(np.array(images[count-25:count]), axis=0)
if count < 10:
background = images[count+1]
for i in range (0,10):
background = cv2.addWeighted(background, .8, images[count+i], .2,0)
else:
background = images[count-1]
for i in range (0,10):
background = cv2.addWeighted(background, .8, images[count-i], .2,0)
img_rpt_file = filename.replace("-stacked.jpg", "-stack-report.txt")
img_report = open(img_rpt_file, "w")
(blend, points, line_groups, stars, obj_points, big_cnts) = inspect_image(med_stack_all, background, median, before_image, this_image, after_image, avg_cnt,avg_tot,avg_pts, filename)
master_stack = stack_stack(blend, master_stack)
img_report.write("points=" + str(points) + "\n")
img_report.write("line_groups=" + str(line_groups) + "\n")
img_report.write("stars=" + str(stars) + "\n")
img_report.write("obj_points=" + str(obj_points) + "\n")
img_report.write("big_cnts=" + str(big_cnts) + "\n")
img_report.close()
images_blend.append(blend)
images_info.append((points, line_groups, stars, obj_points, big_cnts))
# block out the detections in the master image to remove it from the running mask
last_line_group = line_groups
last_points = points
for x,y,w,h in last_points:
images[count][y:y+h,x:x+w] = 5
count = count + 1
if len(big_cnts) > 0 or len(obj_points) >= 3:
hits = hits + 1
#cv2.imshow('pepe', blend)
#if len(line_groups) >= 1 or len(obj_points) > 3 or len(big_cnts) > 0:
#cv2.waitKey(1)
# while(1):
# k = cv2.waitKey(33)
# if k == 32:
# break
# if k == 27:
# exit()
#else:
#cv2.waitKey(1)
data = filename + "," + str(len(line_groups)) + "," + str(len(obj_points)) + "," + str(len(big_cnts)) + "\n"
fp.write(data)
print ("TOTAL: ", len(file_list))
print ("HITS: ", hits)
fp.close()
if master_stack is not None:
print("saving", master_stack_file)
master_stack.save(master_stack_file, "JPEG")
else:
print("Failed.")
hits = 1
for count in range(0, len(sorted_list) - 1):
file = sorted_list[count]
el = file.split("/")
st = el[-1]
report_str = st.replace("-stacked.jpg", "-report.txt")
video_str = st.replace("-stacked.jpg", ".mp4")
video_file = file.replace("-stacked.jpg", ".mp4")
(points, line_groups, stars, obj_points, big_cnts) = images_info[count]
if len(obj_points) > 3 or len(big_cnts) > 0:
for bc in big_cnts:
(x,y,w,h) = bc
obj_points.append((x,y,5,5))
obj_points.append((x+w,y+h,5,5))
np_obj_points = np.array(obj_points)
max_x = np.max(np_obj_points[:,0])
max_y = np.max(np_obj_points[:,1])
min_x = np.min(np_obj_points[:,0])
min_y = np.min(np_obj_points[:,1])
myimg = cv2.imread(sorted_list[count],0)
cv2.rectangle(myimg, (min_x,min_y), (max_x, max_y), (255),1)
#cv2.imshow('pepe', myimg)
#cv2.waitKey(1)
print ("-------")
print ("Count:", count)
print ("Hit:", hits)
print ("File:", sorted_list[count])
print ("Points:", str(len(points)))
print ("Line Groups:", str(len(line_groups)))
gc = 1
for line_group in line_groups:
for dist, ang, x1,y1,w1,h1 in line_group:
print ("GROUP: ", gc, dist, ang, x1,y1,w1,h1)
gc = gc + 1
print ("Stars:", str(len(stars)))
print ("Obj Points:", str(len(obj_points)))
print ("Big Cnts:", str(len(big_cnts)))
print ("Min/Max X/Y:", str(min_x), str(min_y), str(max_x), str(max_y))
print ("-------")
hits = hits + 1
video_report = video_file.replace(".mp4", "-report.txt")
file_exists = Path(video_report)
if (file_exists.is_file()):
print ("Already processed the video.")
#else:
# print("./PV.py " + video_file + " " + cam_num)
# os.system("./PV.py " + video_file + " " + cam_num)
else :
min_x = min_y = max_x = max_y = 0
#cmd = "grep \"Motion Frames:\" `find /mnt/ams2/SD/" + str(cam_num) + " |grep " + report_str + "`"
#output = subprocess.check_output(cmd, shell=True).decode("utf-8")
#output = output.replace("Motion Frames:", "motion_frames=")
#print (output)
#exec(output)
#if len(motion_frames) > 14:
# cmd = "find /mnt/ams2/SD/" + str(cam_num) + " |grep " + video_str
# video_file = subprocess.check_output(cmd, shell=True).decode("utf-8")
# print("This is probably a real event?")
# print(video_file)
sdate = sys.argv[1]
cam_num = sys.argv[2]
diff_stills(sdate, cam_num)
| mikehankey/fireball_camera | scan-stills2.py | Python | gpl-3.0 | 42,716 |
from math import sqrt
from collections import defaultdict, Counter
from fractions import Fraction
def reverse_erathos(n):
d = defaultdict(set)
for i in range(2, n + 1):
if i not in d:
j = 2 * i
while j <= n:
d[j].add(i)
j += i
return d
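# Example (illustrative): reverse_erathos(10) maps each composite to its set
# of prime factors: {4: {2}, 6: {2, 3}, 8: {2}, 9: {3}, 10: {2, 5}}.
# Primes never appear as keys, which totient() below relies on.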
def totient(n, prime_decs):
if n not in prime_decs:
return n - 1
res = n
for prime in prime_decs[n]:
res *= 1 - Fraction(1, prime)
return int(res)
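# Euler's product formula: phi(n) = n * prod(1 - 1/p) over the distinct
# primes p dividing n. Example: phi(10) = 10 * (1 - 1/2) * (1 - 1/5) = 4.
# Using Fraction keeps the product exact before the final int() conversion.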
def compute_solution(n):
c = 1
prime_decs = reverse_erathos(n)
res = []
for i in range(2, n + 1):
if c % 50000 == 0:
print(c)
tot = totient(i, prime_decs)
if Counter(str(i)) == Counter(str(tot)):
res.append((i, tot, Fraction(i, tot)))
c += 1
return min(res, key = lambda x: x[2])
print(compute_solution(10000000), sep='\n')
| rodgzilla/project-euler | problem_070/problem.py | Python | gpl-3.0 | 895 |
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Table, Float
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relation, mapper, synonym, deferred
from sqlalchemy.orm.collections import attribute_mapped_collection
from eos.db import gamedata_meta
from eos.types import Icon, Attribute, Item, Effect, MetaType, Group, Traits
items_table = Table("invtypes", gamedata_meta,
Column("typeID", Integer, primary_key=True),
Column("typeName", String, index=True),
Column("description", String),
Column("raceID", Integer),
Column("factionID", Integer),
Column("volume", Float),
Column("mass", Float),
Column("capacity", Float),
Column("published", Boolean),
Column("marketGroupID", Integer, ForeignKey("invmarketgroups.marketGroupID")),
Column("iconID", Integer, ForeignKey("icons.iconID")),
Column("groupID", Integer, ForeignKey("invgroups.groupID"), index=True))
from .metaGroup import metatypes_table # noqa
from .traits import traits_table # noqa
mapper(Item, items_table,
properties={"group": relation(Group, backref="items"),
"icon": relation(Icon),
"_Item__attributes": relation(Attribute, collection_class=attribute_mapped_collection('name')),
"effects": relation(Effect, collection_class=attribute_mapped_collection('name')),
"metaGroup": relation(MetaType,
primaryjoin=metatypes_table.c.typeID == items_table.c.typeID,
uselist=False),
"ID": synonym("typeID"),
"name": synonym("typeName"),
"description": deferred(items_table.c.description),
"traits": relation(Traits,
primaryjoin=traits_table.c.typeID == items_table.c.typeID,
uselist=False)
})
Item.category = association_proxy("group", "category")
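# Illustrative usage (assumes a SQLAlchemy session bound to gamedata_meta;
# the type name below is an example, not part of this module):
# session.query(Item).filter(Item.name == "Rifter").first()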
| Ebag333/Pyfa | eos/db/gamedata/item.py | Python | gpl-3.0 | 3,101 |
#!/usr/bin/env python
"""
A pure python ping implementation using raw socket.
Note that ICMP messages can only be sent from processes running as root.
Derived from ping.c distributed in Linux's netkit. That code is
copyright (c) 1989 by The Regents of the University of California.
That code is in turn derived from code written by Mike Muuss of the
US Army Ballistic Research Laboratory in December, 1983 and
placed in the public domain. They have my thanks.
Bugs are naturally mine. I'd be glad to hear about them. There are
certainly word-size dependencies here.
Copyright (c) Matthew Dixon Cowles, <http://www.visi.com/~mdc/>.
Distributable under the terms of the GNU General Public License
version 2. Provided with no warranties of any sort.
Original Version from Matthew Dixon Cowles:
-> ftp://ftp.visi.com/users/mdc/ping.py
Rewrite by Jens Diemer:
-> http://www.python-forum.de/post-69122.html#69122
Rewrite by George Notaras:
-> http://www.g-loaded.eu/2009/10/30/python-ping/
Revision history
~~~~~~~~~~~~~~~~
November 8, 2009
----------------
Improved compatibility with GNU/Linux systems.
Fixes by:
* George Notaras -- http://www.g-loaded.eu
Reported by:
* Chris Hallman -- http://cdhallman.blogspot.com
Changes in this release:
- Re-use time.time() instead of time.clock(). The 2007 implementation
worked only under Microsoft Windows. Failed on GNU/Linux.
time.clock() behaves differently under the two OSes[1].
[1] http://docs.python.org/library/time.html#time.clock
May 30, 2007
------------
little rewrite by Jens Diemer:
- change socket asterisk import to a normal import
- replace time.time() with time.clock()
- delete "return None" (or change to "return" only)
- in checksum() rename "str" to "source_string"
November 22, 1997
-----------------
Initial hack. Doesn't do much, but rather than try to guess
what features I (or others) will want in the future, I've only
put in what I need now.
December 16, 1997
-----------------
For some reason, the checksum bytes are in the wrong order when
this is run under Solaris 2.X for SPARC but it works right under
Linux x86. Since I don't know just what's wrong, I'll swap the
bytes always and then do an htons().
December 4, 2000
----------------
Changed the struct.pack() calls to pack the checksum and ID as
unsigned. My thanks to Jerome Poincheval for the fix.
Last commit info:
~~~~~~~~~~~~~~~~~
$LastChangedDate: $
$Rev: $
$Author: $
"""
import os, socket, struct, select, time
# From /usr/include/linux/icmp.h; your mileage may vary.
ICMP_ECHO_REQUEST = 8 # Seems to be the same on Solaris.
def checksum(source_string):
"""
I'm not too confident that this is right but testing seems
to suggest that it gives the same answers as in_cksum in ping.c
"""
sum = 0
countTo = (len(source_string)/2)*2
count = 0
while count<countTo:
thisVal = ord(source_string[count + 1])*256 + ord(source_string[count])
sum = sum + thisVal
sum = sum & 0xffffffff # Necessary?
count = count + 2
if countTo<len(source_string):
sum = sum + ord(source_string[len(source_string) - 1])
sum = sum & 0xffffffff # Necessary?
sum = (sum >> 16) + (sum & 0xffff)
sum = sum + (sum >> 16)
answer = ~sum
answer = answer & 0xffff
# Swap bytes. Bugger me if I know why.
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
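# The routine above is the RFC 1071 Internet checksum: sum 16-bit words
# (low byte first), fold the carries into the low 16 bits, complement, then
# swap bytes as a stand-in for htons(). Example (Python 2, illustrative):
# checksum("\x01\x02") == 0xfefd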
def receive_one_ping(my_socket, ID, timeout):
"""
receive the ping from the socket.
"""
timeLeft = timeout
while True:
startedSelect = time.time()
whatReady = select.select([my_socket], [], [], timeLeft)
howLongInSelect = (time.time() - startedSelect)
if whatReady[0] == []: # Timeout
return
timeReceived = time.time()
recPacket, addr = my_socket.recvfrom(1024)
icmpHeader = recPacket[20:28]
type, code, checksum, packetID, sequence = struct.unpack(
"bbHHh", icmpHeader
)
if packetID == ID:
bytesInDouble = struct.calcsize("d")
timeSent = struct.unpack("d", recPacket[28:28 + bytesInDouble])[0]
return timeReceived - timeSent
timeLeft = timeLeft - howLongInSelect
if timeLeft <= 0:
return
def send_one_ping(my_socket, dest_addr, ID):
"""
Send one ping to the given >dest_addr<.
"""
dest_addr = socket.gethostbyname(dest_addr)
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
my_checksum = 0
    # Make a dummy header with a 0 checksum.
header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, my_checksum, ID, 1)
bytesInDouble = struct.calcsize("d")
data = (192 - bytesInDouble) * "Q"
data = struct.pack("d", time.time()) + data
# Calculate the checksum on the data and the dummy header.
my_checksum = checksum(header + data)
# Now that we have the right checksum, we put that in. It's just easier
# to make up a new header than to stuff it into the dummy.
header = struct.pack(
"bbHHh", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), ID, 1
)
packet = header + data
my_socket.sendto(packet, (dest_addr, 1)) # Don't know about the 1
def do_one(dest_addr, timeout):
"""
Returns either the delay (in seconds) or none on timeout.
"""
icmp = socket.getprotobyname("icmp")
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
except socket.error, (errno, msg):
if errno == 1:
# Operation not permitted
msg = msg + (
" - Note that ICMP messages can only be sent from processes"
" running as root."
)
raise socket.error(msg)
raise # raise the original error
my_ID = os.getpid() & 0xFFFF
send_one_ping(my_socket, dest_addr, my_ID)
delay = receive_one_ping(my_socket, my_ID, timeout)
my_socket.close()
return delay
def verbose_ping(dest_addr, timeout = 2, count = 4):
"""
Send >count< ping to >dest_addr< with the given >timeout< and display
the result.
"""
for i in xrange(count):
print "ping %s..." % dest_addr,
try:
delay = do_one(dest_addr, timeout)
except socket.gaierror, e:
print "failed. (socket error: '%s')" % e[1]
break
if delay == None:
print "failed. (timeout within %ssec.)" % timeout
else:
delay = delay * 1000
print "get ping in %0.4fms" % delay
print
if __name__ == '__main__':
#verbose_ping("192.168.0.4",timeout=0.1,count=1)
result=do_one("192.168.0.4", 0.1)
print result
| jredrejo/controlaula | Backend/src/ControlAula/Utils/ping.py | Python | gpl-3.0 | 7,021 |
#!/usr/bin/python
from src.sqllist import GLOBALS
class BasicResolver(object):
"""General resolver class"""
def __init__(self, conn=None):
self.conn = conn
pass
def resolve(self, detections, sources):
"""Template resolve function.
        Returns the resolution status and an array of (xtrsrcid, runcatid)
        pairs for resolved sources (when possible)."""
return False, []
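    # Illustrative subclass sketch (hypothetical, not part of the codebase):
    #
    # class NearestResolver(BasicResolver):
    #     def resolve(self, detections, sources):
    #         pairs = [(det[0], src[0])
    #                  for det, src in zip(detections, sources)]
    #         return len(pairs) > 0, pairs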
def load_detections(self, group_id):
cursor = self.conn.get_cursor("""
select xtrsrcid, ra, ra_err, decl, decl_err, f_int, f_int_err
from extractedsources e
where e.image_id = %s
and exists (select 1 from temp_associations ta
where ta.xtrsrc_id2 = e.xtrsrcid
and ta.image_id = e.image_id
and ta.group_head_id = %s)""" % (GLOBALS['i'], group_id))
detections = cursor.fetchall()
cursor.close()
return detections
def load_sources(self, group_id):
cursor = self.conn.get_cursor("""
select runcatid, wm_ra, wm_ra_err, wm_decl, wm_decl_err, wm_f_int, wm_f_int_err
from runningcatalog r,
runningcatalog_fluxes f,
images i
where i.imageid = %s
and f.band = i.band
and f.stokes = i.stokes
and r.runcatid = f.runcat_id
and exists (select 1 from temp_associations ta
where ta.runcat_id = r.runcatid
and ta.image_id = i.imageid
and ta.group_head_id = %s)""" % (GLOBALS['i'], group_id))
sources = cursor.fetchall()
cursor.close()
return sources
def run_resolve(self, group_id):
"""Get data from Database,
run resolver,
saev results to temp_associations"""
#--Run resolver--
is_ok, solutions = self.resolve(self.load_detections(group_id),
self.load_sources(group_id))
if is_ok:
#"delete" all associations from this group.
self.conn.execute("""
update temp_associations
set kind = -1
where image_id = %s
and group_head_id = %s;""" % (GLOBALS['i'], group_id))
#"restore" associations that are "ok"
for solution in solutions:
self.conn.execute("""update temp_associations
set kind = 1,
group_head_id = null
where image_id = %s
and group_head_id = %s
and xtrsrc_id2 = %s
and runcat_id = %s;""" % (GLOBALS['i'], group_id,
solution[0], solution[1]))
return is_ok
| jjdmol/LOFAR | CEP/GSM/bremen/src/resolve.py | Python | gpl-3.0 | 2,496 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2019 khalim19
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines a custom widget holding an array of GUI elements. The widget
is used as the default GUI for `setting.ArraySetting` instances.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import collections
import contextlib
import pygtk
pygtk.require("2.0")
import gtk
import gobject
from .. import utils as pgutils
from . import draganddropcontext as draganddropcontext_
__all__ = [
"ItemBox",
"ArrayBox",
"ItemBoxItem",
]
class ItemBox(gtk.ScrolledWindow):
"""
This base class defines a scrollable box holding a vertical list of items.
Each item is an instance of `_ItemBoxItem` class or one of its subclasses.
"""
ITEM_SPACING = 4
VBOX_SPACING = 4
def __init__(self, item_spacing=ITEM_SPACING, *args, **kwargs):
super().__init__(*args, **kwargs)
self._item_spacing = item_spacing
self._drag_and_drop_context = draganddropcontext_.DragAndDropContext()
self._items = []
self._vbox_items = gtk.VBox(homogeneous=False)
self._vbox_items.set_spacing(self._item_spacing)
self._vbox = gtk.VBox(homogeneous=False)
self._vbox.set_spacing(self.VBOX_SPACING)
self._vbox.pack_start(self._vbox_items, expand=False, fill=False)
self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.add_with_viewport(self._vbox)
self.get_child().set_shadow_type(gtk.SHADOW_NONE)
def add_item(self, item):
self._vbox_items.pack_start(item.widget, expand=False, fill=False)
item.button_remove.connect("clicked", self._on_item_button_remove_clicked, item)
item.widget.connect("key-press-event", self._on_item_widget_key_press_event, item)
self._setup_drag(item)
self._items.append(item)
return item
def reorder_item(self, item, position):
new_position = min(max(position, 0), len(self._items) - 1)
self._items.pop(self._get_item_position(item))
self._items.insert(new_position, item)
self._vbox_items.reorder_child(item.widget, new_position)
return new_position
def remove_item(self, item):
item_position = self._get_item_position(item)
if item_position < len(self._items) - 1:
next_item_position = item_position + 1
self._items[next_item_position].item_widget.grab_focus()
self._vbox_items.remove(item.widget)
item.remove_item_widget()
self._items.remove(item)
def clear(self):
for unused_ in range(len(self._items)):
self.remove_item(self._items[0])
def _setup_drag(self, item):
self._drag_and_drop_context.setup_drag(
item.item_widget,
self._get_drag_data,
self._on_drag_data_received,
[item],
[item],
self)
def _get_drag_data(self, dragged_item):
return str(self._items.index(dragged_item))
def _on_drag_data_received(self, dragged_item_index_str, destination_item):
dragged_item = self._items[int(dragged_item_index_str)]
self.reorder_item(dragged_item, self._get_item_position(destination_item))
def _on_item_widget_key_press_event(self, widget, event, item):
if event.state & gtk.gdk.MOD1_MASK: # Alt key
key_name = gtk.gdk.keyval_name(event.keyval)
if key_name in ["Up", "KP_Up"]:
self.reorder_item(
item, self._get_item_position(item) - 1)
elif key_name in ["Down", "KP_Down"]:
self.reorder_item(
item, self._get_item_position(item) + 1)
def _on_item_button_remove_clicked(self, button, item):
self.remove_item(item)
def _get_item_position(self, item):
return self._items.index(item)
class ItemBoxItem(object):
_HBOX_BUTTONS_SPACING = 3
_HBOX_SPACING = 3
def __init__(self, item_widget):
self._item_widget = item_widget
self._hbox = gtk.HBox(homogeneous=False)
self._hbox.set_spacing(self._HBOX_SPACING)
self._hbox_buttons = gtk.HBox(homogeneous=False)
self._hbox_buttons.set_spacing(self._HBOX_BUTTONS_SPACING)
self._event_box_buttons = gtk.EventBox()
self._event_box_buttons.add(self._hbox_buttons)
self._hbox.pack_start(self._item_widget, expand=True, fill=True)
self._hbox.pack_start(self._event_box_buttons, expand=False, fill=False)
self._event_box = gtk.EventBox()
self._event_box.add(self._hbox)
self._has_hbox_buttons_focus = False
self._button_remove = gtk.Button()
self._setup_item_button(self._button_remove, gtk.STOCK_CLOSE)
self._event_box.connect("enter-notify-event", self._on_event_box_enter_notify_event)
self._event_box.connect("leave-notify-event", self._on_event_box_leave_notify_event)
self._is_event_box_allocated_size = False
self._buttons_allocation = None
self._event_box.connect("size-allocate", self._on_event_box_size_allocate)
self._event_box_buttons.connect(
"size-allocate", self._on_event_box_buttons_size_allocate)
self._event_box.show_all()
self._hbox_buttons.set_no_show_all(True)
@property
def widget(self):
return self._event_box
@property
def item_widget(self):
return self._item_widget
@property
def button_remove(self):
return self._button_remove
def remove_item_widget(self):
self._hbox.remove(self._item_widget)
def _setup_item_button(self, item_button, icon, position=None):
item_button.set_relief(gtk.RELIEF_NONE)
button_icon = gtk.image_new_from_pixbuf(
item_button.render_icon(icon, gtk.ICON_SIZE_MENU))
item_button.add(button_icon)
self._hbox_buttons.pack_start(item_button, expand=False, fill=False)
if position is not None:
self._hbox_buttons.reorder_child(item_button, position)
item_button.show_all()
def _on_event_box_enter_notify_event(self, event_box, event):
if event.detail != gtk.gdk.NOTIFY_INFERIOR:
self._hbox_buttons.show()
def _on_event_box_leave_notify_event(self, event_box, event):
if event.detail != gtk.gdk.NOTIFY_INFERIOR:
self._hbox_buttons.hide()
def _on_event_box_size_allocate(self, event_box, allocation):
if self._is_event_box_allocated_size:
return
self._is_event_box_allocated_size = True
# Assign enough height to the HBox to make sure it does not resize when
# showing buttons.
if self._buttons_allocation.height >= allocation.height:
self._hbox.set_property("height-request", allocation.height)
def _on_event_box_buttons_size_allocate(self, event_box, allocation):
if self._buttons_allocation is not None:
return
self._buttons_allocation = allocation
# Make sure the width allocated to the buttons remains the same even if
# buttons are hidden. This avoids a problem with unreachable buttons when
# the horizontal scrollbar is displayed.
self._event_box_buttons.set_property(
"width-request", self._buttons_allocation.width)
self._hbox_buttons.hide()
class ArrayBox(ItemBox):
"""
This class can be used to edit `setting.ArraySetting` instances interactively.
Signals:
* `"array-box-changed"` - An item was added, reordered or removed by the user.
* `"array-box-item-changed"` - The contents of an item was modified by the
user. Currently, this signal is not invoked in this widget and can only be
invoked explicitly by calling `ArrayBox.emit("array-box-item-changed")`.
"""
__gsignals__ = {
b"array-box-changed": (gobject.SIGNAL_RUN_FIRST, None, ()),
b"array-box-item-changed": (gobject.SIGNAL_RUN_FIRST, None, ())}
_SIZE_HBOX_SPACING = 6
def __init__(
self,
new_item_default_value,
min_size=0,
max_size=None,
item_spacing=ItemBox.ITEM_SPACING,
max_width=None,
max_height=None,
*args,
**kwargs):
"""
Parameters:
* `new_item_default_value` - default value for new items.
* `min_size` - minimum number of elements.
* `max_size` - maximum number of elements. If `None`, the number of elements
is unlimited.
* `item_spacing` - vertical spacing in pixels between items.
* `max_width` - maximum width of the array box before the horizontal
scrollbar is displayed. The array box will resize automatically until the
maximum width is reached. If `max_width` is `None`, the width is fixed
to whatever width is provided by `gtk.ScrolledWindow`. If `max_width` is
zero or negative, the width is unlimited.
* `max_height` - maximum height of the array box before the vertical
scrollbar is displayed. For more information, see `max_width`.
"""
super().__init__(item_spacing=item_spacing, *args, **kwargs)
self._new_item_default_value = new_item_default_value
self._min_size = min_size if min_size >= 0 else 0
if max_size is None:
self._max_size = 2**32
else:
self._max_size = max_size if max_size >= min_size else min_size
self.max_width = max_width
self.max_height = max_height
self.on_add_item = pgutils.empty_func
self.on_reorder_item = pgutils.empty_func
self.on_remove_item = pgutils.empty_func
self._items_total_width = None
self._items_total_height = None
self._items_allocations = {}
self._locker = _ActionLocker()
self._init_gui()
def _init_gui(self):
self._size_spin_button = gtk.SpinButton(
gtk.Adjustment(
value=0,
lower=self._min_size,
upper=self._max_size,
step_incr=1,
page_incr=10,
),
digits=0)
self._size_spin_button.set_numeric(True)
self._size_spin_button.set_value(0)
self._size_spin_button_label = gtk.Label(_("Size"))
self._size_hbox = gtk.HBox()
self._size_hbox.set_spacing(self._SIZE_HBOX_SPACING)
self._size_hbox.pack_start(self._size_spin_button_label, expand=False, fill=False)
self._size_hbox.pack_start(self._size_spin_button, expand=False, fill=False)
self._vbox.pack_start(self._size_hbox, expand=False, fill=False)
self._vbox.reorder_child(self._size_hbox, 0)
self._size_spin_button.connect(
"value-changed", self._on_size_spin_button_value_changed)
def add_item(self, item_value=None, index=None):
if item_value is None:
item_value = self._new_item_default_value
item_widget = self.on_add_item(item_value, index)
item = _ArrayBoxItem(item_widget)
super().add_item(item)
item.widget.connect("size-allocate", self._on_item_widget_size_allocate, item)
if index is None:
item.label.set_label(self._get_item_name(len(self._items)))
if index is not None:
with self._locker.lock_temp("emit_array_box_changed_on_reorder"):
self.reorder_item(item, index)
if self._locker.is_unlocked("update_spin_button"):
with self._locker.lock_temp("emit_size_spin_button_value_changed"):
self._size_spin_button.spin(gtk.SPIN_STEP_FORWARD, increment=1)
return item
def reorder_item(self, item, new_position):
orig_position = self._get_item_position(item)
processed_new_position = super().reorder_item(item, new_position)
self.on_reorder_item(orig_position, processed_new_position)
self._rename_item_names(min(orig_position, processed_new_position))
if self._locker.is_unlocked("emit_array_box_changed_on_reorder"):
self.emit("array-box-changed")
def remove_item(self, item):
if (self._locker.is_unlocked("prevent_removal_below_min_size")
and len(self._items) == self._min_size):
return
if self._locker.is_unlocked("update_spin_button"):
with self._locker.lock_temp("emit_size_spin_button_value_changed"):
self._size_spin_button.spin(gtk.SPIN_STEP_BACKWARD, increment=1)
item_position = self._get_item_position(item)
super().remove_item(item)
if item in self._items_allocations:
self._update_height(-(self._items_allocations[item].height + self._item_spacing))
del self._items_allocations[item]
self.on_remove_item(item_position)
self._rename_item_names(item_position)
def set_values(self, values):
self._locker.lock("emit_size_spin_button_value_changed")
self._locker.lock("prevent_removal_below_min_size")
orig_on_remove_item = self.on_remove_item
self.on_remove_item = pgutils.empty_func
self.clear()
# This fixes an issue of items being allocated height of 1 when the array
# size was previously 0.
self.set_property("height-request", -1)
for index, value in enumerate(values):
self.add_item(value, index)
self.on_remove_item = orig_on_remove_item
self._size_spin_button.set_value(len(values))
self._locker.unlock("prevent_removal_below_min_size")
self._locker.unlock("emit_size_spin_button_value_changed")
def _setup_drag(self, item):
self._drag_and_drop_context.setup_drag(
# Using the entire item allows dragging only by the label rather than the
# widget itself. This avoids problems with widgets such as spin buttons
# that do not behave correctly when reordering and also avoids accidental
# clicking and modifying the widget by the user.
item.widget,
self._get_drag_data,
self._on_drag_data_received,
[item],
[item],
self)
def _on_size_spin_button_value_changed(self, size_spin_button):
if self._locker.is_unlocked("emit_size_spin_button_value_changed"):
self._locker.lock("update_spin_button")
new_size = size_spin_button.get_value_as_int()
if new_size > len(self._items):
num_elements_to_add = new_size - len(self._items)
for unused_ in range(num_elements_to_add):
self.add_item()
elif new_size < len(self._items):
num_elements_to_remove = len(self._items) - new_size
for unused_ in range(num_elements_to_remove):
self.remove_item(self._items[-1])
self.emit("array-box-changed")
self._locker.unlock("update_spin_button")
def _on_item_button_remove_clicked(self, button, item):
self._locker.lock("emit_size_spin_button_value_changed")
should_emit_signal = (
len(self._items) > self._min_size
or self._locker.is_locked("prevent_removal_below_min_size"))
super()._on_item_button_remove_clicked(button, item)
if should_emit_signal:
self.emit("array-box-changed")
self._locker.unlock("emit_size_spin_button_value_changed")
def _on_item_widget_size_allocate(self, item_widget, allocation, item):
if item in self._items_allocations:
self._update_width(allocation.width - self._items_allocations[item].width)
self._update_height(allocation.height - self._items_allocations[item].height)
else:
self._update_width(allocation.width)
self._update_height(allocation.height + self._item_spacing)
self._items_allocations[item] = allocation
def _update_width(self, width_diff):
if self._items_total_width is None:
self._items_total_width = self.get_allocation().width
if width_diff != 0:
self._update_dimension(
width_diff,
self._items_total_width,
self.max_width,
"width-request")
self._items_total_width = self._items_total_width + width_diff
def _update_height(self, height_diff):
if self._items_total_height is None:
self._items_total_height = self.get_allocation().height
if height_diff != 0:
self._update_dimension(
height_diff,
self._items_total_height,
self.max_height,
"height-request")
self._items_total_height = self._items_total_height + height_diff
def _update_dimension(
self,
size_diff,
total_size,
max_visible_size,
dimension_request_property):
if max_visible_size is None:
is_max_visible_size_unlimited = True
else:
is_max_visible_size_unlimited = max_visible_size <= 0
if not is_max_visible_size_unlimited:
visible_size = min(total_size, max_visible_size)
else:
visible_size = total_size
if (is_max_visible_size_unlimited
or (visible_size + size_diff <= max_visible_size
and total_size < max_visible_size)):
new_size = visible_size + size_diff
elif total_size >= max_visible_size and size_diff < 0:
if total_size + size_diff < max_visible_size:
new_size = total_size + size_diff
else:
new_size = max_visible_size
else:
new_size = max_visible_size
if max_visible_size is not None:
self.set_property(dimension_request_property, new_size)
def _rename_item_names(self, start_index):
for index, item in enumerate(self._items[start_index:]):
item.label.set_label(self._get_item_name(index + 1 + start_index))
@staticmethod
def _get_item_name(index):
return _("Element") + " " + str(index)
class _ArrayBoxItem(ItemBoxItem):
def __init__(self, item_widget):
super().__init__(item_widget)
self._label = gtk.Label()
self._label.show()
self._hbox.pack_start(self._label, expand=False, fill=False)
self._hbox.reorder_child(self._label, 0)
@property
def label(self):
return self._label
class _ActionLocker(object):
def __init__(self):
self._tokens = collections.defaultdict(int)
@contextlib.contextmanager
def lock_temp(self, key):
self.lock(key)
try:
yield
finally:
self.unlock(key)
def lock(self, key):
self._tokens[key] += 1
def unlock(self, key):
if self._tokens[key] > 0:
self._tokens[key] -= 1
def is_locked(self, key):
return self._tokens[key] > 0
def is_unlocked(self, key):
return self._tokens[key] == 0
gobject.type_register(ArrayBox)
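# Illustrative usage (assumes a running GTK main loop; the item-widget
# factory below is an example, not part of this module):
#
# box = ArrayBox(new_item_default_value=0, min_size=0, max_size=10)
# box.on_add_item = lambda value, index: gtk.Label(str(value))
# box.connect("array-box-changed", lambda widget: None)
# box.set_values([1, 2, 3])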
| khalim19/gimp-plugin-export-layers | export_layers/pygimplib/gui/itembox.py | Python | gpl-3.0 | 18,637 |
"""
Follow Me activity for Sugar
Copyright (C) 2010 Peter Hewitt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class RC():
def __init__(self, nr, nc):
self.nr = nr
self.nc = nc
def inc_r(self, ind):
r, c = self.row_col(ind)
r += 1
if r == self.nr:
r = 0
if r == (self.nr - 1) and c == (self.nc - 1):
r = 0
return self.indx(r, c)
def dec_r(self, ind):
r, c = self.row_col(ind)
r -= 1
if r < 0:
r = self.nr - 1
if r == (self.nr - 1) and c == (self.nc - 1):
r = self.nr - 2
return self.indx(r, c)
def inc_c(self, ind):
r, c = self.row_col(ind)
c += 1
if c == self.nc:
c = 0
if r == (self.nr - 1) and c == (self.nc - 1):
c = 0
return self.indx(r, c)
def dec_c(self, ind):
r, c = self.row_col(ind)
c -= 1
if c < 0:
c = self.nc - 1
if r == (self.nr - 1) and c == (self.nc - 1):
c = self.nc - 2
return self.indx(r, c)
def row_col(self, ind):
i = 0
for r in range(self.nr):
for c in range(self.nc):
if i == ind:
return r, c
i += 1
def indx(self, r, c):
return r * self.nc + c
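# Example (illustrative): on a 3x3 grid the bottom-right cell (2, 2) is
# always skipped, so stepping down from (1, 2) wraps back to the top row:
# rc = RC(3, 3)
# rc.inc_r(rc.indx(1, 2)) == rc.indx(0, 2) # True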
| walterbender/followme | rc_skip_last.py | Python | gpl-3.0 | 1,984 |