code | repo_name | path | language | license | size
stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M
---|---|---|---|---|---|
# Copyright (C) 2017 Roman Samoilenko <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import functools
from heralding.capabilities.handlerbase import HandlerBase
import asyncssh
from Crypto.PublicKey import RSA
logger = logging.getLogger(__name__)
class SSH(asyncssh.SSHServer, HandlerBase):
connections_list = []
def __init__(self, options, loop):
asyncssh.SSHServer.__init__(self)
HandlerBase.__init__(self, options, loop)
def connection_made(self, conn):
SSH.connections_list.append(conn)
self.address = conn.get_extra_info('peername')
self.dest_address = conn.get_extra_info('sockname')
self.connection = conn
self.handle_connection()
logger.debug('SSH connection received from %s.' %
conn.get_extra_info('peername')[0])
def connection_lost(self, exc):
self.session.set_auxiliary_data(self.get_auxiliary_data())
self.close_session(self.session)
if exc:
logger.debug('SSH connection error: ' + str(exc))
else:
logger.debug('SSH connection closed.')
def begin_auth(self, username):
return True
def password_auth_supported(self):
return True
def validate_password(self, username, password):
self.session.add_auth_attempt(
'plaintext', username=username, password=password)
return False
def handle_connection(self):
if HandlerBase.global_sessions > HandlerBase.MAX_GLOBAL_SESSIONS:
protocol = self.__class__.__name__.lower()
logger.warning(
'Got {0} session on port {1} from {2}:{3}, but not handling it because the global session limit has '
'been reached'.format(protocol, self.port, *self.address))
else:
self.session = self.create_session(self.address, self.dest_address)
def get_auxiliary_data(self):
data_fields = [
'client_version', 'recv_cipher', 'recv_mac', 'recv_compression'
]
data = {f: self.connection.get_extra_info(f) for f in data_fields}
return data
@staticmethod
def change_server_banner(banner):
"""_send_version code was copied from asyncssh.connection in order to change
internal local variable 'version', providing custom banner."""
@functools.wraps(asyncssh.connection.SSHConnection._send_version)
def _send_version(self):
"""Start the SSH handshake"""
version = bytes(banner, 'utf-8')
if self.is_client():
self._client_version = version
self._extra.update(client_version=version.decode('ascii'))
else:
self._server_version = version
self._extra.update(server_version=version.decode('ascii'))
self._send(version + b'\r\n')
asyncssh.connection.SSHConnection._send_version = _send_version
@staticmethod
def generate_ssh_key(ssh_key_file):
if not os.path.isfile(ssh_key_file):
with open(ssh_key_file, 'w') as _file:
rsa_key = RSA.generate(2048)
priv_key_text = str(rsa_key.exportKey('PEM', pkcs=1), 'utf-8')
_file.write(priv_key_text)
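# --- Illustrative usage sketch (not part of heralding) ---
# Assumptions: 'options' and 'loop' stand in for heralding's real session
# options and asyncio event loop; the banner, key path and port are arbitrary.
async def _demo_start_honeypot(options, loop, port=2222):
    SSH.change_server_banner('SSH-2.0-OpenSSH_7.4')  # monkey-patch the banner
    SSH.generate_ssh_key('ssh.key')                  # write host key if missing
    await asyncssh.create_server(
        lambda: SSH(options, loop), '', port, server_host_keys=['ssh.key'])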
| johnnykv/heralding | heralding/capabilities/ssh.py | Python | gpl-3.0 | 3,627 |
"""checker for unnecessary indexing in a loop.
"""
from typing import List, Optional, Tuple, Union
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker
class UnnecessaryIndexingChecker(BaseChecker):
__implements__ = IAstroidChecker
# name is the same as file name but without _checker part
name = "unnecessary_indexing"
# use dashes for connecting words in message symbol
msgs = {
"E9994": (
"For loop variable `%s` can be simplified by looping over the elements directly, "
"for example, `for my_variable in %s`.",
"unnecessary-indexing",
"Used when you have an loop variable in a for loop "
"where its only usage is to index the iterable",
)
}
# this is important so that your checker is executed before others
priority = -1
# pass in message symbol as a parameter of check_messages
@check_messages("unnecessary-indexing")
def visit_for(self, node: nodes.For) -> None:
# Check if the iterable of the for loop is of the form "range(len(<variable-name>))".
iterable = _iterable_if_range(node.iter)
if iterable is not None and _is_unnecessary_indexing(node):
args = node.target.name, iterable
self.add_message("unnecessary-indexing", node=node.target, args=args)
# Helper functions
def _is_unnecessary_indexing(node: nodes.For) -> bool:
"""Return whether the iteration variable in the for loop is ONLY used to index the iterable.
True if unnecessary usage, False otherwise or if iteration variable not used at all.
"""
index_nodes = _index_name_nodes(node.target.name, node)
    return bool(index_nodes) and all(_is_redundant(index_node, node) for index_node in index_nodes)
def _iterable_if_range(node: nodes.NodeNG) -> Optional[str]:
"""Return the iterable's name if this node is in "range" form, or None otherwise.
Check for three forms:
- range(len(<variable-name>))
- range(0, len(<variable-name>))
- range(0, len(<variable-name>), 1)
"""
# Check outer function call is range
if (
not isinstance(node, nodes.Call)
or not isinstance(node.func, nodes.Name)
or not node.func.name == "range"
):
return None
# Check arguments to range
if len(node.args) > 1:
# Check that args[0] == Const(0)
arg1 = node.args[0]
if not isinstance(arg1, nodes.Const) or arg1.value != 0:
return None
if len(node.args) == 3 and (
not isinstance(node.args[2], nodes.Const) or node.args[2].value != 1
):
return None
# Finally, check 'stop' argument is of the form len(<variable-name>).
if len(node.args) == 1:
stop_arg = node.args[0]
else:
stop_arg = node.args[1]
if (
isinstance(stop_arg, nodes.Call)
and isinstance(stop_arg.func, nodes.Name)
and stop_arg.func.name == "len"
and len(stop_arg.args) == 1
and isinstance(stop_arg.args[0], nodes.Name)
):
return stop_arg.args[0].name
def _is_load_subscript(index_node: nodes.Name, for_node: nodes.For) -> bool:
"""Return whether or not <index_node> is used to subscript the iterable of <for_node>
and the subscript item is being loaded from, e.g., s += iterable[index_node].
NOTE: Index node is deprecated in Python 3.9
Returns True if the following conditions are met:
(3.9)
- The <index_node> Name node is inside of a Subscript node
- The item that is being indexed is the iterable of the for loop
- The Subscript node is being used in a load context
(3.8)
- The <index_node> Name node is inside of an Index node
- The Index node is inside of a Subscript node
- The item that is being indexed is the iterable of the for loop
- The Subscript node is being used in a load context
"""
iterable = _iterable_if_range(for_node.iter)
return (
isinstance(index_node.parent, nodes.Subscript)
and isinstance(index_node.parent.value, nodes.Name)
and index_node.parent.value.name == iterable
and index_node.parent.ctx == astroid.Load
)
def _is_redundant(index_node: Union[nodes.AssignName, nodes.Name], for_node: nodes.For) -> bool:
"""Return whether or not <index_node> is redundant in <for_node>.
The lookup method is used in case the original loop variable is shadowed
in the for loop's body.
"""
_, assignments = index_node.lookup(index_node.name)
if not assignments:
return False
elif isinstance(index_node, nodes.AssignName):
return assignments[0] != for_node.target
else: # isinstance(index_node, nodes.Name)
return assignments[0] != for_node.target or _is_load_subscript(index_node, for_node)
def _index_name_nodes(index: str, for_node: nodes.For) -> List[Union[nodes.AssignName, nodes.Name]]:
"""Return a list of <index> AssignName and Name nodes contained in the body of <for_node>.
Remove uses of variables that shadow <index>.
"""
scope = for_node.scope()
return [
name_node
for name_node in for_node.nodes_of_class((nodes.AssignName, nodes.Name))
if name_node.name == index
and name_node != for_node.target
and name_node.lookup(name_node.name)[0] == scope
]
def register(linter):
linter.register_checker(UnnecessaryIndexingChecker(linter))
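# --- Illustrative example (not part of the checker) of the flagged pattern ---
if __name__ == '__main__':
    lst = [10, 20, 30]

    # Flagged: the loop variable `i` is only ever used to index `lst`,
    # so the checker reports E9994 (unnecessary-indexing) on it.
    for i in range(len(lst)):
        print(lst[i])

    # The fix suggested by the message: loop over the elements directly.
    for element in lst:
        print(element)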
| pyta-uoft/pyta | python_ta/checkers/unnecessary_indexing_checker.py | Python | gpl-3.0 | 5,590 |
#! /usr/bin/env python
import os
import argparse
from pymsbayes.fileio import expand_path
from pymsbayes.config import MsBayesConfig
from pymsbayes.utils.functions import is_file, is_dir, is_executable
class SmartHelpFormatter(argparse.HelpFormatter):
'''
A class to allow customizable line breaks for an argument help message
on a per argument basis.
'''
def _split_lines(self, text, width):
if text.startswith('r|'):
return text[2:].splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
def arg_is_path(path):
try:
if not os.path.exists(path):
raise
except:
msg = 'path {0!r} does not exist'.format(path)
raise argparse.ArgumentTypeError(msg)
return expand_path(path)
def arg_is_file(path):
try:
if not is_file(path):
raise
except:
msg = '{0!r} is not a file'.format(path)
raise argparse.ArgumentTypeError(msg)
return expand_path(path)
def arg_is_config(path):
try:
if not MsBayesConfig.is_config(path):
raise
except:
msg = '{0!r} is not an msBayes config file'.format(path)
raise argparse.ArgumentTypeError(msg)
return expand_path(path)
def arg_is_dir(path):
try:
if not is_dir(path):
raise
except:
msg = '{0!r} is not a directory'.format(path)
raise argparse.ArgumentTypeError(msg)
return expand_path(path)
def arg_is_executable(path):
    try:
        if not is_executable(path):
            raise
    except:
        msg = '{0!r} is not an executable'.format(path)
        raise argparse.ArgumentTypeError(msg)
    return expand_path(path)
def arg_is_nonnegative_int(i):
try:
if int(i) < 0:
raise
except:
msg = '{0!r} is not a non-negative integer'.format(i)
raise argparse.ArgumentTypeError(msg)
return int(i)
def arg_is_positive_int(i):
try:
if int(i) < 1:
raise
except:
msg = '{0!r} is not a positive integer'.format(i)
raise argparse.ArgumentTypeError(msg)
return int(i)
def arg_is_positive_float(i):
try:
if float(i) <= 0.0:
raise
except:
msg = '{0!r} is not a positive real number'.format(i)
raise argparse.ArgumentTypeError(msg)
return float(i)
def get_sort_index_help_message():
return (
'''r|The sorting index used by
`dpp-msbayes.pl`/`msbayes.pl` and `obsSumStats.pl`
scripts to determine how the summary statistic vectors
calculated from the alignments of the observed and
simulated data are to be grouped and sorted.
The default is %(default)s.
0: Do not group or sort. The identity and order of
the summary statistics of each alignment are
maintained and compared when calculating
Euclidean distance.
1-7: **NOTE**, options 1-7 all re-sort the summary
statistics in some way, and thus compare the
statistics from *different* alignments when
calculating the Euclidean distance. This is not
valid and these options should *NOT* be used.
They are maintained for backwards compatibility
with the original msBayes.
8-11: All of these options group the summary
statistics from multiple loci by taxon and then
calculate moments of each statistic across the
loci for each taxon, and then use these moments
to calculate Euclidean distance. The order of
the taxa is maintained, and so this is valid,
but you are losing a lot of information
contained in your loci by simply taking the mean
(option 11) across them. If you have A LOT of
loci, this sacrifice might be necessary to
reduce the number of summary statistics.
**NOTE**, options 8-10 are NOT well tested.
8: Use the first 4 moments (mean, variance,
skewness, and kurtosis) of each statistic.
9: Use the first 3 moments (mean, variance,
and skewness) of each statistic.
10: Use the first 2 moments (mean and variance)
of each statistic.
11: Use the first 1 moment (mean) of each
statistic.'''
)
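# --- Illustrative usage sketch (not part of the module) ---
# SmartHelpFormatter honours explicit line breaks in any help string that
# starts with 'r|', which is how get_sort_index_help_message() is meant to be
# consumed; the option names below are hypothetical.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=SmartHelpFormatter)
    parser.add_argument('--reps', type=arg_is_positive_int, default=1)
    parser.add_argument('--sort-index', type=arg_is_nonnegative_int, default=0,
            help=get_sort_index_help_message())
    args = parser.parse_args(['--reps', '10'])
    print(args.reps, args.sort_index)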
| joaks1/PyMsBayes | pymsbayes/utils/argparse_utils.py | Python | gpl-3.0 | 4,193 |
# stereographic projection
#
# Copyright 2010 dan collins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from numpy import *

def Convert(channelpnts):
    # assuming a half ~sphere, estimate the radius from the vertical extent
    rad = max(channelpnts['lpnt'][:, 1]) - min(channelpnts['lpnt'][:, 1])
| badbytes/pymeg | meg/stereographicprojection.py | Python | gpl-3.0 | 1,056 |
#!/usr/bin/env python
import asyncio
import json
from bson import json_util
from pymongo import MongoClient
from .search import Query
from .crawl import Spider
class ClientSpider(Spider):
"""
Indexes the client website and saves all pages to a mongodb collection
"""
def __init__(self, client):
self.client = client
uri = client.website
super().__init__(uri)
async def save_pages(self):
"""Save pages to mongodb"""
pages = await self.crawl()
mongo_client = MongoClient('localhost', 27017)
database = mongo_client.pages
collection = database[self.client.name]
collection.delete_many({}) # delete previous pages
collection.insert_many(pages) # insert new pages
# Dump loaded BSON to valid JSON string and reload it as dict
pages_sanitised = json.loads(json_util.dumps(pages))
return pages_sanitised
class ClientQuery(Query):
"""
Query a client
"""
def __init__(self, client, query):
self.client = client
pages = client.get_pages(client.name)
super().__init__(pages, query)
def modify_search(self):
"""Modify search with client settings"""
pages = self.search()
# Dump loaded BSON to valid JSON string and reload it as dict
pages_sanitised = json.loads(json_util.dumps(pages))
return pages_sanitised
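# --- Illustrative driver sketch (not part of the module) ---
# Assumptions: a mongod instance on localhost:27017, and a stand-in client
# object carrying the attributes the classes above expect.
class _DemoClient:
    name = 'demo'
    website = 'https://example.com'

def _demo_index_site():
    spider = ClientSpider(_DemoClient())
    return asyncio.run(spider.save_pages())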
if __name__ == "__main__":
import doctest
doctest.testmod()
| apt-helion/viperidae | api/developer.py | Python | gpl-3.0 | 1,525 |
# -*- coding: utf-8 -*-
# * Authors:
# * TJEBBES Gaston <[email protected]>
# * Arezki Feth <[email protected]>;
# * Miotte Julien <[email protected]>;
import os
from autonomie.models.user.userdatas import (
ZoneOption,
ZoneQualificationOption,
StudyLevelOption,
SocialStatusOption,
ActivityTypeOption,
PcsOption,
PrescripteurOption,
NonAdmissionOption,
ParcoursStatusOption,
SocialDocTypeOption,
CaeSituationOption,
AntenneOption,
)
from autonomie.models.career_path import (
TypeContratOption,
EmployeeQualityOption,
TypeSortieOption,
MotifSortieOption,
)
from autonomie.views.admin.tools import (
get_model_admin_view,
)
from autonomie.views.admin.userdatas import (
USERDATAS_URL,
UserDatasIndexView,
)
def includeme(config):
"""
    Configure routes and views for userdatas management
"""
for model in (
CaeSituationOption,
AntenneOption,
ZoneOption,
ZoneQualificationOption,
StudyLevelOption,
SocialStatusOption,
EmployeeQualityOption,
ActivityTypeOption,
PcsOption,
PrescripteurOption,
NonAdmissionOption,
ParcoursStatusOption,
MotifSortieOption,
SocialDocTypeOption,
TypeSortieOption,
TypeContratOption,
):
view = get_model_admin_view(model, r_path=USERDATAS_URL)
config.add_route(view.route_name, view.route_name)
config.add_admin_view(view, parent=UserDatasIndexView)
| CroissanceCommune/autonomie | autonomie/views/admin/userdatas/options.py | Python | gpl-3.0 | 1,533 |
# -*- coding: utf8 -*-
"""Configuration params for sherlock."""
import os
basedir = os.path.abspath(os.path.dirname(__file__))
CORS_HEADER = 'Content-Type'
TOKEN_TIMEOUT = 9999
SECRET_KEY = os.urandom(25)
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = True
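if __name__ == '__main__':
    # Illustrative only (assumption: sherlock consumes this file as a Flask
    # module-style config; from_object picks up the uppercase names).
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object('config')
    print(app.config['TOKEN_TIMEOUT'])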
| leoGalani/sherlock | config.py | Python | gpl-3.0 | 309 |
# #############################################################################
#
# buckshot.py - given a set of numbers and a marker name, make sarsoft markers
# corresponding to all three lat-lon coordinate systems
#
#
# developed for Nevada County Sheriff's Search and Rescue
# Copyright (c) 2015 Tom Grundy
#
# http://ncssarradiologsoftware.sourceforge.net
#
# Contact the author at [email protected]
# Attribution, feedback, bug reports and feature requests are appreciated
#
# REVISION HISTORY
#-----------------------------------------------------------------------------
# DATE | AUTHOR | NOTES
#-----------------------------------------------------------------------------
# 5-29-16 TMG optionally write a GPX file, with color and symbol data;
# skip the URL export step if the URL field is blank;
# rearrange GUI accordingly
# 3-3-17 TMG bug fixes and cleanup (fixes github issues 6,7,8,9)
# 3-11-17 TMG fix issue 10 (crash when URL is other than localhost)
# 4-16-17 TMG fix issue 3 (mark the best match) - don't attempt an
# algorithm - just let the user select one possibility
# as the best match; this required changing the fields
# to QListWidgets; make sure the best match is exported
# to sarsoft with appropriate text, and to gpx with
# appropriate icon for Locus Map (android app). Can
# investigate best icons for other apps/programs later.
# 1-21-18 TMG fix issue 12 (integrate with search-in-progress) by
# creating a new sartopo folder each time, and placing
# all newly created buckshot markers in that folder
# 8-29-18 TMG fix #14 (work with either API version) by using external
# module sartopo_python (see separate GitHub project
# by that name)
# 10-7-18 TMG overhaul to work with significant api changes in v4151
# of sar.jar - probably not backwards compatible - required
# changes to sartopo_python also; allow URL on network
# other than localhost
#
# #############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See included file LICENSE.txt for full license terms, also
# available at http://opensource.org/licenses/gpl-3.0.html
#
# ############################################################################
#
# Originally written and tested on Windows Vista Home Basic 32-bit
# with PyQt 5.4 and Python 3.4.2; should run for Windows Vista and higher
#
# Note, this file must be encoded as UTF-8, to preserve degree signs in the code
#
# ############################################################################
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import xml.dom.minidom
import regex as re
from parse import *
import sys
import requests
import json
import os
from sartopo_python import SartopoSession
from buckshot_ui import Ui_buckshot
# valid delimiters: space, period, X, x, D, d, M, m, ', S, s, "
# 'best match' = all correct delimiters in all the correct places
# 'close match' = some delimiter in all the correct places
# first, define exactly what will qualify as an 'exact match';
# then, relax that definition some to define a 'close match'
# make a canonical form of the input string;
# make a canonical form of each possibility;
# if they are identical, it is an exact match
# i string actual coordinates should this be a match?
# 1. 39d120d 39.0dN -120.0dW exact
# 2. 39.0d120.0d exact
# 3. 39d-120d exact
# 4. 39120 close
# 5. 3901200 close
# 4. 39d12m120d12m 39d12.0mN -120d12.0mW exact
#
# preprocess the input string:
# 1. remove minus sign(s)
# 2. convert to lowercase
# 3. replace ' with m
# 4. replace " with s
# 5. replace all letters other than [dmsx] with <space>
# 6. replace all multiple-spaces with just one space
# 7. do some work to find the lat/lon break ('x'):
# 7a. for each known delimiter [dms]: if the next delimiter is d, then insert
# 'x' immediately after the current delimiter
# preprocessed input string characteristics (i.e. canonical form):
# - no minus signs
# - unknown delimiters are represented by a <space>
# - known delimiters are [.dmsx]
# criteria for exact match:
#
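# A minimal sketch (illustrative only; calcLatLon below inlines these steps
# rather than calling a helper) of the canonical form described above:
def _canonicalize(coord_string):
    s = coord_string.lower().replace('-', '')   # steps 1-2: drop minus, lowercase
    s = s.replace("'", 'm').replace('"', 's')   # steps 3-4: normalize min/sec marks
    s = re.sub(r'[^0-9dmsx.]', ' ', s)          # step 5: unknown delimiters -> space
    s = re.sub(r'\s+', ' ', s).strip()          # step 6: collapse repeated spaces
    return s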
delimiterRegEx="[ .XxDdMm'Ss\"]"
bestMatchLabelPrefix="*"
closeMatchLabelPrefix="+"
class MyWindow(QDialog,Ui_buckshot):
def __init__(self,parent):
QDialog.__init__(self)
self.setWindowFlags(self.windowFlags()|Qt.WindowMinMaxButtonsHint)
self.parent=parent
self.ui=Ui_buckshot()
self.ui.setupUi(self)
self.setAttribute(Qt.WA_DeleteOnClose)
self.coordDdStringList=[]
self.coordDMmStringList=[]
self.coordDMSsStringList=[]
# default gpx dir: ~\Documents if it exists, ~ otherwise
self.gpxDefaultDir=os.path.expanduser("~")
docDir=self.gpxDefaultDir+"\\Documents"
if os.path.isdir(docDir):
self.gpxDefaultDir=docDir
self.ui.gpxFileNameField.setText(self.gpxDefaultDir+"\\buckshot_blank.gpx")
self.bestMatch=""
def markerNameChanged(self):
print("markerNameChanged called")
markerName=self.ui.markerNameField.text()
fileName=self.ui.gpxFileNameField.text()
idx=fileName.find("buckshot_")
if idx > -1:
self.ui.gpxFileNameField.setText(fileName[0:idx]+"buckshot_"+markerName+".gpx")
def gpxSetFileName(self):
markerName=self.ui.markerNameField.text()
initVal=self.ui.gpxFileNameField.text()
if initVal=="":
initVal=self.gpxDefaultDir+"\\buckshot_"+markerName+".gpx"
gpxFileName=QFileDialog.getSaveFileName(self,"GPX filename",initVal,filter="gpx (*.gpx)")
# cancel from file dialog returns a real array with a blank filename;
# prevent this from blanking out the filename field
if gpxFileName[0]!="":
self.ui.gpxFileNameField.setText(gpxFileName[0])
def writeGPX(self,markerList):
gpxFileName=self.ui.gpxFileNameField.text()
print("Writing GPX file "+gpxFileName)
# make sure the file is writable; if not, return False here
gpxFile=self.fnameValidate(gpxFileName)
if not gpxFile:
return False
doc=xml.dom.minidom.Document()
gpx=doc.createElement("gpx")
gpx.setAttribute("creator","BUCKSHOT")
gpx.setAttribute("version","1.1")
gpx.setAttribute("xmlns","http://www.topografix.com/GPX/1/1")
# each element in markerList will result in a gpx wpt token.
        # markerList element syntax = [name,lat,lon,color,symbol]
# <desc> CDATA contains SARSoft marker and color
# <sym> CDATA contains Locus marker, parsed from marker name
# some relevant Locus markers:
# z-ico01 = red down arrow
# z-ico02 = red x
# z-ico03 = red donut
# z-ico04 = red dot
# z-ico05 = red down triangle
# same sequence as above: 06-10 = cyan; 11-15=green; 16-20=yellow
# misc-sunny = large green star bubble
for marker in markerList:
## print("marker:"+str(marker)+"\n")
[title,lat,lon,color,symbol]=marker
description=""
if title.startswith(bestMatchLabelPrefix):
description="User-selected best match!"
if title.startswith(closeMatchLabelPrefix):
description="CLOSE match for specified coordinates"
wpt=doc.createElement("wpt")
wpt.setAttribute("lat",str(lat))
wpt.setAttribute("lon",str(lon))
name=doc.createElement("name")
desc=doc.createElement("desc")
sym=doc.createElement("sym")
# descCDATAStr="comments=&url=%23"+marker[3][1:]
descCDATAStr=description
descCDATA=doc.createCDATASection(descCDATAStr)
if "_Dd" in title: # red
if title.startswith(bestMatchLabelPrefix):
symCDATAStr="z-ico01"
else:
symCDATAStr="z-ico04"
elif "_DMm" in title: # cyan
if title.startswith(bestMatchLabelPrefix):
symCDATAStr="z-ico06"
else:
symCDATAStr="z-ico09"
elif "_DMSs" in title: # yellow
if title.startswith(bestMatchLabelPrefix):
symCDATAStr="z-ico16"
else:
symCDATAStr="z-ico19"
else:
if title.startswith(bestMatchLabelPrefix):
symCDATAStr="z-ico11"
else:
symCDATAStr="z-ico14"
name.appendChild(doc.createTextNode(title))
desc.appendChild(descCDATA)
symCDATA=doc.createCDATASection(symCDATAStr)
sym.appendChild(symCDATA)
wpt.appendChild(name)
wpt.appendChild(desc)
wpt.appendChild(sym)
gpx.appendChild(wpt)
doc.appendChild(gpx)
gpxFile.write(doc.toprettyxml())
gpxFile.close()
return True
# calcLatLon - make guesses about actual coordinates based on a string of numbers
# called from textChanged of coordsField
# assumptions:
# - Degrees Latitude is a two-digit number starting with 2, 3, or 4
# - Degrees Longitude is a three-digit number starting with one, second digit
# either 0, 1, or 2
# - space or minus sign is a known delimiter and assumed to be correct
def calcLatLon(self):
### code to get overlapping matches (i.e. each possible longitude whole number) and their indices:
##import regex as re
##matches=re.finditer("1[012][0123456789]",numbers,overlapped=True)
##[match.span() for match in matches]
coordString=self.ui.coordsField.text()
# shortCoordString = the 'canonical' form that the possibilities will
# be compared to, to check for close or exact matches. Same as
# coordString, with standardized D/M/S delimiters; cannot eliminate all
# spaces at this point since they may or may not be important delimiters;
# therefore, will need to insert a space into the shortCoordString before
# longitude for each possibility on the fly during parsing; this ensures
        # that an original coordString with NO spaces at all can still make a
        # best match.
shortCoordString=coordString.lower()
shortCoordString=re.sub(r'[Xx]',' ',shortCoordString) # replace X or x with space for canonical form
shortCoordString=re.sub(r'\s+',' ',shortCoordString) # get rid of duplicate spaces
shortCoordString=re.sub(r'\'','m',shortCoordString)
shortCoordString=re.sub(r'"','s',shortCoordString)
print("Short coordinate string for comparison:"+shortCoordString+"\n")
# different approach:
# make a list of the indeces and kinds of delimiters;
# if the indeces all match, it is a 'close' match;
# if the indeces all match AND each one is of the same kind, it is an 'best' match
delimIter=re.finditer(r'[ .dDmMsS\'"-]+',coordString)
## numbers=re.sub(r'[ .dDmMsS\'"-]','',coordString)
numbers=re.sub(r'\D','',coordString)
print("Raw Numbers:"+numbers+"\n")
## numbers=self.ui.numbersField.text()
self.coordDdStringList=[]
self.coordDMmStringList=[]
self.coordDMSsStringList=[]
latDegIndex=0
lonDegIndex=-1
pattern=re.compile('1[012][0123456789]') # assume longitude 100-129 west
matches=pattern.finditer(numbers,2,overlapped=True)
## print(str([match.span() for match in matches]))
for lonDegMobj in matches:
print(str(lonDegMobj.span()))
## lonDegMobj=pattern.search(numbers,2) # skip the first two characters
## if lonDegMobj!=None:
lonDegIndex=lonDegMobj.start()
lonDeg=lonDegMobj.group()
print("lonDegIndex: '"+str(lonDegIndex)+"'")
print("Longitude Degrees: '"+lonDeg+"'")
lonRestIndex=lonDegIndex+3
lonRest=numbers[lonRestIndex:]
print("Longitude rest: '"+lonRest+"'")
if int(numbers[0])>1 and int(numbers[0])<5: #assume latitude 20-49 north
latDeg=numbers[0:2]
latRest=numbers[2:lonDegIndex]
print("Latitude degrees: '"+latDeg+"'")
print("Latitude rest: '"+latRest+"'")
# initialize whole minutes and seconds to unrealizable values
# for use in the 'possible' section below
latMin1="99"
latMin2="99"
latSec11="99"
latSec12="99"
latSec21="99"
latSec22="99"
lonMin1="99"
lonMin2="99"
lonSec11="99"
lonSec12="99"
lonSec21="99"
lonSec22="99"
# initialize "rest" arguments to blank strings
latMin1Rest=""
latMin2Rest=""
latSec11Rest=""
latSec12Rest=""
latSec21Rest=""
latSec22Rest=""
lonMin1Rest=""
lonMin2Rest=""
lonSec11Rest=""
lonSec12Rest=""
lonSec21Rest=""
lonSec22Rest=""
# parse minutes and seconds from the rest of the string
# whole minutes and whole seconds could be one digit or two digits
if len(latRest)>0:
print("t1")
latMin1=latRest[0]
if len(latRest)>1:
print("t2")
latMin1Rest=latRest[1:]
latMin2=latRest[0:2]
if len(latRest)>2:
print("t2.5")
latMin2Rest=latRest[2:]
if len(latMin1Rest)>0:
print("t3")
latSec1=latMin1Rest[0:]
if len(latSec1)>0:
print("t4")
latSec11=latSec1[0]
if len(latSec1)>1:
print("t5")
latSec11Rest=latSec1[1:]
latSec12=latSec1[0:2]
if len(latSec1)>2:
print("t5.5")
latSec12Rest=latSec1[2:]
if len(latMin2Rest)>0:
print("t6")
latSec2=latMin2Rest[0:]
if len(latSec2)>0:
print("t7")
latSec21=latSec2[0]
if len(latSec2)>1:
print("t8")
latSec21Rest=latSec2[1:]
latSec22=latSec2[0:2]
if len(latSec2)>2:
print("t9")
latSec22Rest=latSec2[2:]
else:
latSec2="0" # account for implied zero seconds
latSec21="0"
else:
latSec1="0" # account for implied zero seconds
latSec11="0"
if len(lonRest)>0:
lonMin1=lonRest[0]
if len(lonRest)>1:
lonMin1Rest=lonRest[1:]
lonMin2=lonRest[0:2]
if len(lonRest)>2:
lonMin2Rest=lonRest[2:]
if len(lonMin1Rest)>0:
lonSec1=lonMin1Rest[0:]
if len(lonSec1)>0:
lonSec11=lonSec1[0]
if len(lonSec1)>1:
lonSec11Rest=lonSec1[1:]
lonSec12=lonSec1[0:2]
if len(lonSec1)>2:
lonSec12Rest=lonSec1[2:]
if len(lonMin2Rest)>0:
lonSec2=lonMin2Rest[0:]
if len(lonSec2)>0:
lonSec21=lonSec2[0]
if len(lonSec2)>1:
lonSec21Rest=lonSec2[1:]
lonSec22=lonSec2[0:2]
if len(lonSec2)>2:
lonSec22Rest=lonSec2[2:]
else:
lonSec2="0" # account for implied zero seconds
lonSec21="0"
else:
lonSec1="0" # account for implied zero seconds
lonSec11="0"
# set flags as to which ones are possible
# (whole min/sec <60 (2-digit) or <10 (1-digit))
latMin1Possible=int(latMin1)<10
latMin2Possible=int(latMin2)>9 and int(latMin2)<60
latSec11Possible=int(latSec11)<10
latSec12Possible=int(latSec12)<60
latSec21Possible=int(latSec21)<10
latSec22Possible=int(latSec22)<60
lonMin1Possible=int(lonMin1)<10
lonMin2Possible=int(lonMin2)>9 and int(lonMin2)<60
lonSec11Possible=int(lonSec11)<10
lonSec12Possible=int(lonSec12)<60
lonSec21Possible=int(lonSec21)<10
lonSec22Possible=int(lonSec22)<60
print("latMin1Possible:"+str(latMin1Possible)+":"+latMin1+":"+latMin1Rest)
print("latMin2Possible:"+str(latMin2Possible)+":"+latMin2+":"+latMin2Rest)
print("latSec11Possible:"+str(latSec11Possible)+":"+latSec11+":"+latSec11Rest)
print("latSec12Possible:"+str(latSec12Possible)+":"+latSec12+":"+latSec12Rest)
print("latSec21Possible:"+str(latSec21Possible)+":"+latSec21+":"+latSec21Rest)
print("latSec22Possible:"+str(latSec22Possible)+":"+latSec22+":"+latSec22Rest)
print("lonMin1Possible:"+str(lonMin1Possible)+":"+lonMin1+":"+lonMin1Rest)
print("lonMin2Possible:"+str(lonMin2Possible)+":"+lonMin2+":"+lonMin2Rest)
print("lonSec11Possible:"+str(lonSec11Possible)+":"+lonSec11+":"+lonSec11Rest)
print("lonSec12Possible:"+str(lonSec12Possible)+":"+lonSec12+":"+lonSec12Rest)
print("lonSec21Possible:"+str(lonSec21Possible)+":"+lonSec21+":"+lonSec21Rest)
print("lonSec22Possible:"+str(lonSec22Possible)+":"+lonSec22+":"+lonSec22Rest)
# zero-pad right-of-decimal if needed, i.e. no blank strings right-of-decimal
latRest=latRest or "0"
lonRest=lonRest or "0"
latMin1Rest=latMin1Rest or "0"
latMin2Rest=latMin2Rest or "0"
lonMin1Rest=lonMin1Rest or "0"
lonMin2Rest=lonMin2Rest or "0"
latSec11Rest=latSec11Rest or "0"
latSec12Rest=latSec12Rest or "0"
latSec21Rest=latSec21Rest or "0"
latSec22Rest=latSec22Rest or "0"
lonSec11Rest=lonSec11Rest or "0"
lonSec12Rest=lonSec12Rest or "0"
lonSec21Rest=lonSec21Rest or "0"
lonSec22Rest=lonSec22Rest or "0"
# build the lists of possible coordinate strings for each coordinate system
# (if only one of lat/lon per pair is possible, then the pair is
# not possible)
self.coordDdStringList.append(str(latDeg+"."+latRest+"deg N x "+lonDeg+"."+lonRest+"deg W"))
if latMin1Possible and lonMin1Possible:
self.coordDMmStringList.append(str(latDeg+"deg "+latMin1+"."+latMin1Rest+"min N x "+lonDeg+"deg "+lonMin1+"."+lonMin1Rest+"min W"))
if latMin1Possible and lonMin2Possible:
self.coordDMmStringList.append(str(latDeg+"deg "+latMin1+"."+latMin1Rest+"min N x "+lonDeg+"deg "+lonMin2+"."+lonMin2Rest+"min W"))
if latMin2Possible and lonMin1Possible:
self.coordDMmStringList.append(str(latDeg+"deg "+latMin2+"."+latMin2Rest+"min N x "+lonDeg+"deg "+lonMin1+"."+lonMin1Rest+"min W"))
if latMin2Possible and lonMin2Possible:
self.coordDMmStringList.append(str(latDeg+"deg "+latMin2+"."+latMin2Rest+"min N x "+lonDeg+"deg "+lonMin2+"."+lonMin2Rest+"min W"))
if latSec11Possible and lonSec11Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec11+"."+latSec11Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec11+"."+lonSec11Rest+"sec W"))
if latSec11Possible and lonSec12Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec11+"."+latSec11Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec12+"."+lonSec12Rest+"sec W"))
if latSec11Possible and lonSec21Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec11+"."+latSec11Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec21+"."+lonSec21Rest+"sec W"))
if latSec11Possible and lonSec22Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec11+"."+latSec11Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec22+"."+lonSec22Rest+"sec W"))
if latSec12Possible and lonSec11Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec12+"."+latSec12Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec11+"."+lonSec11Rest+"sec W"))
if latSec12Possible and lonSec12Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec12+"."+latSec12Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec12+"."+lonSec12Rest+"sec W"))
if latSec12Possible and lonSec21Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec12+"."+latSec12Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec21+"."+lonSec21Rest+"sec W"))
if latSec12Possible and lonSec22Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin1+"min "+latSec12+"."+latSec12Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec22+"."+lonSec22Rest+"sec W"))
if latSec21Possible and lonSec11Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec21+"."+latSec21Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec11+"."+lonSec11Rest+"sec W"))
if latSec21Possible and lonSec12Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec21+"."+latSec21Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec12+"."+lonSec12Rest+"sec W"))
if latSec21Possible and lonSec21Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec21+"."+latSec21Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec21+"."+lonSec21Rest+"sec W"))
if latSec21Possible and lonSec22Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec21+"."+latSec21Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec22+"."+lonSec22Rest+"sec W"))
if latSec22Possible and lonSec11Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec22+"."+latSec22Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec11+"."+lonSec11Rest+"sec W"))
if latSec22Possible and lonSec12Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec22+"."+latSec22Rest+"sec N x "+lonDeg+"deg "+lonMin1+"min "+lonSec12+"."+lonSec12Rest+"sec W"))
if latSec22Possible and lonSec21Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec22+"."+latSec22Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec21+"."+lonSec21Rest+"sec W"))
if latSec22Possible and lonSec22Possible:
self.coordDMSsStringList.append(str(latDeg+"deg "+latMin2+"min "+latSec22+"."+latSec22Rest+"sec N x "+lonDeg+"deg "+lonMin2+"min "+lonSec22+"."+lonSec22Rest+"sec W"))
else:
print("Latitiude not found.")
else:
print("Longitude not found.")
# self.ui.DdField.setPlainText("\n".join(self.coordDdStringList))
# self.ui.DMmField.setPlainText("\n".join(self.coordDMmStringList))
# self.ui.DMSsField.setPlainText("\n".join(self.coordDMSsStringList))
self.ui.DdField.clear()
self.ui.DdField.addItems(self.coordDdStringList)
self.ui.DMmField.clear()
self.ui.DMmField.addItems(self.coordDMmStringList)
self.ui.DMSsField.clear()
self.ui.DMSsField.addItems(self.coordDMSsStringList)
print("Possible Dd coordinates:\n"+str(self.coordDdStringList))
print("Possible DMm coordinates:\n"+str(self.coordDMmStringList))
print("Possible DMSs coordinates:\n"+str(self.coordDMSsStringList))
# now find the 'short' string corresponding to each possibility, and
# see how close of a match it is to the originally entered string
# (highlight the row in the GUI, and change the marker name and symbol)
for n,DdString in enumerate(self.coordDdStringList):
DdShort=DdString.replace("deg ","d")
DdShort=DdShort.replace("N x "," ")
DdShort=DdShort.replace("W","")
print("DdShort:"+DdShort)
if DdShort==shortCoordString:
print(" EXACT MATCH!")
self.coordDdStringList[n]=bestMatchLabelPrefix+DdString
# self.ui.DdField.setPlainText("\n".join(self.coordDdStringList))
self.ui.DdField.clear()
self.ui.DdField.addItems(self.coordDdStringList)
for n,DMmString in enumerate(self.coordDMmStringList):
DMmShort=DMmString.replace("deg ","d")
DMmShort=DMmShort.replace("min ","m")
DMmShort=DMmShort.replace("N x "," ")
DMmShort=DMmShort.replace("W","")
print("DMmShort:"+DMmShort)
if DMmShort==shortCoordString:
print(" EXACT MATCH!")
self.coordDMmStringList[n]=bestMatchLabelPrefix+DMmString
# self.ui.DMmField.setPlainText("\n".join(self.coordDMmStringList))
self.ui.DMmField.clear()
self.ui.DMmField.addItems(self.coordDMmStringList)
for n,DMSsString in enumerate(self.coordDMSsStringList):
DMSsShort=DMSsString.replace("deg ","d")
DMSsShort=DMSsShort.replace("min ","m")
DMSsShort=DMSsShort.replace("sec ","s")
DMSsShort=DMSsShort.replace("N x "," ")
DMSsShort=DMSsShort.replace("W","")
print("DMSsShort:"+DMSsShort)
if DMSsShort==shortCoordString:
print(" EXACT MATCH!")
self.coordDMSsStringList[n]=bestMatchLabelPrefix+DMSsString
# self.ui.DMSsField.setPlainText("\n".join(self.coordDMSsStringList))
self.ui.DMSsField.clear()
self.ui.DMSsField.addItems(self.coordDMSsStringList)
# possibilityClicked: when any row is clicked, unhighlight / unselect any
# highlighted/selected rows in the other two coordinate system list widgets,
# and use the selected row as the 'best match' possibility
def possibilityDdClicked(self):
clicked=self.ui.DdField.selectedItems()[0].text()
if clicked==self.bestMatch:
self.bestMatch=""
self.ui.DdField.clearSelection()
else:
self.bestMatch=clicked
print(self.bestMatch)
self.ui.DMmField.clearSelection()
self.ui.DMSsField.clearSelection()
def possibilityDMmClicked(self):
clicked=self.ui.DMmField.selectedItems()[0].text()
if clicked==self.bestMatch:
self.bestMatch=""
self.ui.DMmField.clearSelection()
else:
self.bestMatch=clicked
print(self.bestMatch)
self.ui.DdField.clearSelection()
self.ui.DMSsField.clearSelection()
def possibilityDMSsClicked(self):
clicked=self.ui.DMSsField.selectedItems()[0].text()
if clicked==self.bestMatch:
self.bestMatch=""
self.ui.DMSsField.clearSelection()
else:
self.bestMatch=clicked
print(self.bestMatch)
self.ui.DdField.clearSelection()
self.ui.DMmField.clearSelection()
#fnameValidate: try writing a test file to the specified filename;
# return the filehandle if valid, or print the error message and return False
# if invalid for whatever reason
def fnameValidate(self,filename):
try:
f=open(filename,"w")
except (IOError,FileNotFoundError) as err:
QMessageBox.warning(self,"Invalid Filename","GPX filename is not valid:\n\n"+str(err)+"\n\nNo markers written to GPX or URL. Fix or blank out the filename, and try again.")
return False
else:
return f
def createMarkers(self):
print("createMarkers called")
# if a gpx filename is specified, validate it first; if invalid, force
# the user to fix it or blank it out before generating any URL markers
if not self.fnameValidate(self.ui.gpxFileNameField.text()):
return
DdIdx=0
DMmIdx=0
DMSsIdx=0
DdIdxFlag=len(self.coordDdStringList)>1
DMmIdxFlag=len(self.coordDMmStringList)>1
DMSsIdxFlag=len(self.coordDMSsStringList)>1
markerName=self.ui.markerNameField.text()
if markerName=="":
markerName="X"
# for best match, use a ring with center dot
# for close match, use a hollow ring
# appropriate prefixes were determined from decoding json POST request
# of a live header when creating each type of marker by hand
# final URL values:
# simple dot: "#<hex_color>"
# target: "c:target,<hex_color>" (notice, no pound sign)
# ring: "c:ring,<hex_color>" (notice, no pound sign)
bestMatchSymbol="c:target"
closeMatchSymbol="c:ring"
# build a list of markers; each marker is a list:
        # [markerName,lat,lon,color,symbol]
markerList=[]
for DdString in self.coordDdStringList:
DdIdx=DdIdx+1
labelPrefix=""
symbol="point"
# if DdString.startswith(bestMatchLabelPrefix):
if DdString==self.bestMatch:
DdString=DdString.replace(bestMatchLabelPrefix,"")
labelPrefix=bestMatchLabelPrefix
symbol=bestMatchSymbol
if DdString.startswith(closeMatchLabelPrefix):
DdString=DdString.replace(closeMatchLabelPrefix,"")
labelPrefix=closeMatchLabelPrefix
symbol=closeMatchSymbol
print(" Dd : '"+DdString+"'")
r=parse("{:g}deg N x {:g}deg W",DdString)
print(r)
if DdIdxFlag:
idx=str(DdIdx)
else:
idx=""
markerList.append([labelPrefix+markerName+"_Dd"+idx,r[0],-r[1],"FF0000",symbol])
for DMmString in self.coordDMmStringList:
DMmIdx=DMmIdx+1
labelPrefix=""
symbol="point"
# if DMmString.startswith(bestMatchLabelPrefix):
if DMmString==self.bestMatch:
DMmString=DMmString.replace(bestMatchLabelPrefix,"")
labelPrefix=bestMatchLabelPrefix
symbol=bestMatchSymbol
if DMmString.startswith(closeMatchLabelPrefix):
DMmString=DMmString.replace(closeMatchLabelPrefix,"")
labelPrefix=closeMatchLabelPrefix
symbol=closeMatchSymbol
print(" DMm : "+DMmString)
r=parse("{:g}deg {:g}min N x {:g}deg {:g}min W",DMmString)
print(r)
if DMmIdxFlag:
idx=str(DMmIdx)
else:
idx=""
markerList.append([labelPrefix+markerName+"_DMm"+idx,r[0]+r[1]/60.0,-(r[2]+r[3]/60.0),"FF00FF",symbol])
for DMSsString in self.coordDMSsStringList:
DMSsIdx=DMSsIdx+1
labelPrefix=""
symbol="point"
# if DMSsString.startswith(bestMatchLabelPrefix):
if DMSsString==self.bestMatch:
DMSsString=DMSsString.replace(bestMatchLabelPrefix,"")
labelPrefix=bestMatchLabelPrefix
symbol=bestMatchSymbol
if DMSsString.startswith(closeMatchLabelPrefix):
DMSsString=DMSsString.replace(closeMatchLabelPrefix,"")
labelPrefix=closeMatchLabelPrefix
symbol=closeMatchSymbol
print(" DMSs: "+DMSsString)
r=parse("{:g}deg {:g}min {:g}sec N x {:g}deg {:g}min {:g}sec W",DMSsString)
print(r)
if DMSsIdxFlag:
idx=str(DMSsIdx)
else:
idx=""
markerList.append([labelPrefix+markerName+"_DMSs"+idx,r[0]+r[1]/60.0+r[2]/3600.0,-(r[3]+r[4]/60.0+r[5]/3600.0),"0000FF",symbol])
print("Final marker list:")
print(str(markerList))
if self.writeGPX(markerList):
infoStr="\nWrote GPX? YES"
else:
infoStr="\nWrote GPX? NO"
if self.ui.URLField.text():
url=self.ui.URLField.text()
p=url.lower().replace("http://","").split("/")
domainAndPort=p[0]
mapID=p[-1]
print("domainAndPort: "+domainAndPort)
print("map ID: "+mapID)
sts=SartopoSession(domainAndPort=domainAndPort,mapID=mapID)
fid=sts.addFolder("Buckshot")
print(" folder id="+str(fid))
for marker in markerList:
[title,lat,lon,color,symbol]=marker
description=""
if title.startswith(bestMatchLabelPrefix):
description="User-selected best match!"
if title.startswith(closeMatchLabelPrefix):
description="CLOSE match for specified coordinates"
sts.addMarker(lat,lon,title,description,color,symbol,None,fid)
infoStr+="\nWrote URL? YES"
else:
infoStr+="\nWrote URL? NO"
print("No URL specified; skipping URL export.")
QMessageBox.information(self,"Markers Created","Markers created successfully.\n"+infoStr)
def main():
app = QApplication(sys.argv)
w = MyWindow(app)
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| ncssar/buckshot | buckshot.py | Python | gpl-3.0 | 30,547 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-09 11:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tmv_app', '0072_runstats_dyn_win_threshold'),
]
operations = [
migrations.AlterField(
model_name='runstats',
name='dyn_win_threshold',
field=models.FloatField(default=0.1),
),
]
| mcallaghan/tmv | BasicBrowser/tmv_app/migrations/0073_auto_20180109_1124.py | Python | gpl-3.0 | 471 |
# Project Euler 500: smallest number with 2^500500 divisors, modulo 500500507
# keep the prime exponents monotonically decreasing
# (2^a0)(2^a1)(2^a2) = 2^500500
# (2^ai)(2^aj) -> (2^{ai-1})(2^{aj+1}) : p_j^(2^{aj})/p_i^(2^{ai-1}) < 1 , (a > b)
# (2^aj)ln(p_j) < (2^{ai-1})ln(p_i)
# if 2^{a_i-a_j-1} > ln(p_j)/ln(p_i), then a_i-=1,a_j-=1
#
# i < j
# p_j^(2^(a_j-1)) < p_i^(2^a_i)
# p_i^(2^(a_i-1)) < p_j^(2^a_j)
#
# (n-log(log(p_i)/log(p_j)/2)/log(2))/2 > ai > (n-log(2*log(p_i)/log(p_j))/log(2))/2
#
# n = a_i+a_j
# a_i = floor(
# (
# n + 1 - (log(
# log(p_i)/log(p_j)
# )/log(2))
# )/2
# )
import math
N = 9_000_000
MOD = 500500507
def mypow(v, mi):
r = 1
while mi > 0:
if mi % 2 == 1:
r *= v
r %= MOD
v *= v
v %= MOD
mi //= 2
return r
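# Quick sanity check (illustrative): mypow is binary exponentiation and must
# agree with Python's built-in three-argument pow.
assert mypow(123456, 789) == pow(123456, 789, MOD)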
def prepare():
p = [0 for i in range(N+10)]
ps = []
for i in range(2,N+1):
if p[i] != 0:
continue;
ps.append(i)
j = i*i
while j <= N:
p[j] = i
j+=i
print("prime count:",len(ps),",max prime:",ps[-1])
logij = [0 for i in range(500500)]
for i in range(500500):
logij[i] = math.log(math.log(ps[i])/math.log(ps[i+1])/2)/math.log(4)
print("logij finished")
    # p[i] == 0 means i is prime (composites are marked with a prime factor)
    # ps : list of primes
    # logij : precomputed log terms for adjacent prime pairs
    #         (the part of the comparison that does not depend on the exponents)
    return p,ps,logij
# dst: target power-of-two exponent; p: prime sieve array; ps: list of primes;
# logij: the precomputed non-exponent parts
def main(dst,p,ps,logij):
ans = [0 for i in range(500500)]
ans[0] = dst
    maxi = 1 # number of leading entries of ans currently in use
loop = True
while loop:
loop = False
i = 0
        # cannot use a plain for-range loop here because i is modified in the body
while i+1 < len(ans) and ans[i] > 0:
cnt = ans[i]+ans[i+1]
dstai = math.floor(cnt/2 - logij[i])
if dstai != ans[i]:
ans[i] = dstai;
ans[i+1] = cnt - dstai
                # back up one step and re-check the previous pair
if i > 0:
i -= 1
continue
i+=1
if i >= maxi and ans[i] > 0:
maxi = i+1
print(f"len[{maxi}]\tfirst[{ans[0]}]\tlast[{ans[i]}]", flush=True)
assert(i+1 < len(ans))
assert(ans[maxi] == 0)
# print("arr:",ans[:maxi])
print(f"fixlen [{maxi}]\tfirst[{ans[0]}]\tlast[{ans[maxi-1]}]", flush=True)
# check movable first and last
# if math.log(math.log(ps[maxi])/math.log(2)) < math.log(2)*(ans[0]-1):
newans0 = math.floor((ans[0]+1-math.log(math.log(2)/math.log(ps[maxi]))/math.log(2))/2)
if newans0 != ans[0]:
ans[maxi] = ans[0] - newans0
ans[0] = newans0
assert(ans[0] > 0)
assert(ans[maxi] > 0)
loop = True
output = 1
for i in range(len(ans)):
output *= mypow(ps[i],2**ans[i] - 1);
output %= MOD
print("ans:", output, flush=True)
p,ps,logij = prepare()
main(4,p,ps,logij)
main(500500,p,ps,logij)
# len[206803] first[5] last[1]
# fixlen [206803] first[5] last[1]
# ans: 287659177
# ran for 3 hours; not every position reached its minimum
| CroMarmot/MyOICode | ProjectEuler/unsolved/p500.py | Python | gpl-3.0 | 3,132 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 18:02:06 2015
"""
#####################################################################################################
# Groupe d'Étude pour la Traduction/le Traitement Automatique des Langues et de la Parole (GETALP)
# Homepage: http://getalp.imag.fr
#
# Author: Tien LE ([email protected])
# Advisors: Laurent Besacier & Benjamin Lecouteux
# URL: tienhuong.weebly.com
#####################################################################################################
import os
import sys
import datetime
# needed when importing modules/classes from another directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))#in order to test with line by line on the server
from feature.common_functions import *
from config.configuration import *
#**************************************************************************#
def get_duration(datetime_start, datetime_end):
dt1 = datetime.datetime.strptime(datetime_start, '%Y-%m-%d %H:%M:%S.%f')
dt2 = datetime.datetime.strptime(datetime_end, '%Y-%m-%d %H:%M:%S.%f')
duration = dt2-dt1
return duration
#**************************************************************************#
"""
noruego NOUN
de OTHER
"""
def count_num_of_words_for_polysemy_count_target(file_input_path):
if not os.path.exists(file_input_path):
raise TypeError('Not Existed file corpus input with format - column')
#end if
#for reading: file_input_path
file_reader = open(file_input_path, mode = 'r', encoding = 'utf-8')
num_of_words_all = 0
num_of_words_other = 0
str_other = "OTHER"
for line in file_reader:
line = line.strip()
if len(line) == 0:
continue
#end if
num_of_words_all += 1
items = split_string_to_list_delimeter_tab(line)
if items[1].strip() == str_other:
num_of_words_other += 1
#end if
#end for
#close file
file_reader.close()
num_of_words_result = num_of_words_all - num_of_words_other
print("num_of_words_all = %d" %num_of_words_all)
print("num_of_words_result = %d" %num_of_words_result)
#**************************************************************************#
def convert_from_second_to_format_h_m_s(num_of_second):
m, s = divmod(num_of_second, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print("d:h:m:s = %d:%d:%02d:%02d" % (d, h, m, s))
#**************************************************************************#
#**************************************************************************#
#**************************************************************************#
#**************************************************************************#
if __name__ == "__main__":
#Test case:
current_config = load_configuration()
datetime_start="2015-03-31 20:09:23.282432"
datetime_end="2015-04-01 00:25:42.996360"
dt = get_duration(datetime_start, datetime_end)
print(dt)
file_input_path = current_config.BABEL_NET_CORPUS_ES
count_num_of_words_for_polysemy_count_target(file_input_path)
convert_from_second_to_format_h_m_s(449369)
    print('OK')
| besacier/WCE-LIG | wce_system/preprocessing/measure_performance_system.py | Python | gpl-3.0 | 3,171 |
from setuptools import setup
setup(name="waterworks",
version="0.0.0",
description="Message agregation daemon.",
url="https://github.com/Aeva/waterworks",
author="Aeva Palecek",
author_email="[email protected]",
license="GPLv3",
packages=["waterworks"],
zip_safe=False,
entry_points = {
"console_scripts" : [
"waterworks=waterworks.waterworks:start_daemon",
],
},
install_requires = [
])
| Aeva/waterworks | setup.py | Python | gpl-3.0 | 508 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
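# Minimal self-check (illustrative; LeetCode supplies TreeNode and the harness):
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root = Solution().invertTree(root)
    assert (root.left.val, root.right.val) == (3, 2)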
| zqfan/leetcode | algorithms/226. Invert Binary Tree/solution.py | Python | gpl-3.0 | 490 |
# Copyright (C) 2011 Pierre de Buyl
# This file is part of pyMPCD
# pyMPCD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyMPCD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyMPCD. If not, see <http://www.gnu.org/licenses/>.
"""
MPCD.py - This file contains the base class to perform MPCD simulations: MPCD_system.
"""
## \namespace pyMPCD::MPCD
# \brief MPCD.py - This file contains the base class to perform MPCD simulations: MPCD_system.
#
# MPCD methods define the basic
import numpy as np
from scipy.special import factorial
from MPCD_f import mpcd_mod
## Defines all variables for a MPCD system
#
# A simulation box is either declared via MPCD_system or through one test_case or from a file.
#
# \b Example
# \code
#import pyMPCD # import the pyMPCD package
#box = pyMPCD.MPCD_system( (8,8,8), 10, 1.0) # Create a PBC system of 8x8x8 cells, density 10 and cell size 1.0
# \endcode
class MPCD_system():
"""
The MPCD_system class contains all variables relevant for MPCD simulations: box information, periodic boundaries, particles positions, ...
"""
## Initializes a MPCD_system
#
# \param N_cells a 3 elements list or tuple that contains the number of cells in each dimension.
# \param density an integer number that define the average number of particles per cell.
# \param a the linear cell size.
# \param N_species The number of solvent species to consider.
def __init__( self, N_cells , density , a , N_species = 1):
"""
Defines a MPCD_system with periodic boundary conditions.
N_cells is the number of cells in the 3 dimensions.
density is the reference density for initialization and for filling cells with virtual particles.
a is the linear cell size.
"""
## Number of physical cells for the simulation.
self.N_cells = np.array( N_cells , dtype=np.int32)
# Check the input for N_cells
if (len(self.N_cells) != 3): raise Exception
## The number of actual binning cells. Is higher than N_cells to allow for non-periodic systems.
self.N_grid = self.N_cells + 1
## The average density (number of particles per cell).
self.density = int(density)
## The total number of MPCD particles.
self.so_N = np.prod( self.N_cells ) * self.density
## The linear cell size.
self.a = float(a)
## The number of solvent species
self.N_species = N_species
# Check that N_species is valid
if (N_species < 1): raise Exception
## The shift applied to the system.
self.shift = np.zeros( (3,) , dtype=np.float64 )
## NumPy array for the position of the MPCD solvent.
self.so_r = np.zeros( (self.so_N , 3) , dtype=np.float64 )
## A view to so_r in Fortran order.
self.so_r_f = self.so_r.T
## NumPy array for the velocity of the MPCD solvent.
self.so_v = np.zeros( (self.so_N , 3) , dtype=np.float64 )
## A view to so_v in Fortran order.
self.so_v_f = self.so_v.T
## The local temperature in the cells.
self.cells_temperature = np.zeros( (self.N_grid[0], self.N_grid[1], self.N_grid[2]), dtype=np.float64 )
## NumPy array for the species.
self.so_species = np.zeros( (self.so_N, ), dtype=np.int32)
## NumPy array holding the mass of each species.
self.so_mass = np.ones( (self.N_species, ), dtype=np.float64)
## The size of the box.
self.L = self.N_cells*self.a
## The MPCD time step, used for streaming.
self.tau = float(0.)
## The number of particles in each cell.
self.cells = np.zeros( self.N_grid, dtype=np.int32 )
## A view to cells in Fortran order.
self.cells_f = self.cells.T
## The list of particles, per cell.
self.par_list = np.zeros( (self.N_grid[0], self.N_grid[1], self.N_grid[2], 64), dtype=np.int32 )
## A view to par_list in Fortran order.
self.par_list_f = self.par_list.T
## The cell-wise center-of-mass velocity.
self.v_com = np.zeros( (self.N_grid[0], self.N_grid[1], self.N_grid[2], 3), dtype=np.float64 )
## The origin of the grid.
self.root = np.zeros( (3,), dtype=np.float64)
## Boundary conditions, in the three directions. 0 = PBC , 1 = elastic collision with virtual particles.
self.BC = np.zeros( (3,) , dtype=np.int32 )
## Wall velocity, on each wall. indices = wall dir (x,y,z) , wall low/high , v.
self.wall_v0 = np.zeros( (3, 2, 3) , dtype=np.float64 )
## Wall temperature for the virtual particles on each wall side. indices = wall dir (x,y,z), wall low/high.
self.wall_temp = np.zeros( (3, 2) , dtype=np.float64 )
## Magnitude of the acceleration provided by gravity, if applicable.
self.gravity = float(0.)
## Number of chemical reactions to consider.
self.N_reactions = 0
## Kind of chemical reaction.
self.reaction_kind = []
## Rates for the chemical reactions.
self.rates = []
## Stoechiometry for reactants.
self.reactants = []
## Stoechiometry for products.
self.products = []
def __str__(self):
return str(type(self))+' size '+str(self.N_cells)+' , '+str(self.so_N)+' solvent particles'
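    ## Registers a chemical reaction from its rate and the per-species
    # stoichiometries of reactants and products.
    #
    # \b Example (illustrative; assumes a two-species system)
    # \code
    #box.add_reaction(0.1, [1, 0], [0, 1])    # kind 0: A -> B
    #box.add_reaction(0.05, [1, 2], [0, 3])   # kind 1: A + 2B -> 3B
    #box.close_reaction()
    # \endcode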
def add_reaction(self, rate, reactants, products):
if ( not (len(reactants) == len(products) == self.N_species) ):
print("Bad input for add_reaction")
return
if ( (min(reactants)<0) or (min(products)<0) ):
print("Bad input for add_reaction")
return
N_reactants = np.sum(reactants)
N_products = np.sum(products)
if ( (N_reactants==1) and (N_products==1) ):
kind = 0
elif ( reactants[0]==1 and reactants[1]==2 and products[1]==3):
kind = 1
else:
print("reaction type not supported in add_reaction")
return
self.N_reactions += 1
self.reaction_kind.append(kind)
self.rates.append(rate)
self.reactants.append(reactants)
self.products.append(products)
def close_reaction(self):
if (self.N_reactions > 0):
self.rates = np.array(self.rates)
self.reactants = np.array(self.reactants,ndmin=2)
self.products = np.array(self.products,ndmin=2)
## Initializes the particles according to a normal or flat velocity profile.
# \param temp Initial temperature of the system.
# \param boltz if True or unset, use a normal velocity profile of temperature T.
# Else, use a flat profile.
def init_v(self, temp, boltz=True):
"""
Initializes the particles according to a normal distribution of
temperature temp and resets the total velocity of the system.
If boltz is set to False, a uniform distribution is used instead
"""
if boltz:
self.so_v[:,:] = np.random.randn( self.so_v.shape[0], self.so_v.shape[1] ) * np.sqrt(temp)
self.so_v /= np.sqrt(self.so_mass[self.so_species]).reshape( (self.so_N, 1) )
else:
self.so_v[:,:] = np.random.rand( self.so_v.shape[0], self.so_v.shape[1] )
self.so_v[:,:] -= 0.5
self.so_v[:,:] *= 2.*np.sqrt(6.*temp/2.)
tot_v = np.sum( self.so_v*(self.so_mass[self.so_species]).reshape( (self.so_N, 1) ) , axis=0 ) / self.so_mass[self.so_species].sum()
tot_v = tot_v.reshape( ( 1 , tot_v.shape[0] ) )
self.so_v -= tot_v
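# A minimal standalone check (illustrative sketch, not part of pyMPCD;
# assumes only numpy): drawing components as randn()*sqrt(T/m) gives a
# kinetic temperature of T by equipartition, and subtracting the mean
# velocity zeroes the total momentum, mirroring what init_v does above.
import numpy as np
_T, _m, _N = 1.5, 2.0, 100000
_v = np.random.randn(_N, 3) * np.sqrt(_T / _m)
_v -= _v.mean(axis=0)                       # remove centre-of-mass drift
_T_meas = _m * (_v ** 2).sum() / (3. * _N)  # T = m <v^2> / 3 in 3D
assert abs(_T_meas - _T) < 0.05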
## Places particles in the simulation box at random according to a uniform distribution.
def init_r(self):
"""
Places particles in the simulation box at random according to a uniform distribution.
"""
self.so_r[:,:] = np.random.rand( self.so_r.shape[0], self.so_r.shape[1] )
self.so_r *= (self.a*self.N_cells).reshape( ( 1 , self.so_r.shape[1] ) )
## Advances the particles according to their velocities.
def stream(self):
"""Advances the particles according to their velocities."""
self.so_r[:] += self.so_v*self.tau
## Advances the particles according to their velocities by calling a Fortran routine.
def stream_f(self):
""""Advances the particles according to their velocities by calling a Fortran routine."""
mpcd_mod.stream(self.so_r_f, self.so_v_f, self.tau)
## Advances the particles according to their velocities and a constant acceleration in the z direction.
# Also updates the velocities to take into account the acceleration.
def accel(self):
"""
Advances the particles according to their velocities and a constant acceleration in the z direction.
Also updates the velocities to take into account the acceleration.
"""
self.so_r[:] += self.so_v*self.tau + np.array( [0., 0., self.gravity] ).reshape( (1, 3) )*0.5*self.tau**2
self.so_v[:] += np.array( [0., 0., self.gravity] ).reshape( (1, 3) )*self.tau
## Corrects particles positions and velocities to take into account the boundary conditions.
def boundaries(self):
"""
Corrects particles positions and velocities to take into account the boundary conditions.
PBC keep the particles in the box by sending them to their periodic in-box location.
Elastic walls reflect particles and reverse the velocities.
"""
for i in range(3): # take each dim
if (self.BC[i] == 0): # if PBC, simply apply x = mod( x , L ) to keep particles in the box
self.so_r[:,i] = np.remainder( self.so_r[:,i] , self.L[i] )
elif (self.BC[i] == 1): # if elastic wall, reflect "too high" particles around L and "too low" particles around 0
j_out = ( self.so_r[:,i] > self.L[i] )
self.so_r[j_out,i] = 2.*self.L[i] - self.so_r[j_out,i]
self.so_v[j_out,i] = - self.so_v[j_out,i]
j_out = ( self.so_r[:,i] < 0 )
self.so_r[j_out,i] = - self.so_r[j_out,i]
self.so_v[j_out,i] = - self.so_v[j_out,i]
else:
print "unknown boundary condition ", self.BC[i]
raise Exception
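# The two boundary rules above, shown on a bare coordinate array
# (illustrative sketch, not part of pyMPCD): PBC wraps with a modulo,
# while an elastic wall mirrors the position back into [0, L] and flips
# the corresponding velocity component.
import numpy as np
_L = 10.0
_x = np.array([-1.0, 3.0, 12.5])
_vx = np.array([-2.0, 1.0, 2.0])
_wrapped = np.remainder(_x, _L)     # PBC result: [9.0, 3.0, 2.5]
_high = _x > _L
_x[_high] = 2. * _L - _x[_high]     # reflect around L
_vx[_high] = -_vx[_high]
_low = _x < 0
_x[_low] = -_x[_low]                # reflect around 0
_vx[_low] = -_vx[_low]
assert ((_x >= 0) & (_x <= _L)).all()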
def check_in_box(self):
"""
A test routine to check that all particles are actually in the box [0:L]
"""
r_min = self.so_r.min(axis=0)
t_min = ( r_min >= 0. ).min()
r_max = self.so_r.max(axis=0)
t_max = ( r_max < self.L ).min()
if ( t_min and t_max ):
return True
else:
return False
def print_check_in_box(self):
if (self.check_in_box()):
print("All particles inside the box")
else:
print("Some particles outside the box")
def null_shift(self):
"""
Resets the shift to zero.
"""
self.shift[:] = 0.
self.root[:] = self.shift[:] - self.a
def rand_shift(self):
"""
Applies a random shift in [0:a[ to the system.
"""
self.shift[:] = np.random.random( self.shift.shape[0] )*self.a
self.root[:] = self.shift[:] - self.a
def idx(self, i , cijk):
"""
Returns in cijk the three cell indices for particle i.
"""
cijk[:] = np.floor( (self.so_r[i,:] - self.root) / self.a ) # assignment casts into the caller's int array; np.floor cannot write an integer out argument directly
for j in range(3):
my_n = self.N_cells[j]
if (self.BC[j] == 0):
if ( cijk[j] >= my_n ): cijk[j] -= my_n
## Bins the particles into the MPCD cells.
def fill_box(self):
"""
Bins the particles into the MPCD cells.
"""
cijk = np.zeros( (3,) , dtype=np.int32 )
self.cells[:] = 0
self.par_list[:] = 0
for i in range(self.so_N):
self.idx( i , cijk )
my_n = self.cells[ cijk[0] , cijk[1] , cijk[2] ]
self.par_list[ cijk[0] , cijk[1] , cijk[2] , my_n ] = i
self.cells[ cijk[0] , cijk[1] , cijk[2] ] = my_n + 1
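# The same binning, written with vectorised numpy (illustrative sketch,
# not part of pyMPCD): cell indices are floor((r - root)/a) and
# np.add.at accumulates the per-cell counts in one pass instead of the
# explicit particle loop used above.
import numpy as np
_a, _root = 1.0, np.zeros(3)
_N_grid = np.array([4, 4, 4])
_r = np.random.rand(1000, 3) * _N_grid * _a
_cijk = np.floor((_r - _root) / _a).astype(np.int32)
_counts = np.zeros(_N_grid, dtype=np.int32)
np.add.at(_counts, (_cijk[:, 0], _cijk[:, 1], _cijk[:, 2]), 1)
assert _counts.sum() == 1000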
## Bins the particles into the MPCD cells by calling a Fortran routine.
def fill_box_f(self):
""""Bins the particles into the MPCD cells by calling a Fortran routine."""
mpcd_mod.fill_box(self.so_r_f, self.cells_f, self.par_list_f, self.a, self.root)
## Computes the center of mass velocity for all the cells in the system.
# \param self A MPCD_system instance.
def compute_v_com(self):
"""
Computes the c.o.m. velocity for all cells.
"""
self.v_com[:] = 0
for ci in range(self.N_grid[0]):
for cj in range(self.N_grid[1]):
for ck in range(self.N_grid[2]):
mass_local = 0.
v_local = np.zeros( (3, ) , dtype=np.float64)
n_local = self.cells[ci,cj,ck]
for i in range( n_local ):
part = self.par_list[ci,cj,ck,i]
v_local += self.so_v[part,:]*self.so_mass[self.so_species[part]]
mass_local += self.so_mass[self.so_species[part]]
if (n_local > 0): self.v_com[ci,cj,ck,:] = v_local/mass_local
## Computes the temperature of the cells.
# The temperature is computed as \f$ \frac{ \sum_{i=1}^{N^\xi} m_i (v_i-v_0)^2 }{3(N^\xi-1)} \f$
# where \f$ v_0 \f$ is the center of mass velocity of the cell.
# \param self A MPCD_system instance.
def compute_cells_temperature(self):
for ci in range(self.N_grid[0]):
for cj in range(self.N_grid[1]):
for ck in range(self.N_grid[2]):
v_local = self.v_com[ci,cj,ck,:]
T_local = 0.
n_local = self.cells[ci,cj,ck]
for i in range(n_local):
part = self.par_list[ci,cj,ck,i]
T_local += self.so_mass[self.so_species[part]]*((self.so_v[part,:]-v_local)**2).sum()
if (n_local>1):
T_local /= 3.*(n_local-1)
self.cells_temperature[ci,cj,ck] = T_local
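# The estimator above, T = sum_i m_i (v_i - v_com)^2 / (3 (n - 1)),
# applied to one synthetic cell so it can be verified (illustrative
# sketch, not part of pyMPCD): velocities drawn at temperature T give
# back T within sampling error.
import numpy as np
_T, _m, _n = 2.0, 1.0, 50000
_v = np.random.randn(_n, 3) * np.sqrt(_T / _m)
_v_com = _v.mean(axis=0)
_T_cell = _m * ((_v - _v_com) ** 2).sum() / (3. * (_n - 1))
assert abs(_T_cell - _T) < 0.05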
def MPCD_step_axis(self):
"""
Performs an MPCD collision step; the rotation axis is one of x, y or z, chosen at random for each cell.
"""
v_therm = np.zeros((3,))
nn = self.N_cells.copy()
nn += self.BC
is_wall = False
v_wall = np.zeros( (3,) )
local_N = np.zeros( (self.N_species,) , dtype=np.int32)
amu = np.zeros( (self.N_reactions,), dtype=np.float64)
for ci in range(nn[0]):
for cj in range(nn[1]):
for ck in range(nn[2]):
# Choose an axis to perform the rotation
rand_axis = np.random.randint(0,3)
axis1 = ( rand_axis + 1 ) % 3
axis2 = ( rand_axis + 2 ) % 3
if (np.random.randint(2)==0):
r_sign = 1
else:
r_sign = -1
# test if is a wall
is_wall = False
local_i = [ ci , cj , ck ]
for i in range(3):
if (self.BC[i]==1):
if ( (local_i[i]==0) or (local_i[i]==nn[i]-1) ):
is_wall=True
v_wall[:] = self.wall_v0[ i , min( local_i[i], 1 ) , : ]
v_temp = float(self.wall_temp[ i , min( local_i[i] , 1 ) ])
# number of particles in the cell
local_n = self.cells[ci,cj,ck]
# c.o.m. velocity in the cell
local_v = self.v_com[ci,cj,ck,:].copy()
# if cell is a wall, add virtual particles
if (is_wall):
if (local_n < self.density):
local_v = (
(np.random.randn(3) * np.sqrt(v_temp * (self.density - local_n) ) )
+ v_wall*(self.density - local_n)
+ local_v * local_n
) / self.density
v_therm += local_v
# perform cell-wise collisions
local_N *= 0
for i in range(local_n):
part = self.par_list[ci,cj,ck,i]
self.so_v[part,:] -= local_v
temp = self.so_v[part,axis2]
self.so_v[part,axis2] = r_sign*self.so_v[part,axis1]
self.so_v[part,axis1] = -r_sign*temp
self.so_v[part,:] += local_v
local_N[self.so_species[part]] += 1
# evaluate reaction probability
a0 = 0.
for i in range(self.N_reactions):
amu[i] = self.combi( i, local_N )*self.rates[i]
a0 += amu[i]
P_something = a0*self.tau
reac = -1
if (np.random.rand() < P_something):
amu /= a0
amu = np.cumsum(amu)
x = np.random.rand()
for i in range(self.N_reactions):
if (x < amu[i]):
reac = i
break
# apply reaction reac
if (reac>=0):
if (self.reaction_kind[reac]==0):
reac_s = np.where(self.reactants[reac]==1)[0][0]
prod_s = np.where(self.products[reac]==1)[0][0]
for i in range(local_n):
part = self.par_list[ci,cj,ck,i]
if (self.so_species[part]==reac_s):
self.so_species[part] = prod_s
break
elif (self.reaction_kind[reac]==1):
reac_s = np.where(self.reactants[reac]==1)[0][0]
prod_s = np.where(self.products[reac]==3)[0][0]
for i in range(local_n):
part = self.par_list[ci,cj,ck,i]
if (self.so_species[part]==reac_s):
self.so_species[part] = prod_s
break
else:
raise Exception
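# The +/-90 degree rotation about a random coordinate axis used in the
# collision loop above, checked on a single relative velocity
# (illustrative sketch, not part of pyMPCD). Swapping the two
# perpendicular components with one sign flip leaves the kinetic energy
# unchanged, so the collision only redistributes momentum within a cell.
import numpy as np
_v = np.random.randn(3)
_axis = np.random.randint(0, 3)
_a1, _a2 = (_axis + 1) % 3, (_axis + 2) % 3
_sign = np.random.choice([-1, 1])
_w = _v.copy()
_w[_a2], _w[_a1] = _sign * _v[_a1], -_sign * _v[_a2]
assert abs((_w ** 2).sum() - (_v ** 2).sum()) < 1e-12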
## Returns the combinatorial factor \f$ \prod_i N_i!/(N_i-R_i)! \f$ for reaction reac, or 0 if any species is insufficiently populated.
# \param reac Index of the reaction.
# \param N Per-species particle counts in the cell.
def combi(self, reac, N):
R = self.reactants[reac]
P = self.products[reac]
r = 1.0
for i in range(self.N_species):
if ( N[i] < R[i] ):
return 0
r *= factorial(N[i],exact=1)
r /= factorial(N[i]-R[i],exact=1)
return r
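# The factor combi() computes is h = prod_i N_i! / (N_i - R_i)!, the
# number of ordered ways to draw the reactant molecules from the cell.
# A standalone check (illustrative sketch; assumes scipy's factorial,
# which the routine above also relies on): for N = [5, 3], R = [1, 2]
# the factor is 5 * (3*2) = 30.
from scipy.special import factorial
_N, _R = [5, 3], [1, 2]
_h = 1
for _n, _r in zip(_N, _R):
    _h *= factorial(_n, exact=True) // factorial(_n - _r, exact=True)
assert _h == 30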
## Exchanges the positions, momenta and species of two solvent particles.
# \param i index of the first particle to be exchanged.
# \param j index of the second particle to be exchanged.
def exchange_solvent(self,i,j):
"""
Exchanges the positions, momenta and species of two solvent particles.
"""
tmp_copy = self.so_r[i,:].copy()
self.so_r[i,:] = self.so_r[j,:]
self.so_r[j,:] = tmp_copy
tmp_copy = self.so_v[i,:].copy()
self.so_v[i,:] = self.so_v[j,:]
self.so_v[j,:] = tmp_copy
tmp_copy = self.so_species[i].copy()
self.so_species[i] = self.so_species[j]
self.so_species[j] = tmp_copy
## Sorts the solvent in the x,y,z cell order.
def sort_solvent(self):
"""
Sorts the solvent in the x,y,z cell order.
"""
nn = self.N_cells.copy()
nn += self.BC
array_idx = 0
for ci in range(nn[0]):
for cj in range(nn[1]):
for ck in range(nn[2]):
local_n = self.cells[ci,cj,ck]
for i in range(local_n):
self.exchange_solvent(self.par_list[ci,cj,ck,i],array_idx)
array_idx += 1
def one_full_step(self):
"""
Performs a full step of MPCD without gravitation, including the
streaming, taking into account the boundary conditions and the MPCD
collision step.
"""
self.stream()
self.boundaries()
self.rand_shift()
self.fill_box()
self.compute_v_com()
self.MPCD_step_axis()
def one_full_accel(self):
"""
Performs a full step of MPCD with gravitation, including the
streaming, taking into account the boundary conditions and the MPCD
collision step.
"""
self.accel()
self.boundaries()
self.rand_shift()
self.fill_box()
self.compute_v_com()
self.MPCD_step_axis()
def one_full_step_f(self):
"""
Performs a full step of MPCD without gravitation, including the
streaming, taking into account the boundary conditions and the MPCD
collision step.
The streaming and binning steps are performed in Fortran.
"""
self.stream_f()
self.boundaries()
self.rand_shift()
self.fill_box_f()
self.compute_v_com()
self.MPCD_step_axis()
| pdebuyl/pyMPCD | pyMPCD/MPCD.py | Python | gpl-3.0 | 21,970 |
# -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import os, re, StringIO
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.compat import set
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent('')
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
if platform.isWindows():
test_forbiddenResource.skip = "Cannot remove read permission on Windows"
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
request = DummyRequest(['foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
child = staticFile.getChild("foo.bar", request)
self.assertEquals(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
request2 = DummyRequest(['foo.bar'])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEquals(''.join(request.written),
''.join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
"from twisted.web.static import Data\n"
"resource = Data('dynamic world','text/plain')\n")
file = static.File(base.path)
file.processors = {'.bar': script.ResourceScript}
request = DummyRequest(["foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'dynamic world')
self.assertEqual(request.outgoingHeaders['content-length'], '13')
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent('baz')
base.child('foo.quux').setContent('foobar')
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest(["foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
d.addCallback(cbRendered)
return d
def test_createPickleChild(self):
"""
L{static.File.createPickleChild} is deprecated.
"""
path = FilePath(self.mktemp())
path.makedirs()
static.File(path.path).createPickleChild("foo", None)
warnings = self.flushWarnings([self.test_createPickleChild])
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"File.createPickleChild is deprecated since Twisted 9.0. "
"Resource persistence is beyond the scope of Twisted Web.")
self.assertEqual(len(warnings), 1)
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The bytes to use as the contents of the resource.
@param type: Optional value for the content type of the resource.
"""
fileName = self.mktemp()
fileObject = open(fileName, 'w')
fileObject.write(content)
fileObject.close()
resource = static.File(fileName)
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.outgoingHeaders} consisting of
the headers that start with 'content-'.
"""
contentHeaders = {}
for k, v in request.outgoingHeaders.iteritems():
if k.startswith('content-'):
contentHeaders[k] = v
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(
'a'*length, type=contentType, encoding=contentEncoding)
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-length': str(length),
'content-encoding': contentEncoding},
self.contentHeaders(request))
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent('abcdef', type=contentType, encoding=contentEncoding)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-encoding': contentEncoding,
'content-range': 'bytes 1-3/6', 'content-length': '3'},
self.contentHeaders(request))
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleUnsatisfiableRangeSets416RequestedRangeNotSatisfiable(self):
"""
makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '0',
'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=2-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '1',
'content-range': 'bytes 2-2/3'},
self.contentHeaders(request))
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
makeProducer when the Range header requests multiple byte ranges
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_multipleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple satisfiable
byte ranges sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent(
'abcdefghijkl', encoding='gzip')
producer = resource.makeProducer(request, resource.openForReading())
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
set(['content-length', 'content-type']),
set(contentHeaders.keys()))
# The content-length depends on the boundary used in the response.
expectedLength = 5
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(expectedLength, contentHeaders['content-length'])
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn('content-type', contentHeaders)
contentType = contentHeaders['content-type']
self.assertNotIdentical(
None, re.match(
'multipart/byteranges; boundary="[^"]*"\Z', contentType))
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn('content-encoding', contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleUnsatisfiableRangesSets416RequestedRangeNotSatisfiable(self):
"""
makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
request = DummyRequest([])
contentType = "text/plain"
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-length': '0', 'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,100-200'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'a' * (2*bufferSize + 1)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize:2*bufferSize],
content[2*bufferSize:]
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO('abcdef'))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'abc' * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, bufferSize+10)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1:bufferSize+1],
content[bufferSize+1:bufferSize+11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content), [('1', 1, 3), ('2', 5, 1)])
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual('1bcd2f', ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but the first boundary plus the first chunk
plus the second boundary is more; this is unimportant, as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = '0123456789' * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content),
[('a', 0, 2), ('b', 5, 10), ('c', 0, 0)])
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
'a' + content[0:2] + 'b' + content[5:11],
content[11:15] + 'c',
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), [('', 1, 2)])
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = ('\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
'\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
'\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
'&\xfd%\xdd\x82q/A\x10Y\x8b')
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([''])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEquals(logItem["message"][0], expected)
self.assertEqual(
self.catcher, [], "An additional log occurred: %r" % (logItem,))
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, 'bytes')
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, 'unknown=1-2')
# there's no - in =stuff
self.assertRaises(ValueError, f, 'bytes=3')
# both start and end are empty
self.assertRaises(ValueError, f, 'bytes=-')
# start isn't an integer
self.assertRaises(ValueError, f, 'bytes=foo-')
# end isn't an integer
self.assertRaises(ValueError, f, 'bytes=-foo')
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, 'bytes=5-4')
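# A minimal standalone parser enforcing the same grammar these tests pin
# down (illustrative sketch, not the Twisted implementation): the unit
# must be 'bytes', the '=' is mandatory, null elements are tolerated,
# and each element is 'start-end' where either side may be empty but not
# both, with end >= start when both are given.
def _sketchParseRange(value):
    unit, sep, rest = value.partition('=')
    if unit.strip() != 'bytes' or not sep:
        raise ValueError("unsupported or malformed range %r" % (value,))
    parsed = []
    for chunk in rest.split(','):
        chunk = chunk.strip()
        if not chunk:
            continue                # null range elements are allowed
        if '-' not in chunk:
            raise ValueError("byte range %r lacks a '-'" % (chunk,))
        startText, endText = [t.strip() for t in chunk.split('-', 1)]
        start = int(startText) if startText else None
        end = int(endText) if endText else None
        if start is None and end is None:
            raise ValueError("empty byte range")
        if start is not None and end is not None and end < start:
            raise ValueError("end precedes start in %r" % (chunk,))
        parsed.append((start, end))
    return parsed
assert _sketchParseRange('bytes=2-5') == [(2, 5)]
assert _sketchParseRange('bytes=-3') == [(None, 3)]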
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and C{None}.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=0-'), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of C{None} and the end position.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=-3'), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=2-5'), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(
self.resource._parseRangeHeader(' bytes=1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes =1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes= 1-2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1 -2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1- 2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2 '), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,\r\n, ,\t'), [(1, 2)])
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,3-4'),
[(1, 2), (3, 4)])
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.headers['range'] = 'bytes=0-43'
self.resource.render(self.request)
self.assertEquals(len(''.join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
An incorrect range request (RFC 2616 defines a correct range request as
a bytes-unit, followed by a '=' character, followed by a byte-range set;
only the 'bytes' unit is defined) results in the range header value
being logged and a normal 200 response being sent.
"""
self.request.headers['range'] = range = 'foobar=0-43'
self.resource.render(self.request)
expected = "Ignoring malformed Range header %r" % (range,)
self._assertLogged(expected)
self.assertEquals(''.join(self.request.written), self.payload)
self.assertEquals(self.request.responseCode, http.OK)
self.assertEquals(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
Note that this will fail the calling test on certain syntactic
problems.
"""
sep = "\r\n--" + boundary
parts = ''.join(body).split(sep)
self.assertEquals('', parts[0])
self.assertEquals('--\r\n', parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split('\r\n', 4)
headers = header1 + '\n' + header2
self.assertEqual('', before)
self.assertEqual('', blank)
partContentTypeValue = re.search(
'^content-type: (.*)$', headers, re.I|re.M).group(1)
start, end, size = re.search(
'^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$',
headers, re.I|re.M).groups()
parsed_parts.append(
{'contentType': partContentTypeValue,
'contentRange': (start, end, size),
'body': partBody})
return parsed_parts
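# What a multipart/byteranges body accepted by the parser above looks
# like on the wire (illustrative sample, with 'x' as the boundary token)
# for two ranges of a six-byte resource 'abcdef':
_exampleBody = (
    '\r\n--x'
    '\r\ncontent-type: text/plain'
    '\r\ncontent-range: bytes 1-3/6'
    '\r\n\r\nbcd'
    '\r\n--x'
    '\r\ncontent-type: text/plain'
    '\r\ncontent-range: bytes 5-5/6'
    '\r\n\r\nf'
    '\r\n--x--\r\n'
)
_parts = _exampleBody.split('\r\n--x')
assert _parts[0] == '' and _parts[-1] == '--\r\n' and len(_parts) == 4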
def test_multipleRangeRequest(self):
"""
The response to a request for multiple byte ranges is a MIME-ish
multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEquals(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
The response to a request for multiple byte ranges is a MIME-ish
multipart response, even when one of the ranges falls off the end of
the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEquals(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize()-1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_implicitEnd(self):
"""
If the end byte position is omitted, the range is treated as extending
to the last byte of the resource.
"""
self.request.headers['range'] = 'bytes=23-'
self.resource.render(self.request)
self.assertEquals(''.join(self.request.written), self.payload[23:])
self.assertEquals(len(''.join(self.request.written)), 41)
self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEquals(
self.request.outgoingHeaders['content-range'], 'bytes 23-63/64')
self.assertEquals(self.request.outgoingHeaders['content-length'], '41')
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.headers['range'] = 'bytes=-17'
self.resource.render(self.request)
self.assertEquals(''.join(self.request.written), self.payload[-17:])
self.assertEquals(len(''.join(self.request.written)), 17)
self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEquals(
self.request.outgoingHeaders['content-range'], 'bytes 47-63/64')
self.assertEquals(self.request.outgoingHeaders['content-length'], '17')
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (including) the B'th byte. The first
byte of a page is numbered with 0.
"""
self.request.headers['range'] = 'bytes=3-43'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEquals(written, self.payload[3:44])
self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEquals(
self.request.outgoingHeaders['content-range'], 'bytes 3-43/64')
self.assertEquals(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
"""
self.request.headers['range'] = 'bytes=40-100'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEquals(written, self.payload[40:])
self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEquals(
self.request.outgoingHeaders['content-range'], 'bytes 40-63/64')
self.assertEquals(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.headers['range'] = 'bytes=20-13'
self.resource.render(self.request)
self.assertEquals(self.request.responseCode, http.OK)
self.assertEquals(''.join(self.request.written), self.payload)
self.assertEquals(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.headers['range'] = 'bytes=67-108'
self.resource.render(self.request)
self.assertEquals(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE)
self.assertEquals(''.join(self.request.written), '')
self.assertEquals(self.request.outgoingHeaders['content-length'], '0')
# Sections 10.4.17 and 14.16
self.assertEquals(
self.request.outgoingHeaders['content-range'],
'bytes */%d' % (len(self.payload),))
class DirectoryListerTest(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([''])
request.uri = uri
return request
def test_renderHeader(self):
"""
L{static.DirectoryLister} prints the request URI as the header of the
rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
self.assertIn("<h1>Directory listing for foo</h1>", data)
self.assertIn("<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
L{static.DirectoryLister} unquotes the request URI before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%20bar'))
self.assertIn("<h1>Directory listing for foo bar</h1>", data)
self.assertIn("<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
L{static.DirectoryLister} escape "&", "<" and ">" after unquoting the
request uri.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%26bar'))
self.assertIn("<h1>Directory listing for foo&bar</h1>", data)
self.assertIn("<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1').setContent("content1")
path.child('file2').setContent("content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2 & 3').makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
L{static.DirectoryLister} takes an optional C{dirs} argument that
filters the list of directories and files printed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2').makedirs()
path.child('dir3').makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
L{static.DirectoryLister} alternates the CSS class between odd and
even rows of the table.
"""
lister = static.DirectoryLister(None)
elements = [{"href": "", "text": "", "size": "", "type": "",
"encoding": ""} for i in xrange(5)]
content = lister._buildTableContent(elements)
self.assertEquals(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} detects the MIME type and encoding of the
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1.txt').setContent("file1")
path.child('file2.py').setContent("python")
path.child('file3.conf.gz').setContent("conf compressed")
path.child('file4.diff.bz2').setContent("diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff"
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEquals(dirs, [])
self.assertEquals(files, [
{'encoding': '',
'href': 'file1.txt',
'size': '5B',
'text': 'file1.txt',
'type': '[text/plain]'},
{'encoding': '',
'href': 'file2.py',
'size': '6B',
'text': 'file2.py',
'type': '[text/python]'},
{'encoding': '[gzip]',
'href': 'file3.conf.gz',
'size': '15B',
'text': 'file3.conf.gz',
'type': '[text/configuration]'},
{'encoding': '[bzip2]',
'href': 'file4.diff.bz2',
'size': '15B',
'text': 'file4.diff.bz2',
'type': '[text/diff]'}])
def test_brokenSymlink(self):
"""
If a file in the listing points to a broken symlink, it is not
returned by L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child('file1')
file1.setContent("file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEquals(dirs, [])
self.assertEquals(files, [])
if getattr(os, "symlink", None) is None:
test_brokenSymlink.skip = "No symlink support"
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request('')
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEquals(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEquals(repr(lister),
"<DirectoryLister of %r>" % (path.path,))
self.assertEquals(str(lister),
"<DirectoryLister of %r>" % (path.path,))
def test_formatFileSize(self):
"""
L{static.formatFileSize} formats a number of bytes into a more
readable string.
"""
self.assertEquals(static.formatFileSize(0), "0B")
self.assertEquals(static.formatFileSize(123), "123B")
self.assertEquals(static.formatFileSize(4567), "4K")
self.assertEquals(static.formatFileSize(8900000), "8M")
self.assertEquals(static.formatFileSize(1234000000), "1G")
self.assertEquals(static.formatFileSize(1234567890000), "1149G")
class TestFileTransferDeprecated(TestCase):
"""
L{static.FileTransfer} is deprecated.
"""
def test_deprecation(self):
"""
Instantiation of L{FileTransfer} produces a deprecation warning.
"""
static.FileTransfer(StringIO.StringIO(), 0, DummyRequest([]))
warnings = self.flushWarnings([self.test_deprecation])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
'FileTransfer is deprecated since Twisted 9.0. '
'Use a subclass of StaticProducer instead.')
| Donkyhotay/MoonPy | twisted/web/test/test_static.py | Python | gpl-3.0 | 53,951 |
"""
============================
``ctypes`` Utility Functions
============================
See Also
---------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
Our result type: an ndarray that must be of type double, 1-dimensional,
and C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #doctest: +SKIP
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
from __future__ import division, absolute_import, print_function
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import sys, os
from numpy import integer, ndarray, dtype as _dtype, deprecate, array
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
def _dummy(*args, **kwds):
"""
Dummy object that raises an ImportError if ctypes is not available.
Raises
------
ImportError
If ctypes is not available.
"""
raise ImportError("ctypes is not available.")
ctypes_load_library = _dummy
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
from numpy import intp as c_intp
_ndptr_base = object
else:
import numpy.core._internal as nic
c_intp = nic._getintp_ctype()
del nic
_ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work " \
"with ctypes < 1.0.1")
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
from numpy.distutils.misc_util import get_shared_lib_extension
so_ext = get_shared_lib_extension()
libname_ext = [libname + so_ext]
# mac, windows and linux >= py3.2 shared library and loadable
# module have different extensions so try both
so_ext2 = get_shared_lib_extension(is_python_ext=True)
if not so_ext2 == so_ext:
libname_ext.insert(0, libname + so_ext2)
try:
import sysconfig
so_ext3 = '.%s-%s.so' % (sysconfig.get_config_var('SOABI'),
sysconfig.get_config_var('MULTIARCH'))
libname_ext.insert(0, libname + so_ext3)
except (KeyError, ImportError):
pass
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
for ln in libname_ext:
libpath = os.path.join(libdir, ln)
if os.path.exists(libpath):
try:
return ctypes.cdll[libpath]
except OSError:
## defective lib file
raise
## if no successful return in the libname_ext loop:
raise OSError("no file with expected extension")
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
'load_library')
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'UPDATEIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(_ndptr_base):
def _check_retval_(self):
"""This method is called when this class is used as the .restype
asttribute for a shared-library function. It constructs a numpy
array from a void pointer."""
return array(self)
@property
def __array_interface__(self):
return {'descr': self._dtype_.descr,
'__ref': self,
'strides': None,
'shape': self._shape_,
'version': 3,
'typestr': self._dtype_.descr[0][1],
'data': (self.value, False),
}
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError("argument must be an ndarray")
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError("array must have data type %s" % cls._dtype_)
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError("array must have %d dimension(s)" % cls._ndim_)
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError("array must have shape %s" % str(cls._shape_))
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError("array must have flags %s" %
_flags_fromnum(cls._flags_))
return obj.ctypes
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
    flags : str or tuple of str, optional
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- UPDATEIFCOPY / U
Returns
-------
klass : ndpointer type object
        A type object, which is an ``_ndptr`` subclass containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
... #doctest: +SKIP
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
... #doctest: +SKIP
"""
if dtype is not None:
dtype = _dtype(dtype)
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
        except Exception:
raise TypeError("invalid flags specification")
num = _num_fromflags(flags)
try:
return _pointer_type_cache[(dtype, ndim, shape, num)]
except KeyError:
pass
if dtype is None:
name = 'any'
elif dtype.names:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
try:
strshape = [str(x) for x in shape]
except TypeError:
strshape = [str(shape)]
shape = (shape,)
shape = tuple(shape)
name += "_"+"x".join(strshape)
if flags is not None:
name += "_"+"_".join(flags)
else:
flags = []
klass = type("ndpointer_%s"%name, (_ndptr,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
    # Store under the same compound key used for the cache lookup above.
    _pointer_type_cache[(dtype, ndim, shape, num)] = klass
return klass
if ctypes is not None:
ct = ctypes
################################################################
# simple types
# maps the numpy typecodes like '<f8' to simple ctypes types like
# c_double. Filled in by prep_simple.
_typecodes = {}
def prep_simple(simple_type, dtype):
"""Given a ctypes simple type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: simple_type.__array_interface__
except AttributeError: pass
else: return
typestr = _dtype(dtype).str
_typecodes[typestr] = simple_type
def __array_interface__(self):
return {'descr': [('', typestr)],
'__ref': self,
'strides': None,
'shape': (),
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
simple_type.__array_interface__ = property(__array_interface__)
simple_types = [
((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
((ct.c_float, ct.c_double), "f"),
]
    # Prep the numerical ctypes types:
for types, code in simple_types:
for tp in types:
prep_simple(tp, "%c%d" % (code, ct.sizeof(tp)))
################################################################
# array types
_ARRAY_TYPE = type(ct.c_int * 1)
def prep_array(array_type):
"""Given a ctypes array type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: array_type.__array_interface__
except AttributeError: pass
else: return
shape = []
ob = array_type
while type(ob) is _ARRAY_TYPE:
shape.append(ob._length_)
ob = ob._type_
shape = tuple(shape)
ai = ob().__array_interface__
descr = ai['descr']
typestr = ai['typestr']
def __array_interface__(self):
return {'descr': descr,
'__ref': self,
'strides': None,
'shape': shape,
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
array_type.__array_interface__ = property(__array_interface__)
def prep_pointer(pointer_obj, shape):
"""Given a ctypes pointer object, construct and
attach an __array_interface__ property to it if it does not
yet have one.
"""
try: pointer_obj.__array_interface__
except AttributeError: pass
else: return
contents = pointer_obj.contents
dtype = _dtype(type(contents))
inter = {'version': 3,
'typestr': dtype.str,
'data': (ct.addressof(contents), False),
'shape': shape}
pointer_obj.__array_interface__ = inter
################################################################
# public functions
def as_array(obj, shape=None):
"""Create a numpy array from a ctypes array or a ctypes POINTER.
The numpy array shares the memory with the ctypes object.
        The shape parameter must be given if converting from a ctypes POINTER.
        The shape parameter is ignored if converting from a ctypes array.
"""
tp = type(obj)
try: tp.__array_interface__
except AttributeError:
if hasattr(obj, 'contents'):
prep_pointer(obj, shape)
else:
prep_array(tp)
return array(obj, copy=False)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
tp = _typecodes[ai["typestr"]]
for dim in ai["shape"][::-1]:
tp = tp * dim
result = tp.from_address(addr)
result.__keep = ai
return result
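    # Hedged usage sketch (not part of the original numpy module): a minimal
    # round trip between an ndarray and ctypes via as_ctypes/as_array,
    # guarded so it only runs when this file is executed directly.
    if __name__ == '__main__':
        import numpy as np
        a = np.zeros(4, dtype=np.int32)
        ca = as_ctypes(a)         # ctypes array sharing a's memory
        ca[0] = 7                 # writes through to the ndarray
        b = as_array(ca)          # ndarray view of the ctypes array
        assert a[0] == b[0] == 7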
| ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/numpy/ctypeslib.py | Python | gpl-3.0 | 14,076 |
'''
TypeString is a core class that extends mimetypes
'''
import re
import os
import mimetypes
import magic
UNKNOWN_MIMETYPE = ('application/x-empty', 'application/octet-stream', 'text/plain')
class TypeString:
def __init__(self, s):
self.str = s
# Extract arguments
if ':' in s:
self.ts_format, _, arguments_str = s.partition(':')
self.arguments = tuple(arguments_str.split(','))
else:
self.ts_format = s
self.arguments = tuple()
# Check if is mimetype, extension or qualifier
self.is_qualifier = False
self.mimetype = None
self.extension = None
if '/' in self.ts_format:
self.mimetype = self.ts_format
ext = mimetypes.guess_extension(self.mimetype)
if ext:
self.extension = ext.strip('.').upper()
elif self.ts_format.isupper():
self.extension = self.ts_format
fn = 'fn.%s' % self.extension
self.mimetype, _ = mimetypes.guess_type(fn) # discard encoding
else:
# Is qualifier, can't determine mimetype
self.is_qualifier = True
def modify_basename(self, basename):
if self.extension:
ext = self.extension.lower()
else:
ext = self.ts_format.replace('/', ':')
if self.arguments:
ext = '.'.join(self.arguments + (ext,))
return '%s.%s' % (basename, ext)
def __str__(self):
return self.str
def __repr__(self):
return "TypeString(%s)" % repr(str(self))
def guess_typestring(path):
'''
Guesses a TypeString from the given path
'''
with open(path, 'rb') as fd:
mimetype = magic.from_buffer(fd.read(128), mime=True)
if mimetype and mimetype not in UNKNOWN_MIMETYPE:
return TypeString(mimetype)
    # Otherwise, fall back to guessing from the file extension
_, ext = os.path.splitext(path)
return TypeString(ext.strip('.').upper())
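# Hedged usage sketch (not part of the original module). Exact mimetype
# results depend on the platform's mimetypes database, so the values in the
# comments below are typical rather than guaranteed.
if __name__ == '__main__':
    ts = TypeString('JPEG:100x100')
    print(ts.mimetype)                  # typically 'image/jpeg'
    print(ts.arguments)                 # ('100x100',)
    print(ts.modify_basename('thumb'))  # e.g. 'thumb.100x100.jpeg'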
| michaelpb/omnithumb | omnithumb/types/typestring.py | Python | gpl-3.0 | 2,012 |
#!/usr/bin/env python
# encoding: utf-8
from lewei import Cloud as lewei
CLOUDS = {
"lewei": lewei,
} | jt6562/mytvoc | clouds/__init__.py | Python | gpl-3.0 | 110 |
# -*- coding: utf-8 -*-
# home made test
# Sign convention for fiber sections.
from __future__ import division
import xc_base
import geom
import xc
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials
from postprocess import prop_statistics
import math
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2014, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"
# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
#Constant positive strain.
epsilon= 3.5e-3
epsilon1= epsilon
epsilon2= epsilon
epsilon3= epsilon
epsilon4= epsilon
#Read section definition from file.
import os
pth= os.path.dirname(__file__)
#print "pth= ", pth
if(not pth):
pth= "."
execfile(pth+"/../../aux/four_fiber_section.py")
sigma= E*epsilon
F= sigma*fiberArea
N0Teor= 4*F
My0Teor= 0.0
Mz0Teor= 0.0
R0Teor=xc.Vector([N0Teor,My0Teor,Mz0Teor])
D0Teor=xc.Vector([epsilon,0.0,0.0])
ratioN0= abs(N0-N0Teor)/N0Teor
ratioN0S= abs(N0S-N0Teor)/N0Teor
ratioMy0= abs(My0-My0Teor)
ratioMy0S= abs(My0S-My0Teor)
ratioMz0= abs(Mz0-Mz0Teor)
ratioMz0S= abs(Mz0S-Mz0Teor)
ratioR0= (R0Teor-R0).Norm()
ratioD0= (D0Teor-D0).Norm()
fourFibersSection.revertToStart()
# Positive My (section)
epsilon1= -epsilon
epsilon2= -epsilon
epsilon3= epsilon
epsilon4= epsilon
f1.getMaterial().setTrialStrain(epsilon1,0.0)
f2.getMaterial().setTrialStrain(epsilon2,0.0)
f3.getMaterial().setTrialStrain(epsilon3,0.0)
f4.getMaterial().setTrialStrain(epsilon4,0.0)
N1= fourFibersSection.getFibers().getResultant()
My1= fourFibersSection.getFibers().getMy(0.0)
Mz1= fourFibersSection.getFibers().getMz(0.0)
fourFibersSection.setupFibers()
RR= fourFibersSection.getStressResultant()
R1= xc.Vector([RR[0],RR[2],RR[1]]) # N= RR[0], My= RR[2], Mz= RR[1]
deformationPlane1= fourFibersSection.getFibers().getDeformationPlane()
fourFibersSection.setTrialDeformationPlane(deformationPlane1)
DD= fourFibersSection.getSectionDeformation()
D1= xc.Vector([DD[0],DD[2],DD[1]]) # epsilon= DD[0], Ky= DD[2], Kz= DD[1]
N1S= fourFibersSection.getN()
My1S= fourFibersSection.getMy()
Mz1S= fourFibersSection.getMz()
N1Teor= 0.0
My1Teor= 2*F*widthOverZ
Mz1Teor= 0.0
R1Teor=xc.Vector([N1Teor,My1Teor,Mz1Teor])
Ky1Teor= 2*epsilon/widthOverZ
D1Teor=xc.Vector([0.0,Ky1Teor,0.0])
ratioN1= abs(N1-N1Teor)
ratioN1S= abs(N1S-N1Teor)
ratioMy1= abs(My1-My1Teor)/My1Teor
ratioMy1S= abs(My1S-My1Teor)/My1Teor
ratioMz1= abs(Mz1-Mz1Teor)
ratioMz1S= abs(Mz1S-Mz1Teor)
ratioR1= (R1Teor-R1).Norm()
ratioD1= (D1Teor-D1).Norm()
# Positive Mz (section)
fourFibersSection.revertToStart()
epsilon1= epsilon
epsilon2= -epsilon
epsilon3= -epsilon
epsilon4= epsilon
f1.getMaterial().setTrialStrain(epsilon1,0.0)
f2.getMaterial().setTrialStrain(epsilon2,0.0)
f3.getMaterial().setTrialStrain(epsilon3,0.0)
f4.getMaterial().setTrialStrain(epsilon4,0.0)
N2= fourFibersSection.getFibers().getResultant()
My2= fourFibersSection.getFibers().getMy(0.0)
Mz2= fourFibersSection.getFibers().getMz(0.0)
deformationPlane2= fourFibersSection.getFibers().getDeformationPlane()
fourFibersSection.setupFibers()
RR= fourFibersSection.getStressResultant()
R2= xc.Vector([RR[0],RR[2],RR[1]]) # N= RR[0], My= RR[2], Mz= RR[1]
fourFibersSection.setTrialDeformationPlane(deformationPlane2)
DD= fourFibersSection.getSectionDeformation()
D2= xc.Vector([DD[0],DD[2],DD[1]]) # epsilon= DD[0], Ky= DD[2], Kz= DD[1]
N2S= fourFibersSection.getN()
My2S= fourFibersSection.getMy()
Mz2S= fourFibersSection.getMz()
N2Teor= 0.0
My2Teor= 0.0
Mz2Teor= -4*F*depthOverY/2.0 #Mz positive is in the opposite direction with respect to the positive y-axis. ???
R2Teor=xc.Vector([N2Teor,My2Teor,Mz2Teor])
Kz2Teor= 2*epsilon/depthOverY
D2Teor=xc.Vector([0.0,0.0,-Kz2Teor]) #Negative ???
ratioN2= abs(N2-N2Teor)
ratioN2S= abs(N2S-N2Teor)
ratioMy2= abs(My2-My2Teor)
ratioMy2S= abs(My2S-My2Teor)
ratioMz2= abs(Mz2-Mz2Teor)/Mz2Teor
ratioMz2S= abs(Mz2S-Mz2Teor)/Mz2Teor
ratioR2= (R2Teor-R2).Norm()
ratioD2= (D2Teor-D2).Norm()
# Positive Mz, negative My (section)
fourFibersSection.revertToStart()
epsilon= 3.5e-3
epsilon1= epsilon
epsilon2= 0.0
epsilon3= -epsilon
epsilon4= 0.0
f1.getMaterial().setTrialStrain(epsilon1,0.0)
f2.getMaterial().setTrialStrain(epsilon2,0.0)
f3.getMaterial().setTrialStrain(epsilon3,0.0)
f4.getMaterial().setTrialStrain(epsilon4,0.0)
N3= fourFibersSection.getFibers().getResultant()
My3= fourFibersSection.getFibers().getMy(0.0)
Mz3= fourFibersSection.getFibers().getMz(0.0)
deformationPlane3= fourFibersSection.getFibers().getDeformationPlane()
fourFibersSection.setupFibers()
RR= fourFibersSection.getStressResultant()
R3= xc.Vector([RR[0],RR[2],RR[1]]) # N= RR[0], My= RR[2], Mz= RR[1]
fourFibersSection.setTrialDeformationPlane(deformationPlane3)
DD= fourFibersSection.getSectionDeformation()
D3= xc.Vector([DD[0],DD[2],DD[1]]) # epsilon= DD[0], Ky= DD[2], Kz= DD[1]
N3S= fourFibersSection.getN()
My3S= fourFibersSection.getMy()
Mz3S= fourFibersSection.getMz()
N3Teor= 0.0
My3Teor= -2*F*widthOverZ/2.0
Mz3Teor= -2*F*depthOverY/2.0
R3Teor=xc.Vector([N3Teor,My3Teor,Mz3Teor])
Ky3Teor= -epsilon/widthOverZ
Kz3Teor= epsilon/depthOverY
D3Teor=xc.Vector([0.0,Ky3Teor,-Kz3Teor]) #Negative ???
ratioN3= abs(N3-N3Teor)
ratioN3S= abs(N3S-N3Teor)
ratioMy3= abs(My3-My3Teor)
ratioMy3S= abs(My3S-My3Teor)
ratioMz3= abs(Mz3-Mz3Teor)/Mz3Teor
ratioMz3S= abs(Mz3S-Mz3Teor)/Mz3Teor
ratioR3= (R3Teor-R3).Norm()
ratioD3= (D3Teor-D3).Norm()
error= math.sqrt(ratioN0**2+ratioMy0**2+ratioMz0**2+ratioN0S**2+ratioMy0S**2+ratioMz0S**2+ratioR0**2+ratioD0**2+ratioN1**2+ratioMy1**2+ratioMz1**2+ratioN1S**2+ratioMy1S**2+ratioMz1S**2+ratioR1**2+ratioD1**2+ratioN2**2+ratioMy2**2+ratioMz2**2+ratioN2S**2+ratioMy2S**2+ratioMz2S**2+ratioR2**2+ratioD2**2+ratioN3**2+ratioMy3**2+ratioMz3**2+ratioN3S**2+ratioMy3S**2+ratioMz3S**2+ratioR3**2+ratioD3**2)
print 'N0= ', N0, ' N0S= ', N0S, ' N0Teor= ', N0Teor, ' ratioN0= ', ratioN0, ' ratioN0S= ', ratioN0S
print 'My0= ', My0, ' My0S= ', My0S, ' My0Teor= ', My0Teor, ' ratioMy0= ', ratioMy0, ' ratioMy0S= ', ratioMy0S
print 'Mz0= ', Mz0, ' Mz0S= ', Mz0S, ' Mz0Teor= ', Mz0Teor, ' ratioMz0= ', ratioMz0, ' ratioMz0S= ', ratioMz0S
print 'R0= ', R0, ' R0Teor= ', R0Teor, ' ratioR0= ', ratioR0
print 'D0= ', D0, ' D0Teor= ', D0Teor, ' ratioD0= ', ratioD0
print 'N1= ', N1, ' N1S= ', N1S, ' N1Teor= ', N1Teor, ' ratioN1= ', ratioN1, ' ratioN1S= ', ratioN1S
print 'My1= ', My1, ' My1S= ', My1S, ' My1Teor= ', My1Teor, ' ratioMy1= ', ratioMy1, ' ratioMy1S= ', ratioMy1S
print 'Mz1= ', Mz1, ' Mz1S= ', Mz1S, ' Mz1Teor= ', Mz1Teor, ' ratioMz1= ', ratioMz1, ' ratioMz1S= ', ratioMz1S
print 'R1= ', R1, ' R1Teor= ', R1Teor, ' ratioR1= ', ratioR1
print 'D1= ', D1, ' D1Teor= ', D1Teor, ' ratioD1= ', ratioD1
print 'N2= ', N2, ' N2S= ', N2S, ' N2Teor= ', N2Teor, ' ratioN2= ', ratioN2, ' ratioN2S= ', ratioN2S
print 'My2= ', My2, ' My2S= ', My2S, ' My2Teor= ', My2Teor, ' ratioMy2= ', ratioMy2, ' ratioMy2S= ', ratioMy2S
print 'Mz2= ', Mz2, ' Mz2S= ', Mz2S, ' Mz2Teor= ', Mz2Teor, ' ratioMz2= ', ratioMz2, ' ratioMz2S= ', ratioMz2S
print 'R2= ', R2, ' R2Teor= ', R2Teor, ' ratioR2= ', ratioR2
print 'D2= ', D2, ' D2Teor= ', D2Teor, ' ratioD2= ', ratioD2
print 'N3= ', N3, ' N3S= ', N3S, ' N3Teor= ', N3Teor, ' ratioN3= ', ratioN3, ' ratioN3S= ', ratioN3S
print 'My3= ', My3, ' My3S= ', My3S, ' My3Teor= ', My3Teor, ' ratioMy3= ', ratioMy3, ' ratioMy3S= ', ratioMy3S
print 'Mz3= ', Mz3, ' Mz3S= ', Mz3S, ' Mz3Teor= ', Mz3Teor, ' ratioMz3= ', ratioMz3, ' ratioMz3S= ', ratioMz3S
print 'R3= ', R3, ' R3Teor= ', R3Teor, ' ratioR3= ', ratioR3
print 'D3= ', D3, ' D3Teor= ', D3Teor, ' ratioD3= ', ratioD3
print 'error= ', error
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (error < 1e-3):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
| lcpt/xc | verif/tests/materials/fiber_section/test_fiber_section_sign_convention01.py | Python | gpl-3.0 | 7,833 |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from actionlib/TestRequestActionGoal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import actionlib.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class TestRequestActionGoal(genpy.Message):
_md5sum = "1889556d3fef88f821c7cb004e4251f3"
_type = "actionlib/TestRequestActionGoal"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalID goal_id
TestRequestGoal goal
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: actionlib/TestRequestGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
int32 TERMINATE_SUCCESS = 0
int32 TERMINATE_ABORTED = 1
int32 TERMINATE_REJECTED = 2
int32 TERMINATE_LOSE = 3
int32 TERMINATE_DROP = 4
int32 TERMINATE_EXCEPTION = 5
int32 terminate_status
bool ignore_cancel # If true, ignores requests to cancel
string result_text
int32 the_result # Desired value for the_result in the Result
bool is_simple_client
duration delay_accept # Delays accepting the goal by this amount of time
duration delay_terminate # Delays terminating for this amount of time
duration pause_status # Pauses the status messages for this amount of time
"""
__slots__ = ['header','goal_id','goal']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalID','actionlib/TestRequestGoal']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,goal_id,goal
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TestRequestActionGoal, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = actionlib.msg.TestRequestGoal()
else:
self.header = std_msgs.msg.Header()
self.goal_id = actionlib_msgs.msg.GoalID()
self.goal = actionlib.msg.TestRequestGoal()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
_x = self.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_iB.pack(_x.goal.terminate_status, _x.goal.ignore_cancel))
_x = self.goal.result_text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_iB6i.pack(_x.goal.the_result, _x.goal.is_simple_client, _x.goal.delay_accept.secs, _x.goal.delay_accept.nsecs, _x.goal.delay_terminate.secs, _x.goal.delay_terminate.nsecs, _x.goal.pause_status.secs, _x.goal.pause_status.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = actionlib.msg.TestRequestGoal()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal_id.id = str[start:end].decode('utf-8')
else:
self.goal_id.id = str[start:end]
_x = self
start = end
end += 5
(_x.goal.terminate_status, _x.goal.ignore_cancel,) = _struct_iB.unpack(str[start:end])
self.goal.ignore_cancel = bool(self.goal.ignore_cancel)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal.result_text = str[start:end].decode('utf-8')
else:
self.goal.result_text = str[start:end]
_x = self
start = end
end += 29
(_x.goal.the_result, _x.goal.is_simple_client, _x.goal.delay_accept.secs, _x.goal.delay_accept.nsecs, _x.goal.delay_terminate.secs, _x.goal.delay_terminate.nsecs, _x.goal.pause_status.secs, _x.goal.pause_status.nsecs,) = _struct_iB6i.unpack(str[start:end])
self.goal.is_simple_client = bool(self.goal.is_simple_client)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
_x = self.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_iB.pack(_x.goal.terminate_status, _x.goal.ignore_cancel))
_x = self.goal.result_text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_iB6i.pack(_x.goal.the_result, _x.goal.is_simple_client, _x.goal.delay_accept.secs, _x.goal.delay_accept.nsecs, _x.goal.delay_terminate.secs, _x.goal.delay_terminate.nsecs, _x.goal.pause_status.secs, _x.goal.pause_status.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = actionlib.msg.TestRequestGoal()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal_id.id = str[start:end].decode('utf-8')
else:
self.goal_id.id = str[start:end]
_x = self
start = end
end += 5
(_x.goal.terminate_status, _x.goal.ignore_cancel,) = _struct_iB.unpack(str[start:end])
self.goal.ignore_cancel = bool(self.goal.ignore_cancel)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal.result_text = str[start:end].decode('utf-8')
else:
self.goal.result_text = str[start:end]
_x = self
start = end
end += 29
(_x.goal.the_result, _x.goal.is_simple_client, _x.goal.delay_accept.secs, _x.goal.delay_accept.nsecs, _x.goal.delay_terminate.secs, _x.goal.delay_terminate.nsecs, _x.goal.pause_status.secs, _x.goal.pause_status.nsecs,) = _struct_iB6i.unpack(str[start:end])
self.goal.is_simple_client = bool(self.goal.is_simple_client)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_iB6i = struct.Struct("<iB6i")
_struct_3I = struct.Struct("<3I")
_struct_iB = struct.Struct("<iB")
_struct_2I = struct.Struct("<2I")
| UnbDroid/robomagellan | Codigos/Raspberry/desenvolvimentoRos/devel/lib/python2.7/dist-packages/actionlib/msg/_TestRequestActionGoal.py | Python | gpl-3.0 | 12,401 |
#
# Copyright (C) 2018, 2020
# Smithsonian Astrophysical Observatory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Support downloading data from URLs in CIAO.
A collection of routines related to data download used in CIAO.
retrieve_url
------------
CIAO 4.11 does not include any SSL support, instead relying on the OS.
This can cause problems on certain platforms. So try with Python and
then fall through to curl or wget. This can hopefully be removed for
CIAO 4.12 or later, but kept in just for now.
find_downloadable_files
-----------------------
Given a URL of a directory, return the files and sub-directories
available. This requires that the web server supports the Apache
mod_autoindex functionality, and is written for accessing the
Chandra Data Archive. Support for other web sites is not guaranteed.
find_all_downloadable_files
---------------------------
Similar to find_downloadble_files but recurses through all sub-directories.
ProgressBar
-----------
Display a "progress" bar, indicating the progress of a download.
This has very-limited functionality.
download_progress
-----------------
Download a URL to a file, supporting
- continuation of a previous partial download
- a rudimentary progress bar to display progress
Stability
---------
This is an internal module, and so the API it provides is not
considered stable (e.g. we may remove this module at any time). Use
at your own risk.
"""
import os
import sys
import ssl
import time
from io import BytesIO
from subprocess import check_output
import urllib.error
import urllib.request
import http.client
from html.parser import HTMLParser
import ciao_contrib.logger_wrapper as lw
logger = lw.initialize_module_logger("downloadutils")
v0 = logger.verbose0
v1 = logger.verbose1
v2 = logger.verbose2
v3 = logger.verbose3
v4 = logger.verbose4
__all__ = ('retrieve_url',
'find_downloadable_files',
'find_all_downloadable_files',
'ProgressBar',
'download_progress')
def manual_download(url):
"""Try curl then wget to query the URL.
Parameters
----------
url : str
        The URL for the query.
Returns
-------
    ans : BytesIO instance
The response
"""
v3("Fall back to curl or wget to download: {}".format(url))
# Should package this up nicely, but hardcode for the moment.
#
# It is not clear if this is sufficient to catch "no curl"
# while allowing errors like "no access to the internet"
# to not cause too much pointless work.
#
args = ['curl', '--silent', '-L', url]
v4("About to execute: {}".format(args))
try:
rsp = check_output(args)
except FileNotFoundError as exc1:
v3("Unable to call curl: {}".format(exc1))
args = ['wget', '--quiet', '-O-', url]
v4("About to execute: {}".format(args))
try:
rsp = check_output(args)
except FileNotFoundError as exc2:
v3("Unable to call wget: {}".format(exc2))
emsg = "Unable to access the URL {}.\n".format(url) + \
"Please install curl or wget (and if you " + \
"continue to see this message, contact the " + \
"CXC HelpDesk)."
raise RuntimeError(emsg)
return BytesIO(rsp)
def retrieve_url(url, timeout=None):
"""Handle possible problems retrieving the URL contents.
Using URLs with the https scheme causes problems for certain OS
set ups because CIAO 4.11 does not provide SSL support, but relies
on the system libraries to work. This is "supported" by falling
over from Python to external tools (curl or wget).
Parameters
----------
url : str
The URL to retrieve.
timeout : optional
The timeout parameter for the urlopen call; if not
None then the value is in seconds.
Returns
-------
response : HTTPResponse instance
The response
"""
try:
v3("Retrieving URL: {} timeout={}".format(url, timeout))
if timeout is None:
return urllib.request.urlopen(url)
return urllib.request.urlopen(url, timeout=timeout)
except urllib.error.URLError as ue:
v3("Error opening URL: {}".format(ue))
v3("error.reason = {}".format(ue.reason))
# Assume this is the error message indicating "no SSL support"
# There is a new (in CIAO 4.11) message
# "urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:719)"
#
# It appears that the reason attribute can be an object, so
# for now explicitly convert to a string:
reason = str(ue.reason)
if reason.find('unknown url type: https') != -1 or \
reason.find('CERTIFICATE_VERIFY_FAILED') != -1:
return manual_download(url)
# There used to be a check on the reason for the error,
# converting it into a "user-friendly" message, but this
# was error prone (the check itself was faulty) and
# potentially hid useful error information. So just
# re-raise the error here after logging it.
#
raise
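# Hedged example (not part of the original module): a typical retrieve_url
# call; the URL is illustrative and not guaranteed to stay live.
#
#   rsp = retrieve_url('https://cxc.harvard.edu/ciao/', timeout=30)
#   text = rsp.read().decode('utf-8')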
class DirectoryContents(HTMLParser):
"""Extract the output of the mod_autoindex Apache directive.
    Limited testing. It assumes that the files are given as links, that
    there are no other links on the page, and that the parent directory is
listed as 'parent directory' (after removing the white space and
converting to lower case). There is special casing to remove links
where the text does not match the name of the link. This is to
handle query fragments, which are used to change the ordering of
    the table display rather than being actual links.
"""
def __init__(self, *args, **kwargs):
self.dirs = []
self.files = []
self.current = None
super().__init__(*args, **kwargs)
def add_link(self):
"""We've found a link, add it to the store"""
if self.current is None:
return
if self.current.endswith('/'):
store = self.dirs
else:
store = self.files
store.append(self.current)
self.current = None
def handle_starttag(self, tag, attrs):
if tag.upper() != 'A':
return
# In case we have a missing close tag
self.add_link()
attrs = dict(attrs)
try:
href = attrs['href']
except KeyError:
raise ValueError("Missing href attribute for a tag")
self.current = href
def handle_endtag(self, tag):
# do not expect end tags within <a> here, so we can
# treat it as the end of the a link if we find it
# (to support missing end tags).
#
if self.current is None:
return
self.add_link()
def handle_data(self, data):
if self.current is None:
return
# Skip the link to the parent directory, and skip any where
# the text is different to the href (e.g. to catch query-only
# links which are used to change the display rather than being
# a link).
#
data = data.strip()
if data.lower() == 'parent directory':
self.current = None
elif self.current != data:
v4(f"Dropping link={self.current} as test={data}")
self.current = None
def unpack_filelist_html(txt, baseurl):
"""Extract the contents of the page (assumed to be a directory listing).
Parameters
----------
txt : str
The HTML contents to parse.
baseurl : str
The URL of the page.
Returns
-------
urls : dict
The keys are directories and files, and the contents are
a list of absolute URLs (as strings).
"""
parser = DirectoryContents()
parser.feed(txt)
if not baseurl.endswith('/'):
baseurl += '/'
dirs = [baseurl + d for d in parser.dirs]
files = [baseurl + f for f in parser.files]
return {'directories': dirs, 'files': files}
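# Hedged example (not in the original module): unpack_filelist_html is a pure
# function, so it can be exercised without network access. Given a minimal
# autoindex-style page (the URL below is hypothetical):
#
#   >>> unpack_filelist_html(
#   ...     '<a href="primary/">primary/</a><a href="oif.fits">oif.fits</a>',
#   ...     'https://cda.example/byobsid/1843')
#   {'directories': ['https://cda.example/byobsid/1843/primary/'],
#    'files': ['https://cda.example/byobsid/1843/oif.fits']}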
def find_downloadable_files(urlname, headers):
"""Find the files and directories present in the given URL.
Report the files present at the given directory, for those
web servers which support an Apache-like mod_autoindex
function (i.e. return a HTML file listing the files and
sub-directories).
Parameters
----------
urlname : str
This must represent a directory.
headers : dict
The headers to add to the HTTP request (e.g. user-agent).
Returns
-------
urls : dict
The keys are directories and files, and the contents are
a list of absolute URLs (as strings).
See Also
--------
find_all_downloadable_files
Notes
-----
This is intended for use with the Chandra Data Archive, and
so there's no guarantee it will work for other web servers:
they may not return the necessary information, or use a
different markup.
Requests are made with *no* SSL validation (since there are
problems with CIAO 4.12 installed via ciao-install on a Ubuntu
machine).
There is no attempt to make a "nice" error message for a user
here, as that is better done in the calling code.
"""
no_context = ssl._create_unverified_context()
req = urllib.request.Request(urlname, headers=headers)
with urllib.request.urlopen(req, context=no_context) as rsp:
html_contents = rsp.read().decode('utf-8')
return unpack_filelist_html(html_contents, urlname)
def find_all_downloadable_files(urlname, headers):
"""Find the files present in the given URL, including sub-directories.
    Report the files present in the given directory and its
    sub-directories, for those web servers which support an Apache-like
mod_autoindex function (i.e. return a HTML file listing the files
and sub-directories).
Parameters
----------
urlname : str
This must represent a directory.
headers : dict
The headers to add to the HTTP request (e.g. user-agent).
Returns
-------
urls : list of str
A list of absolute URLs.
See Also
--------
find_downloadable_files
Notes
-----
This is intended for use with the Chandra Data Archive, and
so there's no guarantee it will work for other web servers:
they may not return the necessary information, or use a
different markup.
Requests are made with *no* SSL validation (since there are
problems with CIAO 4.12 installed via ciao-install on a Ubuntu
machine).
"""
v3("Finding all files available at: {}".format(urlname))
base = find_downloadable_files(urlname, headers)
out = base['files']
todo = base['directories']
v4("Found sub-directories: {}".format(todo))
while True:
v4("Have {} sub-directories to process".format(len(todo)))
        if not todo:
break
durl = todo.pop()
v3("Recursing into {}".format(durl))
subdir = find_downloadable_files(durl, headers)
out += subdir['files']
v4("Adding sub-directories: {}".format(subdir['directories']))
todo += subdir['directories']
return out
class ProgressBar:
"""A very-simple progress "bar".
This just displays the hash marks for each segment of a
download to stdout. It is called from the code doing the
actual download. There is no logic to conditionally display
the output in this class - for instance based on the
current verbose setting - since this should be handled
by the code deciding to call this obejct or not.
Parameters
----------
size : int
The number of bytes to download.
nhash : int
The number of hashes representing the full download
(so each hash represents 100/nhash % of the file)
hashchar : char
The character to display when a chunk has been
downloaded.
Examples
--------
The bar is created with the total number of bytes to download,
then it starts (with an optional number of already-downloaded
bytes), and each chunk that is added is reported with the add
method. Once the download has finished the end method is called.
Note that the methods (start, add, and end) may cause output
to stdout.
>>> progress = ProgressBar(213948)
>>> progress.start()
...
>>> progress.add(8192)
...
>>> progress.add(8192)
...
>>> progress.end()
"""
def __init__(self, size, nhash=20, hashchar='#'):
if size < 0:
raise ValueError("size can not be negative")
if nhash < 1:
raise ValueError("must have at least one hash")
self.size = size
self.nhash = nhash
self.hashchar = hashchar
self.hashsize = size // nhash
self.hdl = sys.stdout
self.added = 0
self.hashes = 0
def start(self, nbytes=0):
"""Initialize the download.
Parameters
----------
nbytes : int, optional
The number of bytes of the file that has already been
downloaded. If not zero this may cause hash marks to be
displayed.
"""
self.added = nbytes
self.hashes = self.added // self.hashsize
if self.hashes == 0:
return
self.hdl.write(self.hashchar * self.hashes)
self.hdl.flush()
def add(self, nbytes):
"""Add the number of bytes for this segment.
This must only be called after start.
Parameters
----------
nbytes : int, optional
The number of bytes added to the file.
"""
if nbytes < 0:
raise ValueError("nbytes must be positive")
if nbytes == 0:
return
self.added += nbytes
hashes = self.added // self.hashsize
if hashes == self.hashes:
return
nadd = hashes - self.hashes
self.hdl.write(self.hashchar * nadd)
self.hdl.flush()
self.hashes = hashes
def end(self):
"""Finished the download.
This is mainly to allow for handling of rounding errors.
"""
nadd = self.nhash - self.hashes
if nadd <= 0:
return
# Don't bother trying to correct for any rounding errors
# if the file wasn't fully downloaded.
#
if self.added < self.size:
return
self.hdl.write(self.hashchar * nadd)
self.hdl.flush()
def myint(x):
"""Convert to an integer, my way."""
return int(x + 0.5)
def stringify_dt(dt):
"""Convert a time interval into a "human readable" string.
Parameters
----------
dt : number
The number of seconds.
Returns
-------
lbl : str
The "human readable" version of the time difference.
Examples
--------
>>> stringify_dt(0.2)
'< 1 s'
>>> stringify_dt(62.3)
'1 m 2 s'
>>> stringify_dt(2402.24)
'40 m 2 s'
"""
if dt < 1:
return "< 1 s"
d = myint(dt // (24 * 3600))
dt2 = dt % (24 * 3600)
h = myint(dt2 // 3600)
dt3 = dt % 3600
m = myint(dt3 // 60)
s = myint(dt3 % 60)
if d > 0:
lbl = "%d day" % d
if d > 1:
lbl += "s"
if h > 0:
lbl += " %d h" % h
elif h > 0:
lbl = "%d h" % h
if m > 0:
lbl += " %d m" % m
elif m > 0:
lbl = "%d m" % m
if s > 0:
lbl += " %d s" % s
else:
lbl = "%d s" % s
return lbl
def stringify_size(s):
"""Convert a file size to a text string.
Parameters
----------
size : int
File size, in bytes
Returns
-------
filesize : str
A "nice" representation of the size
Examples
--------
>>> stringify_size(1023)
'< 1 Kb'
>>> stringify_size(1024)
'1 Kb'
>>> stringify_size(1025)
'1 Kb'
>>> stringify_size(54232)
'53 Kb'
>>> stringify_size(4545833)
'4 Mb'
>>> stringify_size(45458330000)
'4.2 Gb'
"""
if s < 1024:
lbl = "< 1 Kb"
elif s < 1024 * 1024:
lbl = "%d Kb" % (myint(s / 1024.0))
elif s < 1024 * 1024 * 1024:
lbl = "%d Mb" % (myint(s / (1024 * 1024.0)))
else:
lbl = "%.1f Gb" % (s / (1024 * 1024 * 1024.0))
return lbl
def download_progress(url, size, outfile,
headers=None,
progress=None,
chunksize=8192,
verbose=True):
"""Download url and store in outfile, reporting progress.
The download will use chunks, logging the output to the
screen, and will not re-download partial data (e.g.
from a partially-completed earlier attempt). Information
on the state of the download will be displayed to stdout
unless verbose is False. This routine requires that
we already know the size of the file.
Parameters
----------
url : str
The URL to download; this must be http or https based.
size : int
The file size in bytes.
outfile : str
The output file (relative to the current working directory).
Any sub-directories must already exist.
headers : dict, optional
Any additions to the HTTP header in the request (e.g. to
set 'User-Agent'). If None, a user-agent string of
"ciao_contrib.downloadutils.download_progress" is
        used.
progress : ProgressBar instance, optional
If not specified a default instance (20 '#' marks) is used.
chunksize : int, optional
The chunk size to use, in bytes.
verbose : bool, optional
Should progress information on the download be written to
stdout?
Notes
-----
This routine assumes that the HTTP server supports ranged
requests [1]_, and ignores SSL validation of the request.
The assumption is that the resource is static (i.e. it hasn't
been updated since content was downloaded). This means that it
is possible the output will be invalid, for instance if it
has increased in length since the last time it was fully
downloaded, or changed and there was a partial download.
References
----------
.. [1] https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests
"""
# I used http.client rather than urllib because I read somewhere
# that urllib did not handle streaming requests (e.g. it would just
# read in everything in one go and then you read from the in-memory
# buffer).
#
# From https://stackoverflow.com/a/24900110 - is it still true?
#
purl = urllib.request.urlparse(url)
if purl.scheme == 'https':
no_context = ssl._create_unverified_context()
conn = http.client.HTTPSConnection(purl.netloc, context=no_context)
elif purl.scheme == 'http':
conn = http.client.HTTPConnection(purl.netloc)
else:
raise ValueError("Unsupported URL scheme: {}".format(url))
startfrom = 0
try:
fsize = os.path.getsize(outfile)
except OSError:
fsize = None
if fsize is not None:
equal_size = fsize == size
v3("Checking on-disk file size " +
"({}) against archive size ".format(fsize) +
"({}): {}".format(size, equal_size))
if equal_size:
if verbose:
# Ugly, since this is set up to match what is needed by
# ciao_contrib.cda.data.ObsIdFile.download rather than
# being generic. Need to look at how messages are
# displayed.
#
sys.stdout.write("{:>20s}\n".format("already downloaded"))
sys.stdout.flush()
return (0, 0)
if fsize > size:
v0("Archive size is less than disk size for " +
"{} - {} vs {} bytes.".format(outfile,
size,
fsize))
return (0, 0)
startfrom = fsize
try:
outfp = open(outfile, 'ab')
except IOError:
raise IOError("Unable to create '{}'".format(outfile))
# Is this seek needed?
if startfrom > 0 and outfp.tell() == 0:
outfp.seek(0, 2)
if progress is None:
progress = ProgressBar(size)
if headers is None:
headers = {'User-Agent':
'ciao_contrib.downloadutils.download_progress'}
else:
# Ensure we copy the header dictionary, since we are going
# to add to it. It is assumed that a shallow copy is
# enough.
#
headers = headers.copy()
# Could hide this if startfrom = 0 and size <= chunksize, but
# it doesn't seem worth it.
#
headers['Range'] = 'bytes={}-{}'.format(startfrom, size - 1)
time0 = time.time()
conn.request('GET', url, headers=headers)
with conn.getresponse() as rsp:
# Assume that rsp.status != 206 would cause some form
# of an error so we don't need to check for this here.
#
if verbose:
progress.start(startfrom)
# Note that the progress bar reflects the expected size, not
# the actual size; this may not be ideal (but don't expect the
# sizes to change so it doesn't really matter).
#
while True:
chunk = rsp.read(chunksize)
if not chunk:
break
outfp.write(chunk)
if verbose:
progress.add(len(chunk))
if verbose:
progress.end()
time1 = time.time()
nbytes = outfp.tell()
outfp.close()
dtime = time1 - time0
if verbose:
rate = (nbytes - startfrom) / (1024 * dtime)
tlabel = stringify_dt(dtime)
sys.stdout.write(" {:>13s} {:.1f} kb/s\n".format(tlabel, rate))
if size != nbytes:
v0("WARNING file sizes do not match: expected {} but downloaded {}".format(size, nbytes))
return (nbytes, dtime)
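# Hedged usage sketch (not part of the original module): driving
# download_progress for a file whose size is already known, for example from
# an archive listing. The URL and size are illustrative assumptions.
if __name__ == '__main__':
    demo_url = 'https://cda.example/byobsid/1843/oif.fits'  # hypothetical
    demo_size = 1024 * 1024                                 # assumed 1 Mb
    nbytes, dtime = download_progress(demo_url, demo_size, 'oif.fits')
    print('downloaded {} in {}'.format(stringify_size(nbytes),
                                       stringify_dt(dtime)))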
| cxcsds/ciao-contrib | ciao_contrib/downloadutils.py | Python | gpl-3.0 | 22,858 |
# coding=utf-8
import random
def consumer():
r = None
while 1:
data = yield r
        print('Consuming: {}'.format(data))
r = data + 1
def producer(consumer):
n = 3
consumer.send(None)
while n:
data = random.choice(range(10))
print('Producing: {}'.format(data))
rs = consumer.send(data)
        print('Consumer return: {}'.format(rs))
n -= 1
consumer.close()
c = consumer()
producer(c)
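# One possible run (the produced values are random, so output varies):
#
# Producing: 4
# Consuming: 4
# Consumer return: 5
# Producing: 0
# Consuming: 0
# Consumer return: 1
# Producing: 9
# Consuming: 9
# Consumer return: 10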
| dongweiming/web_develop | chapter13/section5/use_yield.py | Python | gpl-3.0 | 462 |
import unittest
from decimal import Decimal
from itertools import product
from .riemann import riemann_sum as riemann, RiemannMethod
# Test functions
# lowercase are functions, uppercase are their antiderivatives assuming C = 0.
def f1(x):
return 0
def F1(x):
return 0
def f2(x):
return 3
def F2(x):
return 3 * x
def f3(x):
return x
def F3(x):
return x ** 2 / 2
def f4(x):
return 8 * x
def F4(x):
return 4 * x ** 2
def f5(x):
return 6 * x ** 2
def F5(x):
return 2 * x ** 3
class RiemannTests(unittest.TestCase):
tolerance = Decimal('0.005')
dx = Decimal('0.00001')
functions = [(f1, F1), (f2, F2), (f3, F3), (f4, F4), (f5, F5)]
test_points = [
(0, 1), # Simplest case for most functions
(0, 3), # From zero
(-2, 0), # To zero
(4, 10), # Detatched from zero, positive
(-10, -4), # Detatched from zero, negative
(-5, 7) # Across zero
]
test_points.extend([tuple(reversed(bounds)) for bounds in test_points]) # List, not generator, to evaluate everything before appending
def test_riemann_sum(self):
"""
Test the riemann_sum function by ensuring its results are within a certain small tolerance of the actual value.
        The tolerance is set above, and used as `self.tolerance`.
Every function above is tested for every method and every pair of test points, as well as for each pair reversed.
"""
for (func, antiderivative), method, (x1, x2) in product(self.functions, RiemannMethod, self.test_points):
with self.subTest(function=func.__name__, x1=x1, x2=x2, method=method):
estimate = riemann(func, x1, x2, self.dx, method)
actual = Decimal(antiderivative(x2) - antiderivative(x1))
self.assertAlmostEqual(estimate, actual, delta=self.tolerance)
def test_methods(self):
"""
Test the different methods of Riemann summing to ensure they exhibit the known errors/biases.
For example, a left-hand Riemann sum underestimates increasing functions and overestimates decreasing ones.
"""
def func(x):
return x ** 3
def antiderivative(x):
return x ** 4 / 4
x1, x2, dx = map(Decimal, (-10, 10, '0.5')) # Intentionally large dx to exacerbate over/under-estimation
actual = Decimal(antiderivative(x2) - antiderivative(x1))
# Because x^3 is always increasing, left-hand should underestimate it.
estimate_left = riemann(func, x1, x2, dx, RiemannMethod.left)
self.assertLess(estimate_left, actual)
# Because x^3 is always increasing, right-hand should overestimate it.
estimate_right = riemann(func, x1, x2, dx, RiemannMethod.right)
self.assertGreater(estimate_right, actual)
# Because x^3 is rotationally symmetrical about the origin and the input range is symmetrical about the origin, middle and trapezoid should be dead-on.
estimate_middle = riemann(func, x1, x2, dx, RiemannMethod.middle)
estimate_trapezoid = riemann(func, x1, x2, dx, RiemannMethod.trapezoid)
self.assertEqual(estimate_middle, actual)
self.assertEqual(estimate_trapezoid, actual)
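# Allow the tests to be run directly; because of the relative import above,
# this module must be executed as part of its package, e.g.
# ``python -m <package>.riemann_sum_test`` (package name assumed).
if __name__ == '__main__':
    unittest.main()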
| BayMinimum/Hacktoberfest-Mathematics | calculus/riemann_sum/python/riemann_sum_test.py | Python | gpl-3.0 | 2,960 |
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import unittest
import trytond.tests.test_tryton
from trytond.tests.test_tryton import ModuleTestCase
class WebdavTestCase(ModuleTestCase):
'Test Webdav module'
module = 'webdav'
def suite():
suite = trytond.tests.test_tryton.suite()
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(
WebdavTestCase))
return suite
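# Hedged convenience runner (not part of the original module): executing the
# suite directly assumes a configured Tryton test environment/database.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())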
| tryton/webdav | tests/test_webdav.py | Python | gpl-3.0 | 504 |
# -*- coding: utf-8 -*-
# EDIS - a simple cross-platform IDE for C
#
# This file is part of Edis
# Copyright 2014-2015 - Gabriel Acosta <acostadariogabriel at gmail>
# License: GPLv3 (see http://www.gnu.org/licenses/gpl.html)
from PyQt4.QtGui import (
QGraphicsOpacityEffect,
QFrame
)
from PyQt4.QtCore import (
QPropertyAnimation,
Qt
)
from PyQt4.Qsci import QsciScintilla
class Minimap(QsciScintilla):
def __init__(self, weditor):
QsciScintilla.__init__(self, weditor)
self._weditor = weditor
self._indentation = self._weditor._indentation
self.setLexer(self._weditor.lexer())
        # Scintilla configuration
self.setMouseTracking(True)
self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, False)
self.SendScintilla(QsciScintilla.SCI_HIDESELECTION, True)
self.setFolding(QsciScintilla.NoFoldStyle, 1)
self.setReadOnly(True)
self.setCaretWidth(0)
self.setStyleSheet("background: transparent; border: 0px;")
# Opacity
self.effect = QGraphicsOpacityEffect()
self.setGraphicsEffect(self.effect)
self.effect.setOpacity(0.5)
        # Slider widget
self.slider = Slider(self)
self.slider.hide()
def resizeEvent(self, event):
super(Minimap, self).resizeEvent(event)
self.slider.setFixedWidth(self.width())
lines_on_screen = self._weditor.SendScintilla(
QsciScintilla.SCI_LINESONSCREEN)
self.slider.setFixedHeight(lines_on_screen * 4)
def update_geometry(self):
self.setFixedHeight(self._weditor.height())
self.setFixedWidth(self._weditor.width() * 0.13)
x = self._weditor.width() - self.width()
self.move(x, 0)
self.zoomIn(-3)
def update_code(self):
text = self._weditor.text().replace('\t', ' ' * self._indentation)
self.setText(text)
def leaveEvent(self, event):
super(Minimap, self).leaveEvent(event)
self.slider.animation.setStartValue(0.2)
self.slider.animation.setEndValue(0)
self.slider.animation.start()
def enterEvent(self, event):
super(Minimap, self).enterEvent(event)
if not self.slider.isVisible():
self.slider.show()
else:
self.slider.animation.setStartValue(0)
self.slider.animation.setEndValue(0.2)
self.slider.animation.start()
class Slider(QFrame):
def __init__(self, minimap):
QFrame.__init__(self, minimap)
self._minimap = minimap
self.setStyleSheet("background: gray; border-radius: 3px;")
# Opacity
self.effect = QGraphicsOpacityEffect()
self.setGraphicsEffect(self.effect)
self.effect.setOpacity(0.2)
        # Animation
self.animation = QPropertyAnimation(self.effect, "opacity")
self.animation.setDuration(150)
# Cursor
self.setCursor(Qt.OpenHandCursor)
def mouseMoveEvent(self, event):
super(Slider, self).mouseMoveEvent(event)
#FIXME: funciona algo loco
pos = self.mapToParent(event.pos())
dy = pos.y() - (self.height() / 2)
if dy < 0:
dy = 0
self.move(0, dy)
pos.setY(pos.y() - event.pos().y())
self._minimap._weditor.verticalScrollBar().setValue(pos.y())
self._minimap.verticalScrollBar().setSliderPosition(
self._minimap.verticalScrollBar().sliderPosition() + 2)
self._minimap.verticalScrollBar().setValue(pos.y() - event.pos().y())
def mousePressEvent(self, event):
super(Slider, self).mousePressEvent(event)
self.setCursor(Qt.ClosedHandCursor)
def mouseReleaseEvent(self, event):
super(Slider, self).mouseReleaseEvent(event)
self.setCursor(Qt.OpenHandCursor) | centaurialpha/edis | src/ui/editor/minimap.py | Python | gpl-3.0 | 3,836 |
#!/usr/bin/env python
# coding: utf-8
"""setuptools based setup module"""
from setuptools import setup
# from setuptools import find_packages
# To use a consistent encoding
import codecs
from os import path
import osvcad
here = path.abspath(path.dirname(__file__))
# Get the long description from the README_SHORT file
with codecs.open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=osvcad.__name__,
version=osvcad.__version__,
description=osvcad.__description__,
long_description=long_description,
url=osvcad.__url__,
download_url=osvcad.__download_url__,
author=osvcad.__author__,
author_email=osvcad.__author_email__,
license=osvcad.__license__,
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Software Development',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
keywords=['OpenCascade', 'PythonOCC', 'ccad', 'CAD', 'parts', 'json'],
packages=['osvcad',
'osvcad.jupyter',
'osvcad.utils',
'osvcad.ui'],
install_requires=[],
# OCC, scipy and wx cannot be installed via pip
extras_require={'dev': [],
'test': ['pytest', 'coverage'], },
package_data={},
data_files=[('osvcad/ui/icons',
['osvcad/ui/icons/blue_folder.png',
'osvcad/ui/icons/file_icon.png',
'osvcad/ui/icons/folder.png',
'osvcad/ui/icons/green_folder.png',
'osvcad/ui/icons/open.png',
'osvcad/ui/icons/python_icon.png',
'osvcad/ui/icons/quit.png',
'osvcad/ui/icons/refresh.png',
'osvcad/ui/icons/save.png']),
('osvcad/ui',
['osvcad/ui/osvcad.ico',
'osvcad/ui/osvcadui.ini'])],
entry_points={},
scripts=['bin/osvcad-ui']
)
| osv-team/osvcad | setup.py | Python | gpl-3.0 | 2,202 |
# Copyright (C) 2011 Pawel Stiasny
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import curses
import locale
import pickle
import os
import os.path
from fractions import Fraction
from .tablature import Fret, Chord, Bar, Tablature, ChordRange
from . import symbols
from . import music
from .player import Player
locale.setlocale(locale.LC_ALL, '')
encoding = locale.getpreferredencoding()
class Editor:
screen_initiated = False
cursor_prev_bar_x = 2
insert_duration = Fraction('1/4')
st = ''
file_name = None
terminate = False
visible_meta = 'meter'
continuous_playback = False
yanked_bar = None
string = 0
def __init__(self, stdscr, tab = Tablature()):
self.root = stdscr
self.tab = tab
self.nmap = {}
self.motion_commands = {}
self.commands = {}
self.player = Player()
def init_screen(self):
screen_height, screen_width = self.root.getmaxyx()
self.stdscr = curses.newwin(screen_height - 1, 0, 0, 0)
self.stdscr.keypad(1)
if self.file_name:
self.set_term_title(self.file_name + ' - VITABS')
else:
self.set_term_title('[unnamed] - VITABS')
self.status_line = curses.newwin(0, 0, screen_height - 1, 0)
self.status_line.scrollok(False)
self.first_visible_bar = self.tab.cursor_bar
self.redraw_view()
self.cy = 2
self.move_cursor()
curses.doupdate()
self.screen_initiated = True
def make_motion_cmd(self, f):
'''Turn a motion command into a normal mode command'''
def motion_wrap(ed, num):
m = f(ed, num)
if m is not None:
                ed.make_motion(m)
motion_wrap.__name__ = f.__name__
motion_wrap.__doc__ = f.__doc__
motion_wrap.nosidefx = True
return motion_wrap
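    # Illustrative use: wrapping a motion that returns a (bar, chord) pair
    # turns it into a normal-mode command that moves the cursor, e.g.
    #   self.nmap[key] = self.make_motion_cmd(some_motion_function)
    # (key and some_motion_function are hypothetical names here).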
def mark_changed(self):
if not getattr(self.tab, 'changed', False):
if self.file_name:
self.set_term_title(self.file_name + ' + - VITABS')
else:
self.set_term_title('[unnamed] + - VITABS')
self.tab.changed = True
def register_handlers(self, module):
'''Add commands defined in the module'''
for f in module.__dict__.values():
if hasattr(f, 'normal_keys'):
if getattr(f, 'motion_command', False):
for k in f.normal_keys:
self.nmap[k] = self.make_motion_cmd(f)
self.motion_commands[k] = f
else:
for k in f.normal_keys:
self.nmap[k] = f
if hasattr(f, 'handles_command'):
self.commands[f.handles_command] = f
def load_tablature(self, filename):
'''Unpickle tab from a file'''
try:
if os.path.isfile(filename):
infile = open(filename, 'rb')
self.tab = pickle.load(infile)
infile.close()
else:
self.tab = Tablature()
self.file_name = filename
self.set_term_title(filename + ' - VITABS')
self.st = '{0} ({1} bars, tuning: {2})'.format(
filename, len(self.tab.bars),
music.tuning_str(getattr(self.tab, 'tuning', music.standard_E)))
        except Exception:
self.st = 'Error: Can\'t open the specified file'
def save_tablature(self, filename):
'''Pickle tab to a file'''
if hasattr(self.tab, 'changed'):
self.tab.changed = False
delattr(self.tab, 'changed')
try:
outfile = open(filename, 'wb')
pickle.dump(self.tab, outfile)
outfile.close()
self.file_name = filename
        except Exception:
self.st = 'Error: Can\'t save'
self.set_term_title(filename + ' - VITABS')
def set_term_title(self, text):
        '''Attempt to change the virtual terminal window title'''
import sys
try:
term = os.environ['TERM']
if 'xterm' in term or 'rxvt' in term:
sys.stdout.write('\033]0;' + text + '\007')
sys.stdout.flush()
        except Exception:
pass
def draw_bar(self, y, x, bar):
'''Render a single bar at specified position'''
stdscr = self.stdscr
screen_width = self.stdscr.getmaxyx()[1]
stdscr.vline(y, x - 1, curses.ACS_VLINE, 6)
gcd = bar.gcd()
total_width = bar.total_width(gcd)
for i in range(6):
stdscr.hline(y + i, x, curses.ACS_HLINE, total_width)
x += 1
for chord in bar.chords:
for i in list(chord.strings.keys()):
if x < screen_width:
stdscr.addstr(y+i, x, str(chord.strings[i]), curses.A_BOLD)
# should it really be here?
if self.visible_meta == 'length':
dstr = music.len_str(chord.duration)
if x + len(dstr) < screen_width:
stdscr.addstr(y - 1, x, dstr)
width = int(chord.duration / gcd)
x = x + width*2 + 1
if x + 1 < screen_width:
stdscr.vline(y, x + 1, curses.ACS_VLINE, 6)
return x + 2
def draw_bar_meta(self, y, x, bar, prev_bar, index):
'''Print additional bar info at specified position'''
if self.visible_meta == 'meter':
            if (prev_bar is None
or bar.sig_num != prev_bar.sig_num
or bar.sig_den != prev_bar.sig_den):
self.stdscr.addstr(
y, x,
str(bar.sig_num) + '/' + str(bar.sig_den))
elif self.visible_meta == 'number':
self.stdscr.addstr(y, x, str(index))
elif self.visible_meta == 'label':
if hasattr(bar, 'label'):
self.stdscr.addstr(y, x, bar.label)
def draw_tab(self, t):
'''Render the whole tablature'''
x = 2
y = 1
prev_bar = None
screen_height, screen_width = self.stdscr.getmaxyx()
for i, tbar in enumerate(t.bars[self.first_visible_bar - 1 : ]):
bar_width = tbar.total_width(tbar.gcd())
if x + bar_width >= screen_width and x != 2:
x = 2
y += 8
if y + 8 > screen_height:
break
self.draw_bar_meta(y, x, tbar, prev_bar, self.first_visible_bar + i)
x = self.draw_bar(y + 1, x, tbar)
self.last_visible_bar = i + self.first_visible_bar
prev_bar = tbar
def redraw_view(self):
'''Redraw tab window'''
self.stdscr.erase()
        self.draw_tab(self.tab)  # merge these functions?
self.stdscr.noutrefresh()
def term_resized(self):
'''Called when the terminal window is resized, updates window sizes'''
height, width = self.root.getmaxyx()
self.status_line.mvwin(height - 1, 0)
self.stdscr.resize(height - 1, width)
self.redraw_view()
self.move_cursor()
def redraw_status(self):
'''Update status bar'''
width = self.status_line.getmaxyx()[1]
self.status_line.erase()
# general purpose status line
self.status_line.addstr(0, 0, self.st)
# position indicator
self.status_line.addstr(
0, width - 8,
'{0},{1}'.format(self.tab.cursor_bar, self.tab.cursor_chord))
# note length indicator
self.status_line.addstr(
0, width - 16,
str(self.tab.get_cursor_chord().duration))
# meter incomplete indicator
cb = self.tab.get_cursor_bar()
if cb.real_duration() != cb.required_duration():
self.status_line.addstr(0, width - 18, 'M')
self.status_line.noutrefresh()
def pager(self, lines):
'''Display a list of lines in a paged fashion'''
self.root.scrollok(True)
self.root.clear()
i = 0
h = self.root.getmaxyx()[0]
for line in lines:
self.root.addstr(i, 1, line)
i += 1
if i == h - 1:
self.root.addstr(i, 0, '<Space> NEXT PAGE')
self.root.refresh()
while self.get_char() != ord(' '): pass
self.root.clear()
i = 0
self.root.addstr(h - 1, 0, '<Space> CONTINUE')
while self.get_char(self.root) != ord(' '): pass
self.root.scrollok(False)
self.root.clear()
self.redraw_view()
def move_cursor(self, new_bar=None, new_chord=None, cache_lengths=False):
'''Set new cursor position'''
if not new_bar: new_bar = self.tab.cursor_bar
if not new_chord: new_chord = self.tab.cursor_chord
if not cache_lengths: self.cursor_prev_bar_x = None
# make sure the cursor stays inside the visible bar range
if new_bar < self.first_visible_bar or new_bar > self.last_visible_bar:
self.first_visible_bar = new_bar
self.redraw_view()
newbar_i = self.tab.bars[new_bar - 1]
        # calculate the width of preceding bars
screen_height, screen_width = self.stdscr.getmaxyx()
        if new_bar != self.tab.cursor_bar or self.cursor_prev_bar_x is None:
self.cursor_prev_bar_x = 2
self.cy = 2
if new_bar > self.first_visible_bar:
for b in self.tab.bars[self.first_visible_bar - 1 : new_bar - 1]:
barw = b.total_width(b.gcd()) + 1
self.cursor_prev_bar_x += barw
if (self.cursor_prev_bar_x > screen_width and
self.cursor_prev_bar_x != 2 + barw):
self.cursor_prev_bar_x = 2 + barw
self.cy += 8
# should the cursor bar be wrapped?
newbar_w = newbar_i.total_width(newbar_i.gcd()) + 1
if newbar_w + self.cursor_prev_bar_x > screen_width:
self.cursor_prev_bar_x = 2
self.cy += 8
        # width of preceding chords
offset = 1
gcd = newbar_i.gcd()
for c in newbar_i.chords[:new_chord - 1]:
offset += int(c.duration / gcd)*2 + 1
self.tab.cursor_bar = new_bar
self.tab.cursor_chord = new_chord
self.cx = self.cursor_prev_bar_x + offset
def make_motion(self, pos):
self.move_cursor(pos[0], 1 if pos[1] is None else pos[1],
cache_lengths=True)
def go_left(self, num=1):
'''Returns position pair [num] chords left from the cursor'''
if self.tab.cursor_chord <= num:
if self.tab.cursor_bar > 1:
return (self.tab.cursor_bar - 1,
len(self.tab.bars[self.tab.cursor_bar - 2].chords))
else:
return (1, 1)
else:
return (self.tab.cursor_bar, self.tab.cursor_chord - num)
def move_cursor_left(self):
self.make_motion(self.go_left())
def go_right(self, num=1):
'''Returns position pair [num] chords right from the cursor'''
if self.tab.cursor_chord + num > len(self.tab.get_cursor_bar().chords):
if self.tab.cursor_bar < len(self.tab.bars):
return (self.tab.cursor_bar + 1, 1)
else:
return self.tab.last_position()
else:
return (self.tab.cursor_bar, self.tab.cursor_chord + num)
def move_cursor_right(self):
self.make_motion(self.go_right())
def play_range(self, fro, to):
def redraw_playback_status():
self.st = 'Playing... <CTRL-C> to abort'
self.redraw_status()
curses.setsyx(self.cy - 1, self.cx)
curses.doupdate()
def move_to_beginning():
self.move_cursor(fro[0], fro[1])
redraw_playback_status()
return True
def update_playback_status():
self.move_cursor_right()
redraw_playback_status()
return True
p = self.player
p.before_repeat = move_to_beginning
p.post_play_chord = update_playback_status
p.set_instrument(getattr(self.tab, 'instrument', 24))
p.play(ChordRange(self.tab, fro, to), self.continuous_playback)
self.st = ''
def get_char(self, parent=None):
'''Get a character from terminal, handling things like terminal
resize'''
if parent is None:
parent = self.stdscr
c = parent.getch()
if c == curses.KEY_RESIZE:
self.term_resized()
return c
def insert_mode(self, free_motion=False):
'''Switch to insert mode and listen for keys'''
if free_motion:
self.st = '-- REPLACE --'
else:
self.st = '-- INSERT --'
self.redraw_view()
insert_beg = self.tab.cursor_position()
insert_end = insert_beg
while True:
self.redraw_status()
curses.setsyx(self.cy + self.string, self.cx)
curses.doupdate()
c = self.get_char()
if c == 27: # ESCAPE
self.st = ''
break
elif ord('0') <= c <= ord('9'):
curch = self.tab.get_cursor_chord()
string = self.string
if string in curch.strings and curch.strings[string].fret < 10:
st_dec = curch.strings[string].fret * 10
curch.strings[string].fret = st_dec + c - ord('0')
else:
curch.strings[string] = Fret(c - ord('0'))
self.redraw_view()
elif c == curses.KEY_DC or c == ord('x'):
if self.string in self.tab.get_cursor_chord().strings:
del self.tab.get_cursor_chord().strings[self.string]
self.redraw_view()
elif c == curses.KEY_UP or c == ord('k'):
self.string = max(self.string - 1, 0)
elif c == curses.KEY_DOWN or c == ord('j'):
self.string = min(self.string + 1, 5)
elif c == ord('E'): self.string = 5
elif c == ord('A'): self.string = 4
elif c == ord('D'): self.string = 3
elif c == ord('G'): self.string = 2
elif c == ord('B'): self.string = 1
elif c == ord('e'): self.string = 0
elif c == ord(' '):
# TODO: don't repeat yourself...
self.tab.get_cursor_bar().chords.insert(
self.tab.cursor_chord,
Chord(self.insert_duration))
self.redraw_view()
self.move_cursor_right()
self.move_cursor()
insert_end = (insert_end[0], insert_end[1] + 1)
elif (c == curses.KEY_RIGHT or c == ord('l')) and not free_motion:
right = (self.tab.cursor_bar, self.tab.cursor_chord + 1)
if right > insert_end:
self.tab.get_cursor_bar().chords.insert(
self.tab.cursor_chord,
Chord(self.insert_duration))
self.redraw_view()
insert_end = right
self.make_motion(right)
self.move_cursor()
elif (c == curses.KEY_LEFT or c == ord('h')) and not free_motion:
left = self.go_left()
if left >= insert_beg:
self.make_motion(left)
            elif (c == curses.KEY_RIGHT or c == ord('l')) and free_motion:
self.move_cursor_right()
elif (c == curses.KEY_LEFT or c == ord('h')) and free_motion:
self.move_cursor_left()
try:
# try to find a symbol in key -> symbol dict
sym = symbols.keys[c]
fr = self.tab.get_cursor_chord().strings[self.string]
if sym in fr.symbols:
fr.symbols.remove(sym)
else:
fr.symbols.append(sym)
self.redraw_view()
except KeyError:
pass
def exec_command(self, args, apply_to=None):
cmd = args[0]
try:
if apply_to is not None:
try:
self.commands[cmd](self, args, apply_to=apply_to)
except TypeError:
self.st = 'Command does not accept range'
else:
self.commands[cmd](self, args)
except KeyError:
self.st = 'Invalid command'
def command_mode(self):
'''Read a command'''
import sys
curses.echo()
self.status_line.erase()
self.status_line.addstr(0, 0, ":")
try:
line = self.status_line.getstr(0, 1).decode(encoding)
except KeyboardInterrupt:
line = ''
words = line.split(' ')
cmd = words[0]
curses.noecho()
if cmd:
try:
self.exec_command(words)
except:
exc = sys.exc_info()
self.st = "Exception: " + str(exc[0].__name__) + ": " + \
str(exc[1])
self.redraw_view()
def _is_number(self, char):
return (ord('0') <= char <= ord('9'))
def _parse_numeric_arg(self, c, num_arg):
if num_arg:
num_arg = num_arg * 10 + c - ord('0')
elif c != ord('0'):
num_arg = c - ord('0')
return num_arg
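    # Example: starting from num_arg=None, pressing '1' yields 1 and a
    # following '2' yields 12, i.e. vi-style numeric prefix accumulation.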
def expect_range(self, num=None, whole_bar_cmd=None):
'''Get a motion command and return a range from cursor position to
motion'''
num_motion = None
c = self.get_char()
while self._is_number(c) and (c != ord('0') or num_motion):
num_motion = self._parse_numeric_arg(c, num_motion)
c = self.get_char()
if num_motion and num: total_num = num * num_motion
elif num_motion: total_num = num_motion
else: total_num = num
cur = self.tab.cursor_position()
if whole_bar_cmd and c == whole_bar_cmd:
return ChordRange(self.tab,
(cur[0], 1),
(cur[0], None))
try:
dest = self.motion_commands[c](self, total_num)
if dest:
if dest > cur:
return ChordRange(self.tab, cur, dest)
else:
return ChordRange(self.tab, dest, cur)
except KeyError:
return None
def normal_mode(self):
'''Enter normal mode, returns on quit'''
num_arg = None
t = self.tab
while True:
if self.terminate:
break
self.redraw_status()
self.st = ''
curses.setsyx(self.cy - 1, self.cx)
curses.doupdate()
# TODO: accept multi-char commands
try:
c = self.get_char()
if c in self.nmap:
cmd = self.nmap[c]
cmd(self, num_arg)
if not (getattr(cmd, 'nosidefx', False)):
self.mark_changed()
self.redraw_view()
if self._is_number(c):
num_arg = self._parse_numeric_arg(c, num_arg)
if num_arg: self.st = str(num_arg)
else:
# reset after a command
num_arg = None
if c == 27: # ESCAPE
self.st = ''
except KeyboardInterrupt:
self.st = 'Use :q<Enter> to quit'
| pstiasny/VITABS | vitabs/editor.py | Python | gpl-3.0 | 20,458 |
"""
Updates the version in the binary executable of the Forged Alliance game. Will write a new ForgedAlliance.version.exe
file.
Usage:
update_version <version> [--file=<file>] [--dest=<dest>]
Options:
--file=<file> The binary file to update [default: ForgedAlliance.exe]
--dest=<dest> The folder path where to create the patched filed [default: .]
"""
import os
import struct
import shutil
import logging
from docopt import docopt
logger = logging.getLogger(__name__)
def update_exe_version(source, destination, version):
"""
:param source: Path to the static base copy of ForgedAlliance.exe - Hardcoded in API
:param destination: Path this update is being copied to
:param version: New mod version
:return:
"""
# os.path.join due to Python 2.7 compatibility
destination = os.path.join(str(destination), "ForgedAlliance.%s.exe" % version)
shutil.copyfile(str(source), str(destination))
    addr = [0xd3d3f, 0x47612c, 0x476665]
    v = struct.pack("<L", int(version))
    with open(str(destination), 'rb+') as f:
        for a in addr:
            f.seek(a + 1, 0)
            f.write(v)
    logger.info("Saved ForgedAlliance.%s.exe" % version)
    return f
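# Illustrative check of the encoding used above: struct.pack("<L", 3721)
# == b'\x89\x0e\x00\x00', i.e. the version is written as a 4-byte
# little-endian unsigned integer at each patch address + 1.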
if __name__ == '__main__':
arguments = docopt(__doc__)
source, destination, version = arguments.get('--file'), arguments.get('--dest'), arguments.get('<version>')
update_exe_version(source, destination, version)
| FAForever/faftools | faf/tools/fa/update_version.py | Python | gpl-3.0 | 1,436 |
from django.views.generic import CreateView, DetailView, UpdateView, ListView
from django.views.generic import DeleteView
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django import http
from django.contrib import messages
from .. import forms
from .. import models
class CarNew(CreateView):
model = models.Car
form_class = forms.CarForm
template_name = 'web/car_new.html'
success_url = reverse_lazy('car_list')
def form_valid(self, form):
form.instance.owner = self.request.user
return super(CarNew, self).form_valid(form)
class CarUpdate(UpdateView):
model = models.Car
form_class = forms.CarForm
template_name = 'web/car_new.html'
success_url = reverse_lazy('cars')
def dispatch(self, request, *args, **kwargs):
obj = models.Car.objects.filter(pk=kwargs['pk']).filter(
owner=self.request.user)
if not obj:
messages.error(request, _('This car is not yours.'))
return http.HttpResponseRedirect(reverse_lazy('car_list'))
return super(CarUpdate, self).dispatch(request, *args, **kwargs)
class CarList(ListView):
model = models.Car
def get_queryset(self):
return models.Car.objects.filter(owner=self.request.user).all()
class CarDetail(DetailView):
model = models.Car
class CarDelete(DeleteView):
model = models.Car
| jizdoteka/jizdoteka-web | apps/web/views/car.py | Python | gpl-3.0 | 1,433 |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 23:48:22 2015
@author: thorsten
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import os, sys
ROOT_DIR = os.path.dirname(__file__)
sys.path.append(os.path.join(ROOT_DIR,'..','..','CSXCAD','python'))
extensions = [
Extension("*", [os.path.join(os.path.dirname(__file__), "openEMS","*.pyx")],
language="c++", # generate C++ code
libraries = ['CSXCAD','openEMS', 'nf2ff']),
]
setup(
name="openEMS",
version = '0.0.33',
description = "Python interface for the openEMS FDTD library",
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
],
author = 'Thorsten Liebig',
author_email = '[email protected]',
maintainer = 'Thorsten Liebig',
maintainer_email = '[email protected]',
url = 'http://openEMS.de',
packages=["openEMS", ],
package_data={'openEMS': ['*.pxd']},
ext_modules = cythonize(extensions)
)
| georgmichel/openEMS | python/setup.py | Python | gpl-3.0 | 1,466 |
#!/usr/bin/env python3
import gi
import os
import webbrowser
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class ButtonWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Hall Launcher")
self.set_border_width(10)
hbox = Gtk.Box(spacing=100)
hbox.set_homogeneous(False)
vbox_top = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing = 10)
vbox_top.set_homogeneous(False)
vbox_bottom = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing = 10)
vbox_bottom.set_homogeneous(False)
vbox_next = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing = 10)
vbox_next.set_homogeneous(False)
hbox.pack_start(vbox_top, True, True, 0)
hbox.pack_start(vbox_bottom, True, True, 0)
hbox.pack_start(vbox_next, True, True, 0)
button1 = Gtk.Button.new_with_label("HallLinux")
button1.connect("clicked", self.on_halllinux_clicked)
button2 = Gtk.Button.new_with_mnemonic("Mousepad Text Editor")
button2.connect("clicked", self.open_mousepad_clicked)
button_google_chrome = Gtk.Button.new_with_mnemonic("Google Chromium")
button_google_chrome.connect("clicked", self.google_chromium)
button_google_firefox = Gtk.Button.new_with_mnemonic("Google Firefox")
button_google_firefox.connect("clicked", self.google_firefox)
button_youtube_chrome = Gtk.Button.new_with_mnemonic("Youtube Chromium")
button_youtube_chrome.connect("clicked", self.youtube_chromium)
button_youtube_firefox = Gtk.Button.new_with_mnemonic("Youtube Firefox")
button_youtube_firefox.connect("clicked", self.youtube_firefox)
button_drive = Gtk.Button.new_with_mnemonic("Google Drive")
button_drive.connect("clicked", self.google_drive)
button_keep = Gtk.Button.new_with_mnemonic("Google Keep")
button_keep.connect("clicked", self.google_keep)
button_quit = Gtk.Button.new_with_mnemonic("QUIT")
button_quit.connect("clicked", self.quit_clicked)
vbox_top.pack_start(button1, True, True, 0)
vbox_top.pack_start(button2, True, True, 0)
vbox_top.pack_start(button_google_chrome, True, True, 0)
vbox_top.pack_start(button_google_firefox, True, True, 0)
vbox_bottom.pack_start(button_youtube_chrome, True, True, 0)
vbox_bottom.pack_start(button_youtube_firefox, True, True, 0)
vbox_bottom.pack_start(button_drive, True, True, 0)
vbox_bottom.pack_start(button_keep, True, True, 0)
vbox_next.pack_start(button_quit, True, True, 0)
self.add(hbox)
def on_halllinux_clicked(self, button):
webbrowser.get('chromium').open('www.halllinux.com')
def google_chromium(self, button):
webbrowser.get('chromium').open('www.google.com')
def google_firefox(self, button):
webbrowser.get('firefox').open('www.google.com')
def youtube_chromium(self, button):
webbrowser.get('chromium').open('www.youtube.com')
def youtube_firefox(self, button):
webbrowser.get('firefox').open('www.youtube.com')
def google_drive(self, button):
webbrowser.get('chromium').open('drive.google.com')
def google_keep(self, button):
webbrowser.get('chromium').open('keep.google.com')
def open_mousepad_clicked(self, button):
os.system("mousepad&!")
def quit_clicked(self, button):
print("Closing application")
Gtk.main_quit()
win = ButtonWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
| HallLinux/Hall_Launcher | hall_launcher.py | Python | gpl-3.0 | 3,662 |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Basic tests for Page Templates used in content-space.
$Id: test_dtmlpage.py 67630 2006-04-27 00:54:03Z jim $
"""
import unittest
from zope.security.checker import NamesChecker, defineChecker
from zope.traversing.adapters import Traverser, DefaultTraversable
from zope.traversing.interfaces import ITraverser, ITraversable
from zope.app.testing.placelesssetup import PlacelessSetup
from zope.app.testing import ztapi
from zope.app.container.contained import contained
from zope.app.dtmlpage.dtmlpage import DTMLPage
class Data(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __getitem__(self, name):
return getattr(self, name)
class DTMLPageTests(PlacelessSetup, unittest.TestCase):
def setUp(self):
super(DTMLPageTests, self).setUp()
ztapi.provideAdapter(None, ITraverser, Traverser)
ztapi.provideAdapter(None, ITraversable, DefaultTraversable)
defineChecker(Data, NamesChecker(['URL', 'name', '__getitem__']))
def test(self):
page = DTMLPage()
page.setSource(
u'<html>'
u'<head><title><dtml-var title></title></head>'
u'<body>'
u'<a href="<dtml-var "REQUEST.URL[\'1\']">">'
u'<dtml-var name>'
u'</a></body></html>'
)
page = contained(page, Data(name=u'zope'))
out = page.render(Data(URL={u'1': u'http://foo.com/'}),
title=u"Zope rules")
out = ' '.join(out.split())
self.assertEqual(
out,
u'<html><head><title>Zope rules</title></head><body>'
u'<a href="http://foo.com/">'
u'zope'
u'</a></body></html>'
)
def test_suite():
return unittest.makeSuite(DTMLPageTests)
if __name__=='__main__':
unittest.TextTestRunner().run(test_suite())
| Donkyhotay/MoonPy | zope/app/dtmlpage/tests/test_dtmlpage.py | Python | gpl-3.0 | 2,514 |
import numpy as np
# Minimal helper definitions assumed by this snippet (they are referenced
# below but not included in the original solution file):
def softmax(z):
    # Numerically stable softmax over the last axis.
    e = np.exp(z - np.max(z, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)
def one_hot(n_classes, y):
    # Integer label(s) -> one-hot vector(s) of length n_classes.
    return np.eye(n_classes)[y]
def NegLogLike(Y_true, Y_pred, eps=1e-15):
    # Mean negative log-likelihood of one-hot targets vs. predicted probas.
    Y_pred = np.clip(Y_pred, eps, 1 - eps)
    return -np.mean(np.sum(Y_true * np.log(Y_pred), axis=-1))
class LogisticRegression():
def __init__(self, input_size, output_size):
self.W = np.random.uniform(size=(input_size, output_size),
high=0.1, low=-0.1)
self.b = np.random.uniform(size=output_size,
high=0.1, low=-0.1)
self.output_size = output_size
def forward(self, X):
Z = np.dot(X, self.W) + self.b
sZ = softmax(Z)
return sZ
def predict(self, X):
if len(X.shape) == 1:
return np.argmax(self.forward(X))
else:
return np.argmax(self.forward(X), axis=1)
def grad_loss(self, x, y_true):
y_pred = self.forward(x)
dnll_output = y_pred - one_hot(self.output_size, y_true)
grad_W = np.outer(x, dnll_output)
grad_b = dnll_output
grads = {"W": grad_W, "b": grad_b}
return grads
def train(self, x, y, learning_rate):
# Traditional SGD update without momentum
grads = self.grad_loss(x, y)
self.W = self.W - learning_rate * grads["W"]
self.b = self.b - learning_rate * grads["b"]
def loss(self, x, y):
nll = NegLogLike(one_hot(self.output_size, y), self.forward(x))
return nll
def accuracy(self, X, y):
y_preds = np.argmax(self.forward(X), axis=1)
acc = np.mean(y_preds == y)
        return acc
| wikistat/Apprentissage | BackPropagation/solutions/lr_class.py | Python | gpl-3.0 | 1424 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-14 14:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('barbadosdb', '0007_club_users'),
]
operations = [
migrations.RenameField(
model_name='club',
old_name='user',
new_name='users',
),
]
| codento/barbados | barbados/barbadosdb/migrations/0008_club_users2.py | Python | gpl-3.0 | 415 |
from argparse import ArgumentParser, ArgumentTypeError
from locale import getdefaultlocale
from multiprocessing import Pool
from contextlib import redirect_stdout
from io import StringIO
from zdict import constants, utils, easter_eggs
from zdict.api import dump
from zdict.completer import DictCompleter
from zdict.loader import get_dictionary_map
from zdict.utils import readline, check_zdict_dir_and_db
def user_set_encoding_and_is_utf8():
# Check user's encoding settings
try:
(lang, enc) = getdefaultlocale()
except ValueError:
print("Didn't detect your LC_ALL environment variable.")
print("Please export LC_ALL with some UTF-8 encoding.")
print("For example: `export LC_ALL=en_US.UTF-8`")
return False
else:
if enc != "UTF-8":
print("zdict only works with encoding=UTF-8, ")
print("but your encoding is: {} {}".format(lang, enc))
print("Please export LC_ALL with some UTF-8 encoding.")
print("For example: `export LC_ALL=en_US.UTF-8`")
return False
return True
def get_args():
# parse args
parser = ArgumentParser(prog='zdict')
parser.add_argument(
'words',
metavar='word',
type=str,
nargs='*',
help='Words for searching its translation'
)
parser.add_argument(
"-v", "--version",
action="version",
version='%(prog)s-' + constants.VERSION
)
parser.add_argument(
"-d", "--disable-db-cache",
default=False,
action="store_true",
help="Temporarily not using the result from db cache.\
(still save the result into db)"
)
parser.add_argument(
"-t", "--query-timeout",
type=float,
default=5.0,
action="store",
help="Set timeout for every query. default is 5 seconds."
)
def positive_int_only(value):
ivalue = int(value)
if ivalue <= 0:
raise ArgumentTypeError(
"%s is an invalid positive int value" % value
)
return ivalue
parser.add_argument(
"-j", "--jobs",
type=positive_int_only,
nargs="?",
default=0, # 0: not using, None: auto, N (1, 2, ...): N jobs
action="store",
help="""
Allow N jobs at once.
Do not pass any argument to use the number of CPUs in the system.
"""
)
parser.add_argument(
"-sp", "--show-provider",
default=False,
action="store_true",
help="Show the dictionary provider of the queried word"
)
parser.add_argument(
"-su", "--show-url",
default=False,
action="store_true",
help="Show the url of the queried word"
)
available_dictionaries = list(dictionary_map.keys())
available_dictionaries.append('all')
parser.add_argument(
"-dt", "--dict",
default="yahoo",
action="store",
choices=available_dictionaries,
metavar=','.join(available_dictionaries),
help="""
Must be seperated by comma and no spaces after each comma.
Choose the dictionary you want. (default: yahoo)
        Use 'all' for querying all dictionaries.
If 'all' or more than 1 dictionaries been chosen,
--show-provider will be set to True in order to
provide more understandable output.
"""
)
parser.add_argument(
"-ld", "--list-dicts",
default=False,
action="store_true",
help="Show currently supported dictionaries."
)
parser.add_argument(
"-V", "--verbose",
default=False,
action="store_true",
help="Show more information for the queried word.\
(If the chosen dictionary have implemented verbose related functions)"
)
parser.add_argument(
"-c", "--force-color",
default=False,
action="store_true",
help="Force color printing (zdict automatically disable color printing \
when output is not a tty, use this option to force color printing)"
)
parser.add_argument(
'--dump', dest='pattern',
nargs='?',
default=None, const=r'^.*$',
help='Dump the querying history, can be filtered with regex'
)
parser.add_argument(
"-D", "--debug",
default=False,
action="store_true",
help="Print raw html prettified by BeautifulSoup for debugging."
)
return parser.parse_args()
def set_args(args):
if args.force_color:
utils.Color.set_force_color()
args.dict = args.dict.split(',')
if 'all' in args.dict:
args.dict = tuple(dictionary_map.keys())
else:
        # Uniquify, filter out dicts not in the supported dictionary map, then sort.
args.dict = sorted(set(d for d in args.dict if d in dictionary_map))
if len(args.dict) > 1:
args.show_provider = True
return args
def lookup_string_wrapper(dict_class, word, args):
import sys
if args.force_color:
utils.Color.set_force_color()
else:
utils.Color.set_force_color(sys.stdout.isatty())
dictionary = dict_class(args)
f = StringIO()
with redirect_stdout(f):
dictionary.lookup(word)
return f.getvalue()
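# Design note: the lookup output is captured via redirect_stdout so that
# worker processes can return results as plain strings, letting the parent
# print them in a deterministic order.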
def init_worker():
# When -j been used, make subprocesses ignore KeyboardInterrupt
# for not showing KeyboardInterrupt traceback error message.
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
def normal_mode(args):
if args.jobs == 0:
# user didn't use `-j`
for word in args.words:
for d in args.dict:
zdict = dictionary_map[d](args)
zdict.lookup(word)
else:
# user did use `-j`
# If processes is None, os.cpu_count() is used.
pool = Pool(args.jobs, init_worker)
for word in args.words:
futures = [
pool.apply_async(lookup_string_wrapper,
(dictionary_map[d], word, args))
for d in args.dict
]
results = [i.get() for i in futures]
print(''.join(results))
easter_eggs.lookup_pyjokes(word)
class MetaInteractivePrompt():
def __init__(self, args):
self.args = args
self.dicts = tuple(
dictionary_map[d](self.args) for d in self.args.dict
)
self.dict_classes = tuple(dictionary_map[d] for d in self.args.dict)
if self.args.jobs == 0:
# user didn't use `-j`
self.pool = None
else:
# user did use `-j`
# If processes is None, os.cpu_count() is used.
self.pool = Pool(self.args.jobs, init_worker)
def __del__(self):
del self.dicts
def prompt(self):
user_input = input('[zDict]: ').strip()
if user_input:
if self.pool:
futures = [
self.pool.apply_async(lookup_string_wrapper,
(d, user_input, self.args))
for d in self.dict_classes
]
results = [i.get() for i in futures]
print(''.join(results))
else:
for dictionary_instance in self.dicts:
dictionary_instance.lookup(user_input)
else:
return
def loop_prompt(self):
while True:
self.prompt()
def interactive_mode(args):
# configure readline and completer
readline.parse_and_bind("tab: complete")
readline.set_completer(DictCompleter().complete)
zdict = MetaInteractivePrompt(args)
zdict.loop_prompt()
def execute_zdict(args):
if args.list_dicts:
for provider in sorted(dictionary_map):
print(
'{}: {}\n{}\n'.format(
provider,
dictionary_map[provider](args).title,
dictionary_map[provider](args).HOMEPAGE_URL,
)
)
exit()
if args.pattern:
for word in dump(pattern=args.pattern):
print(word)
exit()
try:
if args.words:
normal_mode(args)
else:
interactive_mode(args)
except (KeyboardInterrupt, EOFError):
print()
return
def main():
if user_set_encoding_and_is_utf8():
check_zdict_dir_and_db()
global dictionary_map
dictionary_map = get_dictionary_map()
args = get_args()
args = set_args(args)
execute_zdict(args)
else:
exit()
| zdict/zdict | zdict/zdict.py | Python | gpl-3.0 | 8,711 |
# Generated by Django 2.0.9 on 2019-01-25 03:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0009_auto_20170824_0722'),
]
operations = [
migrations.AlterField(
model_name='alphagramtag',
name='tag',
field=models.CharField(choices=[('D5', 'Very Easy'), ('D4', 'Easy'), ('D3', 'Average'), ('D2', 'Hard'), ('D1', 'Very Hard')], max_length=2),
),
]
| domino14/Webolith | djAerolith/base/migrations/0010_auto_20190124_1902.py | Python | gpl-3.0 | 488 |
# Authors:
# Jason Gerard DeRose <[email protected]>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib.crud` module.
"""
from ipatests.util import read_only, raises, get_api, ClassChecker
from ipalib import crud, frontend, plugable, config
from ipalib.parameters import Str
class CrudChecker(ClassChecker):
"""
Class for testing base classes in `ipalib.crud`.
"""
def get_api(self, args=tuple(), options=tuple()):
"""
Return a finalized `ipalib.plugable.API` instance.
"""
(api, home) = get_api()
class user(frontend.Object):
takes_params = (
'givenname',
Str('sn', flags='no_update'),
Str('uid', primary_key=True),
'initials',
Str('uidnumber', flags=['no_create', 'no_search'])
)
class user_verb(self.cls):
takes_args = args
takes_options = options
api.register(user)
api.register(user_verb)
api.finalize()
return api
class test_Create(CrudChecker):
"""
Test the `ipalib.crud.Create` class.
"""
_cls = crud.Create
def test_get_args(self):
"""
Test the `ipalib.crud.Create.get_args` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.args) == ['uid']
assert api.Method.user_verb.args.uid.required is True
def test_get_options(self):
"""
Test the `ipalib.crud.Create.get_options` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.options) == \
['givenname', 'sn', 'initials', 'all', 'raw', 'version']
for param in api.Method.user_verb.options():
if param.name != 'version':
assert param.required is True
api = self.get_api(options=(Str('extra?'),))
assert list(api.Method.user_verb.options) == \
['givenname', 'sn', 'initials', 'extra', 'all', 'raw', 'version']
assert api.Method.user_verb.options.extra.required is False
class test_Update(CrudChecker):
"""
Test the `ipalib.crud.Update` class.
"""
_cls = crud.Update
def test_get_args(self):
"""
Test the `ipalib.crud.Update.get_args` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.args) == ['uid']
assert api.Method.user_verb.args.uid.required is True
def test_get_options(self):
"""
Test the `ipalib.crud.Update.get_options` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.options) == \
['givenname', 'initials', 'uidnumber', 'all', 'raw', 'version']
for param in api.Method.user_verb.options():
if param.name in ['all', 'raw']:
assert param.required is True
else:
assert param.required is False
class test_Retrieve(CrudChecker):
"""
Test the `ipalib.crud.Retrieve` class.
"""
_cls = crud.Retrieve
def test_get_args(self):
"""
Test the `ipalib.crud.Retrieve.get_args` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.args) == ['uid']
assert api.Method.user_verb.args.uid.required is True
def test_get_options(self):
"""
Test the `ipalib.crud.Retrieve.get_options` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.options) == ['all', 'raw', 'version']
class test_Delete(CrudChecker):
"""
Test the `ipalib.crud.Delete` class.
"""
_cls = crud.Delete
def test_get_args(self):
"""
Test the `ipalib.crud.Delete.get_args` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.args) == ['uid']
assert api.Method.user_verb.args.uid.required is True
def test_get_options(self):
"""
Test the `ipalib.crud.Delete.get_options` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.options) == ['version']
assert len(api.Method.user_verb.options) == 1
class test_Search(CrudChecker):
"""
Test the `ipalib.crud.Search` class.
"""
_cls = crud.Search
def test_get_args(self):
"""
Test the `ipalib.crud.Search.get_args` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.args) == ['criteria']
assert api.Method.user_verb.args.criteria.required is False
def test_get_options(self):
"""
Test the `ipalib.crud.Search.get_options` method.
"""
api = self.get_api()
assert list(api.Method.user_verb.options) == \
['givenname', 'sn', 'uid', 'initials', 'all', 'raw', 'version']
for param in api.Method.user_verb.options():
if param.name in ['all', 'raw']:
assert param.required is True
else:
assert param.required is False
class test_CrudBackend(ClassChecker):
"""
Test the `ipalib.crud.CrudBackend` class.
"""
_cls = crud.CrudBackend
def get_subcls(self):
class ldap(self.cls):
pass
return ldap
def check_method(self, name, *args):
o = self.cls()
e = raises(NotImplementedError, getattr(o, name), *args)
assert str(e) == 'CrudBackend.%s()' % name
sub = self.subcls()
e = raises(NotImplementedError, getattr(sub, name), *args)
assert str(e) == 'ldap.%s()' % name
def test_create(self):
"""
Test the `ipalib.crud.CrudBackend.create` method.
"""
self.check_method('create')
def test_retrieve(self):
"""
Test the `ipalib.crud.CrudBackend.retrieve` method.
"""
self.check_method('retrieve', 'primary key', 'attribute')
def test_update(self):
"""
Test the `ipalib.crud.CrudBackend.update` method.
"""
self.check_method('update', 'primary key')
def test_delete(self):
"""
Test the `ipalib.crud.CrudBackend.delete` method.
"""
self.check_method('delete', 'primary key')
def test_search(self):
"""
Test the `ipalib.crud.CrudBackend.search` method.
"""
self.check_method('search')
| cluck/freeipa | ipatests/test_ipalib/test_crud.py | Python | gpl-3.0 | 7,084 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import json
import os
import re
import sys
THIS_DIR = os.path.abspath('.')
BASE_DIR = os.path.abspath('..')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, BASE_DIR)
bower_metadata = json.load(open(os.path.join(BASE_DIR, 'bower.json')))
npm_metadata = json.load(open(os.path.join(BASE_DIR, 'package.json')))
def setup(app):
app.add_config_value('readthedocs', False, True)
readthedocs = os.environ.get('READTHEDOCS') == 'True'
if readthedocs:
os.environ['GMUSICPROCURATOR_SETTINGS'] = 'default_settings.py'
# -- General configuration ----------------------------------------------------
AUTHORS = u', '.join(bower_metadata['authors'])
TITLE = u'GMusicProcurator'
LONG_TITLE = u'{0} Documentation'.format(TITLE)
SUMMARY = bower_metadata['description']
SHORT_COPYRIGHT = u'2014, {0}. Some Rights Reserved.'.format(AUTHORS)
COPYRIGHT = u'''{0}
This work is licensed under a
Creative Commons Attribution-ShareAlike 4.0
International License'''.format(SHORT_COPYRIGHT)
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask',
]
if not readthedocs:
extensions += [
'sphinxcontrib.coffeedomain',
]
try:
import rst2pdf
except ImportError:
rst2pdf = None
if rst2pdf:
extensions.append('rst2pdf.pdfbuilder')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = TITLE
copyright = COPYRIGHT
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
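# e.g. npm_metadata['version'] == "1.2.3" gives version == "1.2"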
version = re.match(r'\d+\.\d+', npm_metadata['version']).group(0)
# The full version, including alpha/beta/rc tags.
release = npm_metadata['version']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
if readthedocs:
exclude_patterns += [
'coffeescript.rst',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# intersphinx extension
intersphinx_mapping = {
'py': ('http://docs.python.org/2.7/', None)
}
mdn_inv = os.path.join(THIS_DIR, 'mdn-js-objects.inv')
bb_inv = os.path.join(THIS_DIR, 'backbone.inv')
if not readthedocs:
if os.path.exists(mdn_inv):
mdn = 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/'
intersphinx_mapping['js'] = (mdn, mdn_inv)
if os.path.exists(bb_inv):
intersphinx_mapping['backbone'] = ('http://backbonejs.org/', bb_inv)
# coffeedomain extension
coffee_src_dir = os.path.join(BASE_DIR, 'gmusicprocurator', 'static', 'cs')
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html',
'copyright_sidebar.html',
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GMusicProcuratordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GMusicProcurator.tex', LONG_TITLE, AUTHORS, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gmusicprocurator', LONG_TITLE, [AUTHORS], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GMusicProcurator', LONG_TITLE, AUTHORS,
'GMusicProcurator', SUMMARY, 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = TITLE
epub_author = AUTHORS
epub_publisher = AUTHORS
epub_copyright = COPYRIGHT
# The basename for the epub file. It defaults to the project name.
# epub_basename = TITLE
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
('index', u'gmusicprocurator', TITLE, AUTHORS),
]
| malept/gmusicprocurator | docs/conf.py | Python | gpl-3.0 | 12,282 |
#!python3
import os
import sys
import fnmatch
import re
import shutil
import subprocess
PYTEST = "pytest"
FLAKE8 = "flake8"
BLACK = "black"
BLACK_FLAGS = ["-l", "79"]
PYGETTEXT = os.path.join(sys.base_prefix, "tools", "i18n", "pygettext.py")
INCLUDE_PATTERNS = {"*.py"}
EXCLUDE_PATTERNS = {
"build/*",
"docs/*",
"mu/contrib/*",
"mu/modes/api/*",
"utils/*",
}
_exported = {}
def _walk(
start_from=".", include_patterns=None, exclude_patterns=None, recurse=True
):
if include_patterns:
_include_patterns = set(os.path.normpath(p) for p in include_patterns)
else:
_include_patterns = set()
if exclude_patterns:
_exclude_patterns = set(os.path.normpath(p) for p in exclude_patterns)
else:
_exclude_patterns = set()
for dirpath, dirnames, filenames in os.walk(start_from):
for filename in filenames:
filepath = os.path.normpath(os.path.join(dirpath, filename))
if not any(
fnmatch.fnmatch(filepath, pattern)
for pattern in _include_patterns
):
continue
if any(
fnmatch.fnmatch(filepath, pattern)
for pattern in _exclude_patterns
):
continue
yield filepath
if not recurse:
break
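# Example: _walk(".", INCLUDE_PATTERNS, EXCLUDE_PATTERNS) yields the
# project's .py files while skipping build/, docs/, etc. (illustrative).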
def _rmtree(dirpath, cascade_errors=False):
try:
shutil.rmtree(dirpath)
except OSError:
if cascade_errors:
raise
def _rmfiles(start_from, pattern):
"""Remove files from a directory and its descendants
Starting from `start_from` directory and working downwards,
remove all files which match `pattern`, eg *.pyc
"""
for filepath in _walk(start_from, {pattern}):
os.remove(filepath)
def export(function):
"""Decorator to tag certain functions as exported, meaning
that they show up as a command, with arguments, when this
file is run.
"""
_exported[function.__name__] = function
return function
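# Note: @export-tagged functions are the subcommands this script exposes
# when run directly (the dispatch code is outside this excerpt).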
@export
def test(*pytest_args):
"""Run the test suite
Call py.test to run the test suite with additional args.
The subprocess runner will raise an exception if py.test exits
with a failure value. This forces things to stop if tests fail.
"""
print("\ntest")
os.environ["LANG"] = "en_GB.utf8"
return subprocess.run(
[sys.executable, "-m", PYTEST] + list(pytest_args)
).returncode
@export
def coverage():
"""View a report on test coverage
Call py.test with coverage turned on
"""
print("\ncoverage")
os.environ["LANG"] = "en_GB.utf8"
return subprocess.run(
[
sys.executable,
"-m",
PYTEST,
"-v",
"--cov-config",
".coveragerc",
"--cov-report",
"term-missing",
"--cov=mu",
"tests/",
]
).returncode
@export
def flake8(*flake8_args):
"""Run the flake8 code checker
Call flake8 on all files as specified by setup.cfg
"""
print("\nflake8")
os.environ["PYFLAKES_BUILTINS"] = "_"
return subprocess.run([sys.executable, "-m", FLAKE8]).returncode
@export
def tidy():
"""Tidy code with the 'black' formatter."""
clean()
print("\nTidy")
for target in [
"setup.py",
"make.py",
"mu",
"package",
"tests",
"utils",
]:
return_code = subprocess.run(
[sys.executable, "-m", BLACK, target] + BLACK_FLAGS
).returncode
if return_code != 0:
return return_code
return 0
@export
def black():
"""Check code with the 'black' formatter."""
clean()
print("\nblack")
    # Black is not available in Python 3.5; in that case let the checks continue
try:
import black as black_ # noqa: F401
except ImportError as e:
python_version = sys.version_info
if python_version.major == 3 and python_version.minor == 5:
print("Black checks are not available in Python 3.5.")
return 0
else:
print(e)
return 1
for target in [
"setup.py",
"make.py",
"mu",
"package",
"tests",
"utils",
]:
return_code = subprocess.run(
[sys.executable, "-m", BLACK, target, "--check"] + BLACK_FLAGS
).returncode
if return_code != 0:
return return_code
return 0
@export
def check():
"""Run all the checkers and tests"""
print("\nCheck")
funcs = [clean, black, flake8, coverage]
for func in funcs:
return_code = func()
if return_code != 0:
return return_code
return 0
@export
def clean():
"""Reset the project and remove auto-generated assets"""
print("\nClean")
_rmtree("build")
_rmtree("dist")
_rmtree(".eggs")
_rmtree("docs/_build")
_rmtree(".pytest_cache")
_rmtree("lib")
_rmtree(".git/avatar/") # Created with `make video`
_rmtree("venv-pup") # Created wth `make macos/win64`
# TODO: recursive __pycache__ directories
_rmfiles(".", ".coverage")
_rmfiles(".", "*.egg-info")
_rmfiles(".", "*.mp4") # Created with `make video`
_rmfiles(".", "*.pyc")
_rmfiles("mu/locale", "*.pot")
_rmfiles("mu/wheels", "*.zip")
return 0
def _translate_lang(lang):
"""Returns `value` from `lang` expected to be like 'LANG=value'."""
match = re.search(r"^LANG=(.*)$", lang)
if not match:
raise RuntimeError("Need LANG=xx_XX argument.")
value = match.group(1)
if not value:
raise RuntimeError("Need LANG=xx_XX argument.")
return value
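# Example: _translate_lang("LANG=es_ES") returns "es_ES"; a missing or
# empty value raises RuntimeError.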
_MU_LOCALE_DIRNAME = os.path.join("mu", "locale")
_MESSAGES_POT_FILENAME = os.path.join(_MU_LOCALE_DIRNAME, "messages.pot")
@export
def translate_begin(lang=""):
"""Create/update a mu.po file for translation."""
lang = _translate_lang(lang)
result = _translate_extract()
if result != 0:
raise RuntimeError("Failed creating the messages catalog file.")
mu_po_filename = os.path.join(
_MU_LOCALE_DIRNAME,
lang,
"LC_MESSAGES",
"mu.po",
)
update = os.path.exists(mu_po_filename)
cmd = [
"pybabel",
"update" if update else "init",
"-i",
_MESSAGES_POT_FILENAME,
"-o",
mu_po_filename,
"--locale={locale}".format(locale=lang),
]
result = subprocess.run(cmd).returncode
print(
"{action} {mu_po_filename}.".format(
action="Updated" if update else "Created",
mu_po_filename=repr(mu_po_filename),
)
)
print(
"Review its translation strings "
"and finalize with 'make translate_done'."
)
return result
_TRANSLATE_IGNORE_DIRNAMES = {
os.path.join("mu", "modes", "api"),
os.path.join("mu", "contrib"),
}
def _translate_ignore(dirname):
"""Return True if `dirname` files should be excluded from translation."""
return any(dirname.startswith(dn) for dn in _TRANSLATE_IGNORE_DIRNAMES)
def _translate_filenames():
"""Returns a sorted list of filenames with translatable strings."""
py_filenames = []
for dirname, _, filenames in os.walk("mu"):
if _translate_ignore(dirname):
continue
py_filenames.extend(
os.path.join(dirname, fn) for fn in filenames if fn.endswith(".py")
)
return sorted(py_filenames)
def _translate_extract():
"""Creates the message catalog template messages.pot file."""
cmd = [
"pybabel",
"extract",
"-o",
_MESSAGES_POT_FILENAME,
*_translate_filenames(),
]
return subprocess.run(cmd).returncode
@export
def translate_done(lang=""):
"""Compile translation strings in mu.po to mu.mo file."""
lang = _translate_lang(lang)
lc_messages_dirname = os.path.join(
_MU_LOCALE_DIRNAME,
lang,
"LC_MESSAGES",
)
mu_po_filename = os.path.join(lc_messages_dirname, "mu.po")
mu_mo_filename = os.path.join(lc_messages_dirname, "mu.mo")
cmd = [
"pybabel",
"compile",
"-i",
mu_po_filename,
"-o",
mu_mo_filename,
"--locale={locale}".format(locale=lang),
]
return subprocess.run(cmd).returncode
@export
def translate_test(lang=""):
"""Run translate_done and lauch Mu in the given LANG."""
result = translate_done(lang)
if result != 0:
raise RuntimeError("Failed compiling the mu.po file.")
local_env = dict(os.environ)
local_env["LANG"] = _translate_lang(lang)
return subprocess.run(
[sys.executable, "-m", "mu"], env=local_env
).returncode
@export
def run():
"""Run Mu from within a virtual environment"""
clean()
if not os.environ.get("VIRTUAL_ENV"):
raise RuntimeError(
"Cannot run Mu;" "your Python virtualenv is not activated"
)
return subprocess.run([sys.executable, "-m", "mu"]).returncode
@export
def dist():
"""Generate a source distribution and a binary wheel"""
if check() != 0:
raise RuntimeError("Check failed")
print("Checks pass; good to package")
return subprocess.run(
[sys.executable, "setup.py", "sdist", "bdist_wheel"]
).returncode
@export
def publish_test():
"""Upload to a test PyPI"""
dist()
print("Packaging complete; upload to PyPI")
return subprocess.run(
[
sys.executable,
"-m",
"twine",
"upload",
"-r",
"test",
"--sign",
"dist/*",
]
).returncode
@export
def publish_live():
"""Upload to PyPI"""
dist()
print("Packaging complete; upload to PyPI")
return subprocess.run(
[sys.executable, "-m", "twine", "upload", "--sign", "dist/*"]
).returncode
_PUP_PBS_URLs = {
32: "https://github.com/indygreg/python-build-standalone/releases/download/20200822/cpython-3.7.9-i686-pc-windows-msvc-shared-pgo-20200823T0159.tar.zst", # noqa: E501
64: None,
}
def _build_windows_msi(bitness=64):
"""Build Windows MSI installer"""
try:
pup_pbs_url = _PUP_PBS_URLs[bitness]
except KeyError:
raise ValueError("bitness") from None
if check() != 0:
raise RuntimeError("Check failed")
print("Fetching wheels")
subprocess.check_call([sys.executable, "-m", "mu.wheels"])
print("Building {}-bit Windows installer".format(bitness))
if pup_pbs_url:
os.environ["PUP_PBS_URL"] = pup_pbs_url
cmd_sequence = (
[sys.executable, "-m", "virtualenv", "venv-pup"],
["./venv-pup/Scripts/pip.exe", "install", "pup"],
[
"./venv-pup/Scripts/pup.exe",
"package",
"--launch-module=mu",
"--nice-name=Mu Editor",
"--icon-path=./package/icons/win_icon.ico",
"--license-path=./LICENSE",
".",
],
["cmd.exe", "/c", "dir", r".\dist"],
)
try:
for cmd in cmd_sequence:
print("Running:", " ".join(cmd))
subprocess.check_call(cmd)
finally:
shutil.rmtree("./venv-pup", ignore_errors=True)
@export
def win32():
"""Build 32-bit Windows installer"""
_build_windows_msi(bitness=32)
@export
def win64():
"""Build 64-bit Windows installer"""
_build_windows_msi(bitness=64)
@export
def docs():
"""Build the docs"""
cwd = os.getcwd()
os.chdir("docs")
if os.name == "nt":
cmds = ["cmd", "/c", "make.bat", "html"]
else:
cmds = ["make", "html"]
try:
return subprocess.run(cmds).returncode
except Exception:
return 1
finally:
os.chdir(cwd)
@export
def help():
"""Display all commands with their description in alphabetical order"""
module_doc = sys.modules["__main__"].__doc__ or "check"
print(module_doc + "\n" + "=" * len(module_doc) + "\n")
for command, function in sorted(_exported.items()):
doc = function.__doc__
if doc:
first_line = doc.splitlines()[0]
else:
first_line = ""
print("make {} - {}".format(command, first_line))
def main(command="help", *args):
"""Dispatch on command name, passing all remaining parameters to the
module-level function.
"""
try:
function = _exported[command]
except KeyError:
raise RuntimeError("No such command: %s" % command)
else:
return function(*args)
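# Example: "python make.py test -x" dispatches to test("-x"); an unknown
# command name raises RuntimeError with the offending name.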
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
| mu-editor/mu | make.py | Python | gpl-3.0 | 12,681 |
# Generated by Django 2.0 on 2018-02-26 22:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('CreateYourLaws', '0012_codeblock_is_cdp'),
]
operations = [
migrations.RenameField(
model_name='codeblock',
old_name='is_cdp',
new_name='is_cbp',
),
]
| denisjul/democratos | democratos/CreateYourLaws/migrations/0013_auto_20180226_2211.py | Python | gpl-3.0 | 369 |
import json
import socket
import sys
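# Editor's note: usage inferred from the sys.argv indexing below.
#   python sender.py <channel> <which> <message words...>
# Sends a single JSON-encoded "start" command to a listener on localhost:8080.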
attackSocket = socket.socket()
attackSocket.connect(('localhost', 8080))
attackSocket.send("{0}\r\n".format(
json.dumps(
{'membership': "full", 'channel': sys.argv[1], 'message': ' '.join(sys.argv[3:]), 'type': "start", 'which': sys.argv[2]}
)).encode('utf-8'))
attackSocket.close()
| randomrandomlol123/fewgewgewgewhg | sender.py | Python | gpl-3.0 | 325 |
import os
import unittest
from vsg.rules import process
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_035_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_035_test_input.fixed.vhd'), lExpected)
lExpectedCompactAlignmentFalse = []
lExpectedCompactAlignmentFalse.append('')
utils.read_file(os.path.join(sTestDir, 'rule_035_test_input.fixed_compact_alignment_false.vhd'), lExpectedCompactAlignmentFalse)
class test_process_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_035(self):
oRule = process.rule_035()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'process')
self.assertEqual(oRule.identifier, '035')
lExpected = [30, 31, 35, 38]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_035(self):
oRule = process.rule_035()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
def test_rule_035_compact_alignment_false(self):
oRule = process.rule_035()
oRule.compact_alignment = False
oRule.include_lines_without_comments = False
lExpected = [30, 31, 38]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_035_compact_alignment_false(self):
oRule = process.rule_035()
oRule.compact_alignment = False
oRule.include_lines_without_comments = False
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpectedCompactAlignmentFalse, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/process/test_rule_035.py | Python | gpl-3.0 | 2,131 |
"""
Indivo Model for VideoMessage
"""
from fact import Fact
from django.db import models
from django.conf import settings
class VideoMessage(Fact):
  file_id = models.CharField(max_length=200)
  storage_type = models.CharField(max_length=200)
  subject = models.CharField(max_length=200)
  from_str = models.CharField(max_length=200)
  date_recorded = models.DateTimeField(null=True)
  date_sent = models.DateTimeField(null=True)
def __unicode__(self):
return 'VideoMessage %s' % self.id
| newmediamedicine/indivo_server_1_0 | indivo/models/fact_objects/videomessage.py | Python | gpl-3.0 | 489 |
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2010 Toms Bauģis <toms.baugis at gmail.com>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import gtk
import gobject
import time
import datetime as dt
from ..lib import stuff, graphics, pytweener
from ..configuration import conf
class Selection(graphics.Sprite):
def __init__(self, start_time = None, end_time = None):
graphics.Sprite.__init__(self, z_order = 100)
self.start_time, self.end_time = None, None
self.width, self.height = None, None
self.fill = None # will be set to proper theme color on render
self.fixed = False
self.start_label = graphics.Label("", 8, "#333", visible = False)
self.end_label = graphics.Label("", 8, "#333", visible = False)
self.duration_label = graphics.Label("", 8, "#FFF", visible = False)
self.add_child(self.start_label, self.end_label, self.duration_label)
self.connect("on-render", self.on_render)
def on_render(self, sprite):
if not self.fill: # not ready yet
return
self.graphics.rectangle(0, 0, self.width, self.height)
self.graphics.fill(self.fill, 0.3)
self.graphics.rectangle(0.5, 0.5, self.width, self.height)
self.graphics.stroke(self.fill)
# adjust labels
self.start_label.visible = self.fixed == False and self.start_time is not None
if self.start_label.visible:
self.start_label.text = self.start_time.strftime("%H:%M")
if self.x - self.start_label.width - 5 > 0:
self.start_label.x = -self.start_label.width - 5
else:
self.start_label.x = 5
self.start_label.y = self.height
self.end_label.visible = self.fixed == False and self.end_time is not None
if self.end_label.visible:
self.end_label.text = self.end_time.strftime("%H:%M")
self.end_label.x = self.width + 5
self.end_label.y = self.height
duration = self.end_time - self.start_time
duration = int(duration.seconds / 60)
self.duration_label.text = "%02d:%02d" % (duration / 60, duration % 60)
self.duration_label.visible = self.duration_label.width < self.width
if self.duration_label.visible:
self.duration_label.y = (self.height - self.duration_label.height) / 2
self.duration_label.x = (self.width - self.duration_label.width) / 2
else:
self.duration_label.visible = False
class DayLine(graphics.Scene):
__gsignals__ = {
"on-time-chosen": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
}
def __init__(self, start_time = None):
graphics.Scene.__init__(self)
day_start = conf.get("day_start_minutes")
self.day_start = dt.time(day_start / 60, day_start % 60)
self.view_time = start_time or dt.datetime.combine(dt.date.today(), self.day_start)
self.scope_hours = 24
self.fact_bars = []
self.categories = []
self.connect("on-enter-frame", self.on_enter_frame)
self.connect("on-mouse-move", self.on_mouse_move)
self.connect("on-mouse-down", self.on_mouse_down)
self.connect("on-mouse-up", self.on_mouse_up)
self.connect("on-click", self.on_click)
self.plot_area = graphics.Sprite()
self.selection = Selection()
self.chosen_selection = Selection()
self.plot_area.add_child(self.selection, self.chosen_selection)
self.drag_start = None
self.current_x = None
self.snap_points = []
self.add_child(self.plot_area)
def plot(self, date, facts, select_start, select_end = None):
for bar in self.fact_bars:
self.plot_area.sprites.remove(bar)
self.fact_bars = []
for fact in facts:
fact_bar = graphics.Rectangle(0, 0, fill = "#aaa", stroke="#aaa") # dimensions will depend on screen situation
fact_bar.fact = fact
if fact.category in self.categories:
fact_bar.category = self.categories.index(fact.category)
else:
fact_bar.category = len(self.categories)
self.categories.append(fact.category)
self.plot_area.add_child(fact_bar)
self.fact_bars.append(fact_bar)
self.view_time = dt.datetime.combine((select_start - dt.timedelta(hours=self.day_start.hour, minutes=self.day_start.minute)).date(), self.day_start)
if select_start and select_start > dt.datetime.now():
select_start = dt.datetime.now()
self.chosen_selection.start_time = select_start
if select_end and select_end > dt.datetime.now():
select_end = dt.datetime.now()
self.chosen_selection.end_time = select_end
self.chosen_selection.width = None
self.chosen_selection.fixed = True
self.chosen_selection.visible = True
self.redraw()
def on_mouse_down(self, scene, event):
self.drag_start = self.current_x
self.chosen_selection.visible = False
def on_mouse_up(self, scene, event):
if self.drag_start:
self.drag_start = None
start_time = self.selection.start_time
if start_time > dt.datetime.now():
start_time = dt.datetime.now()
end_time = self.selection.end_time
self.new_selection()
self.emit("on-time-chosen", start_time, end_time)
def on_click(self, scene, event, target):
self.drag_start = None
# If self.selection.start_time is somehow None, just reset the selection.
# This can sometimes happen when dragging left to right in small
# increments. https://bugzilla.gnome.org/show_bug.cgi?id=669478
        if self.selection is None or self.selection.start_time is None:
self.new_selection()
return
start_time = self.selection.start_time
if start_time > dt.datetime.now():
start_time = dt.datetime.now()
end_time = None
if self.fact_bars:
times = [bar.fact.start_time for bar in self.fact_bars if bar.fact.start_time - start_time > dt.timedelta(minutes=5)]
times.extend([bar.fact.start_time + bar.fact.delta for bar in self.fact_bars if bar.fact.start_time + bar.fact.delta - start_time > dt.timedelta(minutes=5)])
if times:
end_time = min(times)
self.new_selection()
self.emit("on-time-chosen", start_time, end_time)
def new_selection(self):
self.plot_area.sprites.remove(self.selection)
self.selection = Selection()
self.plot_area.add_child(self.selection)
self.redraw()
def on_mouse_move(self, scene, event):
if self.current_x:
active_bar = None
# find if we are maybe on a bar
for bar in self.fact_bars:
if bar.x < self.current_x < bar.x + bar.width:
active_bar = bar
break
if active_bar:
self.set_tooltip_text("%s - %s" % (active_bar.fact.activity, active_bar.fact.category))
else:
self.set_tooltip_text("")
self.redraw()
def on_enter_frame(self, scene, context):
g = graphics.Graphics(context)
self.plot_area.y = 15.5
self.plot_area.height = self.height - 30
        vertical = min(self.plot_area.height / 5, 7)
minute_pixel = (self.scope_hours * 60.0 - 15) / self.width
snap_points = []
g.set_line_style(width=1)
bottom = self.plot_area.y + self.plot_area.height
for bar in self.fact_bars:
bar.y = vertical * bar.category + 5
bar.height = vertical
bar_start_time = bar.fact.start_time - self.view_time
minutes = bar_start_time.seconds / 60 + bar_start_time.days * self.scope_hours * 60
bar.x = round(minutes / minute_pixel) + 0.5
bar.width = round((bar.fact.delta).seconds / 60 / minute_pixel)
if not snap_points or bar.x - snap_points[-1][0] > 1:
snap_points.append((bar.x, bar.fact.start_time))
if not snap_points or bar.x + bar.width - snap_points[-1][0] > 1:
snap_points.append((bar.x + bar.width, bar.fact.start_time + bar.fact.delta))
self.snap_points = snap_points
if self.chosen_selection.start_time and self.chosen_selection.width is None:
# we have time but no pixels
minutes = round((self.chosen_selection.start_time - self.view_time).seconds / 60 / minute_pixel) + 0.5
self.chosen_selection.x = minutes
if self.chosen_selection.end_time:
self.chosen_selection.width = round((self.chosen_selection.end_time - self.chosen_selection.start_time).seconds / 60 / minute_pixel)
else:
self.chosen_selection.width = 0
self.chosen_selection.height = self.chosen_selection.parent.height
            # use the opportunity to set proper colors too
self.chosen_selection.fill = self.get_style().bg[gtk.STATE_SELECTED].to_string()
self.chosen_selection.duration_label.color = self.get_style().fg[gtk.STATE_SELECTED].to_string()
self.selection.visible = self._mouse_in # TODO - think harder about the mouse_out event
self.selection.width = 0
self.selection.height = self.selection.parent.height
if self.mouse_x:
start_x = max(min(self.mouse_x, self.width-1), 0) #mouse, but within screen regions
# check for snap points
start_x = start_x + 0.5
minutes = int(round(start_x * minute_pixel / 15)) * 15
start_time = self.view_time + dt.timedelta(hours = minutes / 60, minutes = minutes % 60)
if snap_points:
delta, closest_snap, time = min((abs(start_x - i), i, time) for i, time in snap_points)
if abs(closest_snap - start_x) < 5 and (not self.drag_start or self.drag_start != closest_snap):
start_x = closest_snap
minutes = (time.hour - self.day_start.hour) * 60 + time.minute - self.day_start.minute
start_time = time
self.current_x = minutes / minute_pixel
end_time, end_x = None, None
if self.drag_start:
minutes = int(self.drag_start * minute_pixel)
end_time = self.view_time + dt.timedelta(hours = minutes / 60, minutes = minutes % 60)
end_x = round(self.drag_start) + 0.5
if end_time and end_time < start_time:
start_time, end_time = end_time, start_time
start_x, end_x = end_x, start_x
self.selection.start_time = start_time
self.selection.end_time = end_time
self.selection.x = start_x
if end_time:
self.selection.width = end_x - start_x
self.selection.y = 0
self.selection.fill = self.get_style().bg[gtk.STATE_SELECTED].to_string()
self.selection.duration_label.color = self.get_style().fg[gtk.STATE_SELECTED].to_string()
#time scale
g.set_color("#000")
background = self.get_style().bg[gtk.STATE_NORMAL].to_string()
text = self.get_style().text[gtk.STATE_NORMAL].to_string()
tick_color = g.colors.contrast(background, 80)
layout = g.create_layout(size = 8)
for i in range(self.scope_hours * 60):
label_time = (self.view_time + dt.timedelta(minutes=i))
g.set_color(tick_color)
if label_time.minute == 0:
g.move_to(round(i / minute_pixel) + 0.5, bottom - 15)
g.line_to(round(i / minute_pixel) + 0.5, bottom)
g.stroke()
elif label_time.minute % 15 == 0:
g.move_to(round(i / minute_pixel) + 0.5, bottom - 5)
g.line_to(round(i / minute_pixel) + 0.5, bottom)
g.stroke()
if label_time.minute == 0 and label_time.hour % 4 == 0:
if label_time.hour == 0:
g.move_to(round(i / minute_pixel) + 0.5, self.plot_area.y)
g.line_to(round(i / minute_pixel) + 0.5, bottom)
label_minutes = label_time.strftime("%b %d")
else:
label_minutes = label_time.strftime("%H<small><sup>%M</sup></small>")
g.set_color(text)
layout.set_markup(label_minutes)
label_w, label_h = layout.get_pixel_size()
g.move_to(round(i / minute_pixel) + 2, 0)
context.show_layout(layout)
#current time
if self.view_time < dt.datetime.now() < self.view_time + dt.timedelta(hours = self.scope_hours):
minutes = round((dt.datetime.now() - self.view_time).seconds / 60 / minute_pixel) + 0.5
g.move_to(minutes, self.plot_area.y)
g.line_to(minutes, bottom)
g.stroke("#f00", 0.4)
snap_points.append(minutes - 0.5)
| landonb/hamster-applet | src/hamster/widgets/dayline.py | Python | gpl-3.0 | 13,958 |
import simplejson
import traceback
import logging
import os
import requests
from collections import OrderedDict
from string import ascii_letters, digits
ID = "nem"
permission = 1
nem_logger = logging.getLogger("NEM_Tools")
# Colour Constants for List and Multilist command
COLOURPREFIX = unichr(3)
COLOUREND = COLOURPREFIX
BOLD = unichr(2)
DARKGREEN = COLOURPREFIX + "03"
RED = COLOURPREFIX + "05"
PURPLE = COLOURPREFIX + "06"
ORANGE = COLOURPREFIX + "07"
BLUE = COLOURPREFIX + "12"
PINK = COLOURPREFIX + "13"
GRAY = COLOURPREFIX + "14"
LIGHTGRAY = COLOURPREFIX + "15"
ALLOWED_IN_FILENAME = "-_.() %s%s" % (ascii_letters, digits)
# Colour Constants End
class NotEnoughClasses():
def getLatestVersion(self):
try:
return self.fetch_json("https://bot.notenoughmods.com/?json")
except:
print("Failed to get NEM versions, falling back to hard-coded")
nem_logger.exception("Failed to get NEM versions, falling back to hard-coded.")
#traceb = str(traceback.format_exc())
# print(traceb)
return ["1.4.5", "1.4.6-1.4.7", "1.5.1", "1.5.2", "1.6.1", "1.6.2", "1.6.4",
"1.7.2", "1.7.4", "1.7.5", "1.7.7", "1.7.9", "1.7.10"]
def __init__(self):
self.requests_session = requests.Session()
self.requests_session.headers = {
'User-agent': 'NotEnoughMods:Tools/1.X (+https://github.com/NotEnoughMods/NotEnoughModPolling)'
}
self.requests_session.max_redirects = 5
self.cache_dir = os.path.join("commands", "NEM", "cache")
self.cache_last_modified = {}
self.cache_etag = {}
self.versions = self.getLatestVersion()
        self.version = self.versions[-1]
def normalize_filename(self, name):
return ''.join(c for c in name if c in ALLOWED_IN_FILENAME)
def fetch_page(self, url, timeout=10, decode_json=False, cache=False):
try:
if cache:
fname = self.normalize_filename(url)
filepath = os.path.join(self.cache_dir, fname)
if os.path.exists(filepath):
# get it (conditionally) and, if necessary, store the new version
headers = {}
# set etag if it exists
etag = self.cache_etag.get(url)
if etag:
headers['If-None-Match'] = etag
# set last-modified if it exists
last_modified = self.cache_last_modified.get(url)
if last_modified:
headers['If-Modified-Since'] = '"{}"'.format(last_modified)
request = self.requests_session.get(url, timeout=timeout, headers=headers)
if request.status_code == 304:
# load from cache
with open(filepath, 'r') as f:
# and return it
if decode_json:
return simplejson.load(f)
else:
return f.read()
else:
# cache the new version
with open(filepath, 'w') as f:
f.write(request.content)
self.cache_etag[url] = request.headers.get('etag')
self.cache_last_modified[url] = request.headers.get('last-modified')
# and return it
if decode_json:
return request.json()
else:
return request.content
else:
# get it and cache it
request = self.requests_session.get(url, timeout=timeout)
with open(filepath, 'w') as f:
f.write(request.content)
self.cache_etag[url] = request.headers.get('etag')
self.cache_last_modified[url] = request.headers.get('last-modified')
if decode_json:
return request.json()
else:
return request.content
else:
# no caching
request = self.requests_session.get(url, timeout=timeout)
if decode_json:
return request.json()
else:
return request.content
except:
traceback.print_exc()
pass
# most likely a timeout
def fetch_json(self, *args, **kwargs):
return self.fetch_page(*args, decode_json=True, **kwargs)
NEM = NotEnoughClasses()
def execute(self, name, params, channel, userdata, rank):
try:
command = commands[params[0]]
command(self, name, params, channel, userdata, rank)
except:
self.sendMessage(channel, "Invalid sub-command!")
self.sendMessage(channel, "See \"=nem help\" for help")
def setlist(self, name, params, channel, userdata, rank):
if len(params) != 2:
self.sendMessage(channel,
"{name}: Insufficient amount of parameters provided.".format(name=name)
)
self.sendMessage(channel,
"{name}: {setlistHelp}".format(name=name,
setlistHelp=help["setlist"][0])
)
else:
NEM.version = str(params[1])
self.sendMessage(channel,
"switched list to: "
"{bold}{blue}{version}{colourEnd}".format(bold=BOLD,
blue=BLUE,
version=params[1],
colourEnd=COLOUREND)
)
def multilist(self, name, params, channel, userdata, rank):
if len(params) != 2:
self.sendMessage(channel,
"{name}: Insufficient amount of parameters provided.".format(name=name))
self.sendMessage(channel,
"{name}: {multilistHelp}".format(name=name,
multilistHelp=help["multilist"][0])
)
else:
try:
jsonres = {}
results = OrderedDict()
modName = params[1]
for version in NEM.versions:
jsonres[version] = NEM.fetch_json("https://bot.notenoughmods.com/" + requests.utils.quote(version) + ".json", cache=True)
for i, mod in enumerate(jsonres[version]):
if modName.lower() == mod["name"].lower():
results[version] = i
break
else:
aliases = [mod_alias.lower() for mod_alias in mod["aliases"]]
if modName.lower() in aliases:
results[version] = i
count = len(results)
if count == 0:
self.sendMessage(channel, name + ": mod not present in NEM.")
return
elif count == 1:
count = str(count) + " MC version"
else:
count = str(count) + " MC versions"
self.sendMessage(channel, "Listing " + count + " for \"" + params[1] + "\":")
for version in results.iterkeys():
alias = ""
modData = jsonres[version][results[version]]
if modData["aliases"]:
alias_joinText = "{colourEnd}, {colour}".format(colourEnd=COLOUREND,
colour=PINK)
alias_text = alias_joinText.join(modData["aliases"])
alias = "({colour}{text}{colourEnd}) ".format(colourEnd=COLOUREND,
colour=PINK,
text=alias_text)
comment = ""
if modData["comment"] != "":
comment = "({colour}{text}{colourEnd}) ".format(colourEnd=COLOUREND,
colour=GRAY,
text=modData["comment"])
dev = ""
try:
if modData["dev"] != "":
dev = ("({colour}dev{colourEnd}: "
"{colour2}{version}{colourEnd}) ".format(colourEnd=COLOUREND,
colour=GRAY,
colour2=RED,
version=modData["dev"])
)
except Exception as error:
print(error)
traceback.print_exc()
self.sendMessage(channel,
"{bold}{blue}{mcversion}{colourEnd}{bold}: "
"{purple}{name}{colourEnd} {aliasString}"
"{darkgreen}{version}{colourEnd} {devString}"
"{comment}{orange}{shorturl}{colourEnd}".format(name=modData["name"],
aliasString=alias,
devString=dev,
comment=comment,
version=modData["version"],
shorturl=modData["shorturl"],
mcversion=version,
bold=BOLD,
blue=BLUE,
purple=PURPLE,
darkgreen=DARKGREEN,
orange=ORANGE,
colourEnd=COLOUREND)
)
except Exception as error:
self.sendMessage(channel, name + ": " + str(error))
traceback.print_exc()
def list(self, name, params, channel, userdata, rank):
if len(params) < 2:
self.sendMessage(channel,
"{name}: Insufficient amount of parameters provided.".format(name=name))
self.sendMessage(channel,
"{name}: {helpEntry}".format(name=name,
helpEntry=help["list"][0])
)
return
if len(params) >= 3:
version = params[2]
else:
version = NEM.version
try:
result = NEM.fetch_page("https://bot.notenoughmods.com/" + requests.utils.quote(version) + ".json", cache=True)
if not result:
self.sendMessage(channel,
"{0}: Could not fetch the list. Are you sure it exists?".format(name)
)
return
jsonres = simplejson.loads(result, strict=False)
results = []
i = -1
for mod in jsonres:
i = i + 1
if str(params[1]).lower() in mod["name"].lower():
results.append(i)
continue
else:
aliases = mod["aliases"]
for alias in aliases:
if params[1].lower() in alias.lower():
results.append(i)
break
count = len(results)
if count == 0:
self.sendMessage(channel, name + ": no results found.")
return
elif count == 1:
count = str(count) + " result"
else:
count = str(count) + " results"
self.sendMessage(channel,
"Listing {count} for \"{term}\" in "
"{bold}{colour}{version}"
"{colourEnd}{bold}".format(count=count,
term=params[1],
version=version,
bold=BOLD,
colourEnd=COLOUREND,
colour=BLUE)
)
for line in results:
            alias = ""
if jsonres[line]["aliases"]:
alias_joinText = "{colourEnd}, {colour}".format(colourEnd=COLOUREND,
colour=PINK)
alias_text = alias_joinText.join(jsonres[line]["aliases"])
alias = "({colour}{text}{colourEnd}) ".format(colourEnd=COLOUREND,
colour=PINK,
text=alias_text)
comment = ""
if jsonres[line]["comment"] != "":
comment = "({colour}{text}{colourEnd}) ".format(colourEnd=COLOUREND,
colour=GRAY,
text=jsonres[line]["comment"])
dev = ""
try:
if jsonres[line]["dev"] != "":
dev = ("({colour}dev{colourEnd}): "
"{colour2}{version}{colourEnd})".format(colourEnd=COLOUREND,
colour=GRAY,
colour2=RED,
version=jsonres[line]["dev"])
)
except Exception as error:
print(error)
traceback.print_exc()
self.sendMessage(channel,
"{purple}{name}{colourEnd} {aliasString}"
"{darkgreen}{version}{colourEnd} {devString}"
"{comment}{orange}{shorturl}{colourEnd}".format(name=jsonres[line]["name"],
aliasString=alias,
devString=dev,
comment=comment,
version=jsonres[line]["version"],
shorturl=jsonres[line]["shorturl"],
purple=PURPLE,
darkgreen=DARKGREEN,
orange=ORANGE,
colourEnd=COLOUREND)
)
except Exception as error:
self.sendMessage(channel, "{0}: {1}".format(name, error))
traceback.print_exc()
def compare(self, name, params, channel, userdata, rank):
try:
oldVersion, newVersion = params[1], params[2]
oldJson = NEM.fetch_json("https://bot.notenoughmods.com/" + requests.utils.quote(oldVersion) + ".json", cache=True)
newJson = NEM.fetch_json("https://bot.notenoughmods.com/" + requests.utils.quote(newVersion) + ".json", cache=True)
newMods = {modInfo["name"].lower(): True for modInfo in newJson}
missingMods = []
for modInfo in oldJson:
old_modName = modInfo["name"].lower()
if old_modName not in newMods:
missingMods.append(modInfo["name"])
path = "commands/modbot.mca.d3s.co/htdocs/compare/{0}...{1}.json".format(oldVersion, newVersion)
with open(path, "w") as f:
f.write(simplejson.dumps(missingMods, sort_keys=True, indent=4 * ' '))
self.sendMessage(channel,
"{0} mods died trying to update to {1}".format(len(missingMods), newVersion)
)
except Exception as error:
self.sendMessage(channel, "{0}: {1}".format(name, error))
traceback.print_exc()
def about(self, name, params, channel, userdata, rank):
self.sendMessage(channel, "Not Enough Mods toolkit for IRC by SinZ & Yoshi2 v4.0")
def help(self, name, params, channel, userdata, rank):
if len(params) == 1:
self.sendMessage(channel,
"{0}: Available commands: {1}".format(name,
", ".join(help))
)
else:
command = params[1]
if command in help:
for line in help[command]:
self.sendMessage(channel, name + ": " + line)
else:
self.sendMessage(channel, name + ": Invalid command provided")
def force_cacheRedownload(self, name, params, channel, userdata, rank):
if self.rankconvert[rank] >= 3:
for version in NEM.versions:
url = "https://bot.notenoughmods.com/" + requests.utils.quote(version) + ".json"
normalized = NEM.normalize_filename(url)
filepath = os.path.join(NEM.cache_dir, normalized)
if os.path.exists(filepath):
                NEM.cache_last_modified[url] = None
                NEM.cache_etag[url] = None
self.sendMessage(channel, "Cache Timestamps have been reset. Cache will be redownloaded on the next fetching.")
commands = {
"list": list,
"multilist": multilist,
"about": about,
"help": help,
"setlist": setlist,
"compare": compare,
"forceredownload": force_cacheRedownload
}
help = {
"list": ["=nem list <search> <version>",
"Searches the NotEnoughMods database for <search> and returns all results to IRC."],
"about": ["=nem about",
"Shows some info about the NEM plugin."],
"help": ["=nem help [command]",
"Shows the help info about [command] or lists all commands for this plugin."],
"setlist": ["=nem setlist <version>",
"Sets the default version to be used by other commands to <version>."],
"multilist": ["=nem multilist <modName or alias>",
"Searches the NotEnoughMods database for modName or alias in all MC versions."],
"compare": ["=nem compare <oldVersion> <newVersion>",
"Compares the NEMP entries for two different MC versions and says how many mods haven't been updated to the new version."]
}
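# --- Editor's sketch (not part of the original plugin) ---
# fetch_page stores each URL's body on disk under a normalize_filename name
# and revalidates it with If-None-Match / If-Modified-Since headers, reusing
# the cached copy on a 304 reply. Quick illustration of the cache-file naming
# (the URL below is only an example):
if __name__ == "__main__":
    print(NEM.normalize_filename("https://bot.notenoughmods.com/1.7.10.json"))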
| NotEnoughMods/NotEnoughModPolling | NotEnoughMods_Tools.py | Python | gpl-3.0 | 19,543 |
# Frozen-app bootstrap stub: locate the compiled pygame.imageext extension in
# the bundled lib-dynload directory and load it in place of this placeholder.
def __load():
import imp, os, sys
ext = 'pygame/imageext.so'
for path in sys.path:
if not path.endswith('lib-dynload'):
continue
ext_path = os.path.join(path, ext)
if os.path.exists(ext_path):
mod = imp.load_dynamic(__name__, ext_path)
break
else:
raise ImportError(repr(ext) + " not found")
__load()
del __load
| mokuki082/EggDrop | code/build/bdist.macosx-10.6-intel/python3.4-standalone/app/temp/pygame/imageext.py | Python | gpl-3.0 | 397 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-01 20:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djreceive', '0024_auto_20170131_1732'),
]
operations = [
migrations.RemoveField(
model_name='audioabxtrial',
name='result',
),
migrations.AddField(
model_name='audioabxtrial',
name='A',
field=models.CharField(max_length=1024, null=True),
),
migrations.AddField(
model_name='audioabxtrial',
name='B',
field=models.CharField(max_length=1024, null=True),
),
migrations.AddField(
model_name='audioabxtrial',
name='X',
field=models.CharField(max_length=1024, null=True),
),
migrations.AddField(
model_name='audioabxtrial',
name='correct',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='audioabxtrial',
name='key_press',
field=models.IntegerField(null=True),
),
]
| rivasd/djPsych | djreceive/migrations/0025_auto_20170201_1516.py | Python | gpl-3.0 | 1,227 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import *
from unittest import TestCase
__author__ = 'nicolas'
import os
class TestTauPyModel(TestCase):
def test_create_taup_model(self):
"""
See if the create model function in the tau interface runs smoothly.
"""
from taupy import tau
try:
os.remove("ak135.taup")
except FileNotFoundError:
pass
ak135 = tau.TauPyModel("ak135", taup_model_path=".")
os.remove("ak135.taup")
| obspy/TauPy | taupy/tests/test_tauPyModel.py | Python | gpl-3.0 | 645 |
# GNU Solfege - free ear training software
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2011,
# 2013 Tom Cato Amundsen
# Copyright (C) 2013 Jan Baumgart (Folkwang Universitaet der Kuenste)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from solfege import abstract
from solfege import abstract_solmisation_addon
from solfege import gu
from solfege import lessonfile
from solfege import mpd
from solfege.const import solmisation_syllables, solmisation_notenames
class Teacher(abstract.Teacher, abstract_solmisation_addon.SolmisationAddOnClass):
OK = 0
ERR_PICKY = 1
ERR_NO_ELEMS = 2
def __init__(self, exname):
abstract.Teacher.__init__(self, exname)
self.lessonfileclass = lessonfile.HeaderLessonfile
def play_question(self):
if self.q_status == self.QSTATUS_NO:
return
self.play(self.get_music_string())
def guess_answer(self, a):
assert self.q_status in [self.QSTATUS_NEW, self.QSTATUS_WRONG]
v = []
for idx in range(len(self.m_question)):
v.append(self.m_question[idx] == a[idx])
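        # v holds per-position matches; the answer counts as correct only if
        # no position compared unequal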
if not [x for x in v if x == 0]:
self.q_status = self.QSTATUS_SOLVED
self.maybe_auto_new_question()
return 1
else:
self.q_status = self.QSTATUS_WRONG
class RhythmViewer(Gtk.Frame):
def __init__(self, parent):
Gtk.Frame.__init__(self)
self.set_shadow_type(Gtk.ShadowType.IN)
self.g_parent = parent
self.g_box = Gtk.HBox()
self.g_box.show()
self.g_box.set_spacing(gu.PAD_SMALL)
self.g_box.set_border_width(gu.PAD)
self.add(self.g_box)
self.m_data = []
# the number of rhythm elements the viewer is supposed to show
self.m_num_notes = 0
self.g_face = None
self.__timeout = None
def set_num_notes(self, i):
self.m_num_notes = i
def clear(self):
for child in self.g_box.get_children():
child.destroy()
self.m_data = []
def create_holders(self):
"""
create those |__| that represents one beat
"""
if self.__timeout:
GObject.source_remove(self.__timeout)
self.clear()
for x in range(self.m_num_notes):
self.g_box.pack_start(gu.create_png_image('holder'), False, False, 0)
self.m_data = []
    def clear_wrong_part(self):
        """When the user has answered the question, this method is used
        to clear everything after the leading run of correct elements."""
# this assert is always true because if there is no rhythm element,
# then there is a rhythm holder ( |__| )
assert self.m_num_notes == len(self.g_parent.m_t.m_question)
self.g_face.destroy()
self.g_face = None
for n in range(self.m_num_notes):
if self.m_data[n] != self.g_parent.m_t.m_question[n]:
break
for x in range(n, len(self.g_box.get_children())):
self.g_box.get_children()[n].destroy()
self.m_data = self.m_data[:n]
for x in range(n, self.m_num_notes):
self.g_box.pack_start(gu.create_png_image('holder'), False, False, 0)
def add_rhythm_element(self, i):
assert len(self.m_data) <= self.m_num_notes
if len(self.g_box.get_children()) >= self.m_num_notes:
self.g_box.get_children()[self.m_num_notes-1].destroy()
vbox = Gtk.VBox()
vbox.show()
# im = gu.create_rhythm_image(const.RHYTHMS[i])
im = self.g_parent.solbutton(i, False)
vbox.pack_start(im, True, True, 0)
vbox.pack_start(gu.create_png_image('rhythm-wrong'), False, False, 0)
vbox.get_children()[-1].hide()
self.g_box.pack_start(vbox, False, False, 0)
self.g_box.reorder_child(vbox, len(self.m_data))
self.m_data.append(i)
def backspace(self):
if len(self.m_data) > 0:
if self.g_face:
self.g_box.get_children()[-2].destroy()
self.g_face.destroy()
self.g_face = None
self.g_box.get_children()[len(self.m_data)-1].destroy()
self.g_box.pack_start(gu.create_png_image('holder'), False, False, 0)
del self.m_data[-1]
def mark_wrong(self, idx):
"""
Mark the rhythm elements that was wrong by putting the content of
graphics/rhythm-wrong.png (normally a red line) under the element.
"""
self.g_box.get_children()[idx].get_children()[1].show()
def len(self):
"return the number of rhythm elements currently viewed"
return len(self.m_data)
def sad_face(self):
l = gu.HarmonicProgressionLabel(_("Wrong"))
l.show()
self.g_box.pack_start(l, False, False, 0)
self.g_face = Gtk.EventBox()
self.g_face.connect('button_press_event', self.on_sadface_event)
self.g_face.show()
im = Gtk.Image()
im.set_from_stock('solfege-sadface', Gtk.IconSize.LARGE_TOOLBAR)
im.show()
self.g_face.add(im)
self.g_box.pack_start(self.g_face, False, False, 0)
def happy_face(self):
l = gu.HarmonicProgressionLabel(_("Correct"))
l.show()
self.g_box.pack_start(l, False, False, 0)
self.g_face = Gtk.EventBox()
self.g_face.connect('button_press_event', self.on_happyface_event)
self.g_face.show()
im = Gtk.Image()
im.set_from_stock('solfege-happyface', Gtk.IconSize.LARGE_TOOLBAR)
im.show()
self.g_face.add(im)
self.g_box.pack_start(self.g_face, False, False, 0)
def on_happyface_event(self, obj, event):
if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 1:
self.g_parent.new_question()
def on_sadface_event(self, obj, event):
if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 1:
self.clear_wrong_part()
def flash(self, s):
self.clear()
l = Gtk.Label(label=s)
l.set_name("Feedback")
l.set_alignment(0.0, 0.5)
l.show()
self.g_box.pack_start(l, True, True, 0)
self.g_box.set_size_request(
max(l.size_request().width + gu.PAD * 2, self.g_box.size_request().width),
max(l.size_request().height + gu.PAD * 2, self.g_box.size_request().height))
self.__timeout = GObject.timeout_add(2000, self.unflash)
def unflash(self, *v):
self.__timeout = None
self.clear()
class Gui(abstract.Gui, abstract_solmisation_addon.SolmisationAddOnGuiClass):
lesson_heading = _("Solmisation Diktat")
def __init__(self, teacher):
abstract.Gui.__init__(self, teacher)
self.m_key_bindings = {'backspace_ak': self.on_backspace}
self.g_answer_box = Gtk.VBox()
self.answer_buttons = []
self.m_answer_buttons = {}
#-------
hbox = gu.bHBox(self.practise_box)
b = Gtk.Button(_("Play"))
b.show()
b.connect('clicked', self.play_users_answer)
hbox.pack_start(b, False, True, 0)
self.practise_box.pack_start(Gtk.HBox(), False, False,
padding=gu.PAD_SMALL)
self.g_rhythm_viewer = RhythmViewer(self)
self.g_rhythm_viewer.create_holders()
hbox.pack_start(self.g_rhythm_viewer, True, True, 0)
self.practise_box.pack_start(self.g_answer_box, False, False, 0)
# action area
self.std_buttons_add(
('new', self.new_question),
('repeat', self.repeat_question),
#('play_answer', self.play_users_answer),
('give_up', self.give_up),
('backspace', self.on_backspace))
self.practise_box.show_all()
##############
# config_box #
##############
self.add_select_elements_gui()
#--------
self.config_box.pack_start(Gtk.HBox(), False, False,
padding=gu.PAD_SMALL)
self.add_select_num_notes_gui()
#-----
self.config_box.pack_start(Gtk.HBox(), False, False,
padding=gu.PAD_SMALL)
#------
hbox = gu.bHBox(self.config_box, False)
hbox.set_spacing(gu.PAD_SMALL)
hbox.pack_start(Gtk.Label(_("Beats per minute:")), False, False, 0)
spin = gu.nSpinButton(self.m_exname, 'bpm',
Gtk.Adjustment(60, 20, 240, 1, 10))
hbox.pack_start(spin, False, False, 0)
hbox = gu.bHBox(self.config_box, False)
hbox.set_spacing(gu.PAD_SMALL)
hbox.pack_start(gu.nCheckButton(self.m_exname,
"show_first_note",
_("Show the first tone")), False, False, 0)
hbox.pack_start(gu.nCheckButton(self.m_exname,
"play_cadence",
_("Play cadence")), False, False, 0)
self._add_auto_new_question_gui(self.config_box)
self.config_box.show_all()
def solbutton(self, i, connect):
if i > len(solmisation_syllables) or i < 0:
btn = Gtk.Button()
else:
btn = Gtk.Button(solmisation_syllables[i])
btn.show()
if connect:
btn.connect('clicked', self.guess_element, i)
return btn
def select_element_cb(self, button, element_num):
super(Gui, self).select_element_cb(button, element_num)
self.m_answer_buttons[element_num].set_sensitive(button.get_active())
#self.update_answer_buttons()
def on_backspace(self, widget=None):
if self.m_t.q_status == self.QSTATUS_SOLVED:
return
self.g_rhythm_viewer.backspace()
if not self.g_rhythm_viewer.m_data:
self.g_backspace.set_sensitive(False)
def play_users_answer(self, widget):
if self.g_rhythm_viewer.m_data:
melody = ""
p = mpd.MusicalPitch()
for k in self.g_rhythm_viewer.m_data:
melody += " " + mpd.transpose_notename(solmisation_notenames[k], self.m_t.m_transp) + "4"
self.m_t.play(r"\staff{ \time 1000000/4 %s }" % melody)
def guess_element(self, sender, i):
if self.m_t.q_status == self.QSTATUS_NO:
self.g_rhythm_viewer.flash(_("Click 'New' to begin."))
return
if self.m_t.q_status == self.QSTATUS_SOLVED:
return
if self.g_rhythm_viewer.len() == len(self.m_t.m_question):
self.g_rhythm_viewer.clear_wrong_part()
self.g_backspace.set_sensitive(True)
self.g_rhythm_viewer.add_rhythm_element(i)
if self.g_rhythm_viewer.len() == len(self.m_t.m_question):
if self.m_t.guess_answer(self.g_rhythm_viewer.m_data):
self.g_rhythm_viewer.happy_face()
self.std_buttons_answer_correct()
else:
v = []
for idx in range(len(self.m_t.m_question)):
v.append(self.m_t.m_question[idx] == self.g_rhythm_viewer.m_data[idx])
for x in range(len(v)):
if not v[x]:
self.g_rhythm_viewer.mark_wrong(x)
self.g_rhythm_viewer.sad_face()
self.std_buttons_answer_wrong()
def new_question(self, widget=None):
g = self.m_t.new_question()
if g == self.m_t.OK:
self.g_first_rhythm_button.grab_focus()
self.g_rhythm_viewer.set_num_notes(self.get_int('num_notes'))
self.g_rhythm_viewer.create_holders()
self.std_buttons_new_question()
if self.m_t.get_bool('show_first_note'):
self.g_rhythm_viewer.add_rhythm_element(self.m_t.m_question[0])
try:
self.m_t.play_question()
except Exception, e:
if not self.standard_exception_handler(e, self.on_end_practise):
raise
return
elif g == self.m_t.ERR_PICKY:
self.g_rhythm_viewer.flash(_("You have to solve this question first."))
else:
assert g == self.m_t.ERR_NO_ELEMS
self.g_repeat.set_sensitive(False)
self.g_rhythm_viewer.flash(_("You have to configure this exercise properly"))
def repeat_question(self, *w):
self.m_t.play_question()
self.g_first_rhythm_button.grab_focus()
def update_answer_buttons(self):
"""
(Re)create the buttons needed to answer the questions.
We recreate the buttons for each lesson file because the
header may specify a different set of rhythm elements to use.
"""
for but in self.answer_buttons:
but.destroy()
self.answer_buttons = []
self.g_first_rhythm_button = None
gs = Gtk.SizeGroup(Gtk.SizeGroupMode.HORIZONTAL)
for i, v in enumerate((
[1, 4, -1, 8, 11, -1, 15, 18, 21, -1, 25, 28, -1, 32],
[0, 3, 6, 7, 10, 13, 14, 17, 20, 23, 24, 27, 30, 31, 34],
[2, 5, -1, 9, 12, -1, 16, 19, 22, -1, 26, 29, -1, 33])):
hbox = Gtk.HBox(True, 0)
for k in v:
b = self.solbutton(k, True)
gs.add_widget(b)
b.set_sensitive(False)
for n in self.m_t.m_P.header.solmisation_elements:
if k == n:
b.set_sensitive(True)
if not self.g_first_rhythm_button:
self.g_first_rhythm_button = b
hbox.pack_start(b, True, True, 0)
self.answer_buttons.append(b)
if k != -1:
self.m_answer_buttons[k] = b
spacing = Gtk.Alignment()
if i in (0, 2):
spacing.set_property('left-padding', 16)
spacing.set_property('right-padding', 16)
spacing.add(hbox)
self.g_answer_box.pack_start(spacing, True, True, 0)
spacing.show_all()
def on_start_practise(self):
# FIXME for now, we run in custom_mode all the time, so we don't
# have to add lots of lesson files. We can change this later.
#self.m_t.m_custom_mode = self.get_bool('gui/expert_mode')
self.m_t.m_custom_mode = True
super(Gui, self).on_start_practise()
if not self.m_t.m_P.header.solmisation_elements:
self.m_t.m_P.header.solmisation_elements = self.m_t.elements[:]
self.update_answer_buttons()
self.std_buttons_start_practise()
if self.m_t.m_custom_mode:
self.update_select_elements_buttons()
self.g_element_frame.show()
else:
self.g_element_frame.hide()
self.m_t.set_default_header_values()
if 'show_first_note' in self.m_t.m_P.header:
self.m_t.set_bool('show_first_note', self.m_t.m_P.header.show_first_note)
if 'play_cadence' in self.m_t.m_P.header:
self.m_t.set_bool('play_cadence', self.m_t.m_P.header.play_cadence)
self.g_rhythm_viewer.flash(_("Click 'New' to begin."))
def on_end_practise(self):
self.m_t.end_practise()
self.std_buttons_end_practise()
self.g_rhythm_viewer.create_holders()
def give_up(self, widget=None):
if self.m_t.q_status == self.QSTATUS_NO:
return
self.g_rhythm_viewer.clear()
for i in self.m_t.m_question:
self.g_rhythm_viewer.add_rhythm_element(i)
self.m_t.q_status = self.QSTATUS_SOLVED
self.std_buttons_give_up()
| RannyeriDev/Solfege | solfege/exercises/solmisation.py | Python | gpl-3.0 | 16,440 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-15 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sigad', '0033_auto_20180207_1028'),
]
operations = [
migrations.AddField(
model_name='documento',
name='capa',
field=models.BooleanField(choices=[(True, 'Sim'), (False, 'Não')], default=False, help_text='Só um objeto pode ser capa de sua classe. Caso haja outro já selecionado, ele será desconsiderado.', verbose_name='Capa de sua Classe'),
),
migrations.AlterField(
model_name='classe',
name='template_classe',
field=models.IntegerField(choices=[(1, 'Listagem em Linha'), (2, 'Galeria Albuns'), (3, 'Página dos Parlamentares'), (4, 'Página individual de Parlamentar'), (5, 'Banco de Imagens'), (6, 'Galeria de Áudios'), (7, 'Galeria de Vídeos'), (99, 'Documento Específico')], default=1, verbose_name='Template para a Classe'),
),
]
| cmjatai/cmj | cmj/sigad/migrations/0034_auto_20180215_0953.py | Python | gpl-3.0 | 1,088 |
import libtcodpy as libtcod
#globs are imported almost everywhere, so unnecessary libtcod imports should be omitted
##########################################################
# Options
##########################################################
#it's better to pack them into a separate file
LIMIT_FPS = 20
#size of the map
MAP_WIDTH = 100
MAP_HEIGHT = 80
#size of the camera screen
CAMERA_WIDTH = 80
CAMERA_HEIGHT = 43
#size of the window
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 50
#map generating settings
MAX_ROOM_ENEMIES = 3
MAX_ROOM_ITEMS = 3
#BSP options
DEPTH = 10 #number of node splittings
ROOM_MIN_SIZE = 6
FULL_ROOMS = False
#sizes and coordinates for GUI
PANEL_HEIGHT = 7
BAR_WIDTH = 20
PANEL_Y = SCREEN_HEIGHT - PANEL_HEIGHT
MESSAGE_X = BAR_WIDTH + 2
MESSAGE_WIDTH = SCREEN_WIDTH - BAR_WIDTH - 2
MESSAGE_HEIGHT = PANEL_HEIGHT - 1
#inventory properties
MAX_INVENTORY_SIZE = 26
INVENTORY_WIDTH = 50
#item properties
STIM_HEAL_AMOUNT = 50
DISCHARGE_DAMAGE = 100
ITEM_USING_RANGE = 5
CONFUSE_TURNS = 10
GRENADE_DAMAGE = 30
GRENADE_RADIUS = 3
#weapon properties
WEAPON_RANGE = 5
LASER_PISTOL_DAMAGE = 20
LASER_RIFLE_DAMAGE = 50
#experience properties
LEVEL_UP_BASE = 100
LEVEL_UP_FACTOR = 150
#FOV properties
FOV_ALGO = 0
FOV_LIGHT_WALLS = True
TORCH_RADIUS = 10
##########################################################
#colors of invisible tiles
c_hid_wall = libtcod.Color(22, 7, 115)
c_hid_floor = libtcod.Color(55, 46, 133)
#colors of visible tiles
c_vis_wall = libtcod.Color(148, 122, 23)
c_vis_floor = libtcod.Color(180, 155, 58)
#colors of highlighted tiles
c_hi_wall = libtcod.Color(67, 128, 211)
c_hi_floor = libtcod.Color(105, 150, 211)
#possible tile types
#properties description:
# title - just a name for a specific terrain, also used as key for a corresponding tile type in 'TYLE_TYPES' dict
# walkable - indicates, if tiles of this type can hold an object (character, item and so on) on the top of itself
# transparent - indicates, whether the tile blocks line of sight during calculating FOV for characters or not
# vis_color, hid_color, hi_color - visible color, hidden color, highlighted color respectively; used in 'calculate_color' function
#
#current types: 'floor', 'wall'
#
#in order to add a new type, just create an additional dict holding its properties as shown below and append it to 'TILE_TYPES'
#'tile_type' used in 'Tile.set_type' is the keyword of a corresponding type in 'TILE_TYPES'
floor = {'title': 'floor', 'walkable': True, 'transparent': True, 'vis_color': c_vis_floor, 'hid_color': c_hid_floor, 'hi_color': c_hi_floor}
wall = {'title': 'metal wall', 'walkable': False, 'transparent': False, 'vis_color': c_vis_wall, 'hid_color': c_hid_wall, 'hi_color': c_hi_wall}
TILE_TYPES = {'floor': floor, 'wall': wall}
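#example of adding a new terrain type (editor's sketch: the 'door' entry and
#its colors are illustrative and not used by the map generator yet):
#door = {'title': 'door', 'walkable': True, 'transparent': False, 'vis_color': c_vis_wall, 'hid_color': c_hid_wall, 'hi_color': c_hi_wall}
#TILE_TYPES['door'] = door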
#variables for input handling
#they are kept here, as they don't hold any used information outside input-handling functions
#it doesn't matter, if they are constantly reinitializing
key = libtcod.Key()
mouse = libtcod.Mouse()
##########################################################
| MaxGavr/breakdown_game | globs.py | Python | gpl-3.0 | 3,049 |
from .main import create_random_key, vigenere, otp
| D-Vaillant/julius | julius/__init__.py | Python | gpl-3.0 | 51 |
# Tests of triangular lattices
#
# Copyright (C) 2017--2019 Simon Dobson
#
# This file is part of simplicial, simplicial topology in Python.
#
# Simplicial is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Simplicial is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Simplicial. If not, see <http://www.gnu.org/licenses/gpl.html>.
import unittest
import six
from simplicial import *
import math
import numpy.random
class TriangularLatticeTests(unittest.TestCase):
def _simplexCounts( self, r, c ):
"""Check we have the right numbers of simplices."""
self._complex = TriangularLattice(r, c)
ntriperr = 2 * (c - 1) + 1
ns = self._complex.numberOfSimplicesOfOrder()
self.assertEqual(ns[0], r * c)
if len(ns) > 1:
self.assertEqual(ns[1], (r - 1) * (2 * c - 1) + (r - 2) * c)
if len(ns) > 2:
self.assertEqual(ns[2], (r - 2) * ntriperr)
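    # Worked example (editor's note): for r = c = 3 the checks above expect
    # 3*3 = 9 vertices, 2*(2*3 - 1) + 1*3 = 13 edges and 1*(2*(3 - 1) + 1) = 5
    # triangles, giving Euler characteristic 9 - 13 + 5 = 1 (see testEuler).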
def testSimplest(self):
"""Test simplest triangular lattice."""
self._simplexCounts(2, 2)
def testNextSimplest(self):
"""Test the next simplest lattice."""
self._simplexCounts(3, 3)
def testEvenEven( self ):
"""Test a lattice with even rows and columns."""
self._simplexCounts(10, 10)
def testEvenOdd( self ):
"""Test a lattice with even rows and odd columns."""
self._simplexCounts(10, 11)
def testOddEven( self ):
"""Test a lattice with odd rows and even columns."""
self._simplexCounts(11, 10)
def testOddOdd( self ):
"""Test a lattice with odd rows and columns."""
        self._simplexCounts(11, 11)
def testEuler( self ):
"""Test the Euler characteristic calculations."""
self._complex = TriangularLattice(20, 20)
self.assertEqual(self._complex.eulerCharacteristic(), 1)
def testRegularEmbedding( self ):
"""Test that the embedding is regular."""
self._complex = TriangularLattice(10, 10)
e = TriangularLatticeEmbedding(self._complex, 11, 11)
pos = e.positionsOf()
eps = 0.0001
# all columns equidistant
for i in range(10):
for j in range(9):
s1 = self._complex._indexOfVertex(i, j)
s2 = self._complex._indexOfVertex(i, j + 1)
self.assertTrue(pos[s2][0] - pos[s1][0] < (11.0 / 10) + eps)
# all rows equidistant
for i in range(9):
for j in range(10):
s1 = self._complex._indexOfVertex(i, j)
s2 = self._complex._indexOfVertex(i + 1, j)
self.assertTrue(pos[s2][1] - pos[s1][1] < (11.0 / 10) + eps)
# odd rows are offset
for i in range(9):
for j in range(9):
s1 = self._complex._indexOfVertex(i, j)
s2 = self._complex._indexOfVertex(i + 1, j)
if i % 2 == 0:
self.assertTrue(pos[s2][0] > pos[s1][0])
else:
self.assertTrue(pos[s2][0] < pos[s1][0])
def testPerturbedEmbedding( self ):
"""Test that we can perturb the embedding with explicit new positions."""
self._complex = TriangularLattice(10, 10)
e = TriangularLatticeEmbedding(self._complex, 11, 11)
ss = list(self._complex.simplicesOfOrder(0))
pos = e.positionsOf()
# choose a random simplex
i = int(numpy.random.random() * len(ss))
s = ss[i]
# re-position simplex
e.positionSimplex(s, [ 12, 13 ])
# make sure position is preserved
pos1 = e.positionsOf()
six.assertCountEqual(self, pos1[s], [ 12, 13 ])
| simoninireland/simplicial | test/test_triangularlattice.py | Python | gpl-3.0 | 4,117 |
# Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "112_WalkOfFate"
# ~~~~~ npcId list: ~~~~~
Livina = 30572
Karuda = 32017
# ~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~ itemId list: ~~~~~~
EnchantD = 956
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAdvEvent (self,event,npc,player) :
st = player.getQuestState(qn)
if not st: return
htmltext = event
cond = st.getInt("cond")
if event == "32017-02.htm" and cond == 1 :
st.giveItems(57,22308)
st.giveItems(EnchantD,1)
st.addExpAndSp(112876,5774)
st.exitQuest(False)
st.playSound("ItemSound.quest_finish")
elif event == "30572-02.htm" :
st.playSound("ItemSound.quest_accept")
st.setState(STARTED)
st.set("cond","1")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><head><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
state = st.getState()
npcId = npc.getNpcId()
cond = st.getInt("cond")
if state == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif state == CREATED :
if npcId == Livina :
if player.getLevel() >= 20 :
htmltext = "30572-01.htm"
else:
htmltext = "30572-00.htm"
st.exitQuest(1)
elif state == STARTED :
if npcId == Livina :
htmltext = "30572-03.htm"
elif npcId == Karuda :
htmltext = "32017-01.htm"
return htmltext
QUEST = Quest(112,qn,"Walk of Fate")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(Livina)
QUEST.addTalkId(Livina)
QUEST.addTalkId(Karuda)
| zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/112_WalkOfFate/__init__.py | Python | gpl-3.0 | 2,208 |
from django.urls import path
from .views import redirect_to_archive
urlpatterns = [path("<str:media_id>/", redirect_to_archive)]
| whav/hav | src/hav/apps/media/urls.py | Python | gpl-3.0 | 130 |
# -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo
This file is part of Mepinta.
Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
from unittest import TestLoader
from unittest.runner import TextTestRunner
from common.abstract.FrameworkObject import FrameworkObject
class TestDiscoveryManager(FrameworkObject): # Cannot be FrameworkBase because contexts are init in tests
def __getPluginsTestDir(self):
import plugins_tests.python as package
return package.__path__[0]
def runAllTests(self):
test_suite = TestLoader().discover(start_dir=self.__getPluginsTestDir(),
pattern='*.py', top_level_dir=self.__getPluginsTestDir())
TextTestRunner().run(test_suite)
def testModule():
TestDiscoveryManager().runAllTests()
if __name__ == "__main__":
testModule()
| joaduo/mepinta | core/python_core/mepinta/testing/plugins_testing/TestDiscoveryManager.py | Python | gpl-3.0 | 1,427 |
import numpy as np
from scipy import constants
from scipy.integrate import quad
# Boltzmann constant in eV/K
k = constants.value('Boltzmann constant in eV/K')
class Flux() :
"""
This class evaluates the neutron spectrum. The thermal cutoff is treated
as a variable parameter to ensure a specific fast-to-thermal ratio.
At thermal energy range (e < e1 eV), the flux is approximated by Maxwellian
distribution (D&H book Eq.(9-6)).
At fast energy range (e2 MeV < e < 20MeV), the flux is approximated by U-235
chi-spectrum (D&H book Eq.(2-112)).
At epithermal energies (e1 eV < e < e2 MeV), flux = 1/e
ratio : fast-to-thermal flux ratio
"""
def __init__(self, ratio = 2, thermal_temp = 600.0):
self.e2 = 1e6
self.thermal_temp = thermal_temp
# Maxwellian distribution, Eq.(9-6)
self.m = lambda x : x ** 0.5 * np.exp(-x/(k*self.thermal_temp))
# U235 chi distribution, Eq.(2-112)
self.chi = lambda x : np.exp(-1.036e-6*x)*np.sinh((2.29e-6 * x)**0.5)
# Middle energy range
self.f = lambda x : 1 / x
# Compute ratio as a function of thermal cutoff
E = np.logspace(-4, 0.1, 200)
R = np.array([self.compute_ratio(e1) for e1 in E])
# Compute thermal cutoff for given ratio
self.e1 = np.interp(1.0/ratio, R, E)
        print('Thermal cutoff is {} eV'.format(self.e1))
# Compute constants for each part of the spectrum
self.c1 = 1.0
self.c2 = self.m(self.e1) / self.f(self.e1)
self.c3 = self.c2 * self.f(self.e2) / self.chi(self.e2)
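        # Continuity note (added): c2 = m(e1)/f(e1) matches the Maxwellian to
        # the 1/E tail at e1, and c3 = c2*f(e2)/chi(e2) matches the 1/E tail
        # to the chi spectrum at e2, so the piecewise spectrum is continuous.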
def compute_ratio(self, e1):
A = quad(self.m, 0, e1)[0]
C2 = self.m(e1) / self.f(e1)
C3 = self.f(self.e2) / self.chi(self.e2)
B = C2 * quad(self.f, e1, self.e2)[0]
C = C2 * C3 * quad(self.chi, self.e2, 2e7)[0]
r = A / (B + C)
return r
def evaluate(self, e):
# Evaluate flux at Energy e in eV
return (e<=self.e1) * self.c1*self.m(e) + \
(e>self.e1)*(e<=self.e2) * (self.c2 / e) + \
(e>self.e2) * self.c3*self.chi(e)
if __name__ == "__main__" :
import matplotlib.pyplot as plt
from multigroup_utilities import *
from nice_plots import init_nice_plots
init_nice_plots()
# PWR-like and TRIGA-like spectra
pwr = Flux(7.0, 600.0)
triga = Flux(1.0, 600.0)
# Evaluate the flux
E = np.logspace(-5, 7, 1000)
phi_pwr = pwr.evaluate(E)
phi_triga = triga.evaluate(E)
# Collapse the flux and flux per unit lethargy onto WIMS 69-group structure
bounds = energy_groups(structure='wims69')
pwr_mg = collapse(bounds, phi_pwr, E)
triga_mg = collapse(bounds, phi_triga, E)
phi_mg_pul = collapse(bounds, E*phi_pwr, E)
triga_mg_pul = collapse(bounds, E*phi_triga, E)
# Produce step-plot data for each spectrum
E_mg, phi_mg = plot_multigroup_data(bounds, pwr_mg)
_, triga_mg = plot_multigroup_data(bounds, triga_mg)
_, phi_mg_pul = plot_multigroup_data(bounds, phi_mg_pul)
_, triga_mg_pul = plot_multigroup_data(bounds, triga_mg_pul)
plt.figure(1)
plt.loglog(E, phi_pwr, 'k', E_mg, phi_mg, 'k--',
E, phi_triga, 'b', E_mg, triga_mg, 'b:')
plt.xlabel('$E$ (eV)')
plt.ylabel('$\phi(E)$')
plt.figure(2)
plt.loglog(E, E*phi_pwr, 'k', E_mg, phi_mg_pul, 'k--',
E, E*phi_triga, 'b', E_mg, triga_mg_pul, 'b:')
plt.xlabel('$E$ (eV)')
plt.ylabel('$E\phi(E)$')
    plt.show()
| corps-g/flux_spectrum | flux_spectrum.py | Python | gpl-3.0 | 3,697 |
#! python3
# coding: utf-8
# TODO:
# multithread per machine
# clean up the code
import os
import sys
import configparser
import logging
import re
import gc
import traceback
import getpass
from logging import FileHandler
from colorama import Fore
import colorama
colorama.init()
#logger = logging.getLogger('MagretUtil')
##logger_info = logging.getLogger('Info')
#logger.setLevel(logging.WARNING)
##logger_info.setLevel(logging.INFO)
#formatter = logging.Formatter('%(asctime)s :: %(name)s :: %(levelname)s\n' + '=' * 100 + '\n%(message)s' + '=' * 100)
## file_handler = RotatingFileHandler('error.log', mode='w', 1000000, 1)
#file_handler = FileHandler('error.log', 'w')
#file_handler.setLevel(logging.WARNING)
#file_handler.setFormatter(formatter)
#logger.addHandler(file_handler)
#stream_handler = logging.StreamHandler()
#stream_handler.setLevel(logging.INFO)
#logger_info.addHandler(stream_handler)
from Groupe import Groupe
from Salle import Salle
import commandes
import var_global
import Privilege
import AutoComplete
def _protect_quotes(text):
lst = text.split('"')
for i, item in enumerate(lst):
if i % 2:
lst[i] = re.sub(r'\s', "::", item)
return '"'.join(lst)
def _remove_protect_char(lst_str):
return [re.sub(r'::', ' ', s) for s in lst_str]
def lire_fichier_ini(fichier):
""" Retourne les variables necessaire pour le fonctionnement retourne un
dictionnaire {nom_groupe:nbre_poste}"""
try:
config = configparser.ConfigParser()
config.read(fichier, encoding="utf-8-sig")
except configparser.Error:
print(Fore.LIGHTRED_EX + "[!] Erreur lors de l'initialisation du fichier ini : " + Fore.RESET)
raise SystemExit(0)
groupes_dict = {}
groupes_dict['GroupesMagret'] = {}
groupes_dict['Groupes'] = {}
groupes_dict['GroupesFile'] = {}
domaine = {}
try:
try:
for groupe in config['GroupesMagret']:
num_poste = config['GroupesMagret'][groupe].split('-')[1]
try:
nbre_poste = int(num_poste[1:])
except ValueError:
nbre_poste = 0
if nbre_poste != 0:
groupes_dict['GroupesMagret'][groupe.upper()] = nbre_poste
except KeyError:
print(Fore.LIGHTMAGENTA_EX + "[!] Aucun groupe Magret" + Fore.RESET)
try:
for groupe in config['Groupes']:
groupes_dict['Groupes'][groupe.upper()] = config['Groupes'][groupe]
except KeyError:
print(Fore.LIGHTMAGENTA_EX + "[!] Aucun groupe non Magret" + Fore.RESET)
try:
groupes_dict['GroupesFile']['file'] = config['GroupesFile']['file']
except KeyError:
print(Fore.LIGHTMAGENTA_EX + "[!] Aucun fichier pour définir des groupes" + Fore.RESET)
except Exception as e:
print(Fore.LIGHTRED_EX + '[!] Erreur de lecture du fichier config' + Fore.RESET)
#logger.critical(e)
raise e
domaine['name'] = config.get('Domaine', 'domaine', fallback=None)
domaine['login'] = config.get('Domaine', 'login', fallback=None)
return groupes_dict, domaine
def erreur_final(e_type, e_value, e_tb):
if e_type == KeyboardInterrupt:
raise SystemExit(0)
print(Fore.LIGHTRED_EX + '[!] Erreur critique, voir le fichier de log' + Fore.RESET)
os.system("pause")
with open("error.log", "w") as f:
f.write(''.join(traceback.format_exception(e_type, e_value, e_tb)))
#logger.critical(''.join(traceback.format_exception(e_type, e_value, e_tb)))
# pdb.post_mortem(e_tb)
return
# def init_groupes_old(ini_groupes):
# global groupes, selected_groupes, machines_dict
#
# for ini_salle, nbre in ini_groupes['GroupesMagret'].items():
# groupes.append(Salle(ini_salle, nbre))
# for ini_groupe, list_machine in ini_groupes['Groupes'].items():
# list_machine = list_machine.split(',')
# groupes.append(Groupe(ini_groupe, list_machine))
# groupes.sort(key=lambda x: x.name)
#
# machines_dict.update({machine.name: machine for g in groupes for machine in g})
# return
def init_groupes(ini_groupes):
groupes_machines_names = {}
all_names_machines = []
# [GroupesMagret]
for ini_salle, nbre in ini_groupes['GroupesMagret'].items():
        # rewrite the machine names (builds a template like '%s-P%02i')
num = len(str(nbre)) if nbre >= 10 else 2
str_template = '%s-P%0--i'.replace('--', str(num))
names_machines = [str_template % (ini_salle, i) for i in range(1, nbre + 1)]
        # dict mapping group name -> machine names
groupes_machines_names[ini_salle] = names_machines
        # create an empty room that will be filled later
        # and add it to the global groups
var_global.groupes.append(Salle(ini_salle, 0))
all_names_machines.extend(names_machines)
# [Groupes]
for ini_groupe, list_machines in ini_groupes['Groupes'].items():
list_machines = list_machines.split(',')
groupes_machines_names[ini_groupe] = list_machines
all_names_machines.extend(list_machines)
var_global.groupes.append(Groupe(ini_groupe, []))
# [GroupesFile]
try:
file = open(ini_groupes['GroupesFile']['file'], encoding="utf-8-sig", errors='replace')
for line in file:
list_name = line.strip(" \n,").split(',')
if list_name[1:]:
all_names_machines.extend(list_name[1:])
groupes_machines_names[list_name[0]] = list_name[1:]
var_global.groupes.append(Groupe(list_name[0], []))
except FileNotFoundError:
print(Fore.LIGHTRED_EX + "[!] Fichier csv introuvable" + Fore.RESET)
except KeyError:
pass
    # ANSI escape code to move the cursor back up: looks nicer
up = len(var_global.groupes) + 1
print('\x1b[%sA' % up)
    # create a group containing every machine, which lets us run
    # an action multithreaded across all machines;
    # update the dictionary of known machines
var_global.groupe_selected_machines = Groupe('en cours', all_names_machines)
var_global.machines_dict.update(var_global.groupe_selected_machines.dict_machines)
    # fill the groups with the machines created above
for g in var_global.groupes:
machines = [i for k, i in var_global.machines_dict.items()
if k in groupes_machines_names[g.name]]
g.machines = machines
g.dict_machines = {m.name: m for m in machines}
g.machines.sort(key=lambda x: x.name)
var_global.groupes.sort(key=lambda x: x.name)
return
def main():
sys.excepthook = erreur_final
try:
if not os.path.isdir('mac'):
os.mkdir('mac')
    except OSError:
print(Fore.LIGHTRED_EX + "[!] Erreur lors de la création du répertoire mac" + Fore.RESET)
os.system("pause")
print(Fore.LIGHTGREEN_EX + '[+] Lecture de conf.ini' + Fore.RESET)
ini_groupes, dom = lire_fichier_ini('conf.ini')
    # initialize the domaine variable, which holds the domain
    # administrator login
var_global.domaine.update(dom)
    # if the login from the config file differs from the one we are
    # logged in as, start the privilege-elevation procedure
if var_global.domaine['login'] is not None and getpass.getuser() != var_global.domaine['login']:
commandes.password([])
    # if a UAC bypass was requested, run the procedure
if sys.argv[1:] and sys.argv[1] == "pass_uac":
Privilege.pass_uac()
raise SystemExit(0)
#logger_info.info('Création des alias')
print(Fore.LIGHTGREEN_EX + '[+] Création des alias' + Fore.RESET)
alias_cmd = var_global.lire_alias_ini()
print(Fore.LIGHTGREEN_EX + '[+] Initialisation des salles :' + Fore.RESET)
init_groupes(ini_groupes)
AutoComplete.init_auto_complete()
    # clear the screen
print('\x1b[2J', end='')
commandes.select(['*'])
print('-' * (os.get_terminal_size().columns - 1))
print(Fore.LIGHTGREEN_EX + "Taper help ou la touche 'enter' pour obtenir de l'aide" + Fore.RESET)
print('-' * (os.get_terminal_size().columns - 1))
while True:
param = input('>>>')
param = _protect_quotes(param)
param = param.strip()
param = param.split(' ')
param = _remove_protect_char(param)
cmd = param[0]
if cmd in alias_cmd:
            # allows creating aliases that take a parameter
            str_replace = ""
if param[1:]:
str_replace = " ".join(param[1:])
param = _protect_quotes(alias_cmd[cmd].replace("$$",str_replace))
param = param.strip()
param = param.split(' ')
param = _remove_protect_char(param)
cmd = param[0]
param.remove(cmd)
cmd = cmd.lower()
cmd_funct = getattr(commandes, cmd, commandes.help)
try:
            # clear the last error before running
            # the next command
if cmd != 'errors':
for m in var_global.groupe_selected_machines:
m.message_erreur = ''
cmd_funct(param)
            # clean up part of what the threads of the last command
            # left behind; keeps memory growth under control
            # when multithreading
gc.collect()
# print('com-ref: ', pythoncom._GetInterfaceCount())
print('-' * (os.get_terminal_size().columns - 1))
except Warning:
cmd_funct(['help'])
gc.collect()
# print(pythoncom._GetInterfaceCount())
print('-' * (os.get_terminal_size().columns - 1))
if __name__ == '__main__':
main()
| bbmt-bbmt/MagretUtil | source/MagretUtil.py | Python | gpl-3.0 | 9,880 |
#!/usr/bin/env python
'''
THIS APP IS NOT PRODUCTION READY!! DO NOT USE!
Flask app that provides a RESTful API to MultiScanner.
Supported operations:
GET / ---> Test functionality. {'Message': 'True'}
GET /api/v1/files/<sha256>?raw={t|f} ----> download sample, defaults to passwd protected zip
GET /api/v1/modules ---> Receive list of modules available
GET /api/v1/tags ----> Receive list of all tags in use
GET /api/v1/tasks ---> Receive list of tasks in MultiScanner
POST /api/v1/tasks ---> POST file and receive report id
Sample POST usage:
curl -i -X POST http://localhost:8080/api/v1/tasks -F file=@/bin/ls
GET /api/v1/tasks/<task_id> ---> receive task in JSON format
DELETE /api/v1/tasks/<task_id> ----> delete task_id
GET /api/v1/tasks/search/ ---> receive list of most recent report for matching samples
GET /api/v1/tasks/search/history ---> receive list of most all reports for matching samples
GET /api/v1/tasks/<task_id>/file?raw={t|f} ----> download sample, defaults to passwd protected zip
GET /api/v1/tasks/<task_id>/maec ----> download the Cuckoo MAEC 5.0 report, if it exists
GET /api/v1/tasks/<task_id>/notes ---> Receive list of this task's notes
POST /api/v1/tasks/<task_id>/notes ---> Add a note to task
PUT /api/v1/tasks/<task_id>/notes/<note_id> ---> Edit a note
DELETE /api/v1/tasks/<task_id>/notes/<note_id> ---> Delete a note
GET /api/v1/tasks/<task_id>/report?d={t|f}---> receive report in JSON, set d=t to download
GET /api/v1/tasks/<task_id>/pdf ---> Receive PDF report
POST /api/v1/tasks/<task_id>/tags ---> Add tags to task
DELETE /api/v1/tasks/<task_id>/tags ---> Remove tags from task
GET /api/v1/analytics/ssdeep_compare---> Run ssdeep.compare analytic
GET /api/v1/analytics/ssdeep_group---> Receive list of sample hashes grouped by ssdeep hash
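Sample tag usage (illustrative, assuming task id 1):
    curl -i -X POST http://localhost:8080/api/v1/tasks/1/tags -d 'tag=malware'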
The API endpoints all have Cross Origin Resource Sharing (CORS) enabled. By
default it will allow requests from any port on localhost. Change this setting
by modifying the 'cors' setting in the 'api' section of the api config file.
TODO:
* Add doc strings to functions
'''
from __future__ import print_function
import os
import sys
import time
import hashlib
import codecs
import configparser
import json
import multiprocessing
import subprocess
import queue
import shutil
from datetime import datetime
from flask_cors import CORS
from flask import Flask, jsonify, make_response, request, abort
from flask.json import JSONEncoder
from jinja2 import Markup
from six import PY3
import rarfile
import zipfile
import requests
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.join(MS_WD, 'storage') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'storage'))
if os.path.join(MS_WD, 'analytics') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'analytics'))
if os.path.join(MS_WD, 'libs') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'libs'))
if MS_WD not in sys.path:
sys.path.insert(0, os.path.join(MS_WD))
import multiscanner
import sql_driver as database
import elasticsearch_storage
import common
from utils.pdf_generator import create_pdf_document
TASK_NOT_FOUND = {'Message': 'No task or report with that ID found!'}
INVALID_REQUEST = {'Message': 'Invalid request parameters'}
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
DEFAULTCONF = {
'host': 'localhost',
'port': 8080,
'upload_folder': '/mnt/samples/',
'distributed': True,
'web_loc': 'http://localhost:80',
'cors': 'https?://localhost(:\d+)?',
'batch_size': 100,
'batch_interval': 60 # Number of seconds to wait for additional files
# submitted to the create/ API
}
# Customize timestamp format output of jsonify()
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
return str(obj)
else:
return JSONEncoder.default(self, obj)
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
api_config_object = configparser.SafeConfigParser()
api_config_object.optionxform = str
api_config_file = multiscanner.common.get_config_path(multiscanner.CONFIG, 'api')
api_config_object.read(api_config_file)
if not api_config_object.has_section('api') or not os.path.isfile(api_config_file):
# Write default config
api_config_object.add_section('api')
for key in DEFAULTCONF:
api_config_object.set('api', key, str(DEFAULTCONF[key]))
conffile = codecs.open(api_config_file, 'w', 'utf-8')
api_config_object.write(conffile)
conffile.close()
api_config = multiscanner.common.parse_config(api_config_object)
# Needs api_config in order to function properly
from celery_worker import multiscanner_celery, ssdeep_compare_celery
from ssdeep_analytics import SSDeepAnalytic
db = database.Database(config=api_config.get('Database'))
# To run under Apache, we need to set up the DB outside of __main__
db.init_db()
storage_conf = multiscanner.common.get_config_path(multiscanner.CONFIG, 'storage')
storage_handler = multiscanner.storage.StorageHandler(configfile=storage_conf)
for handler in storage_handler.loaded_storage:
if isinstance(handler, elasticsearch_storage.ElasticSearchStorage):
break
ms_config_object = configparser.SafeConfigParser()
ms_config_object.optionxform = str
ms_configfile = multiscanner.CONFIG
ms_config_object.read(ms_configfile)
ms_config = common.parse_config(ms_config_object)
try:
DISTRIBUTED = api_config['api']['distributed']
except KeyError:
DISTRIBUTED = False
if not DISTRIBUTED:
work_queue = multiprocessing.Queue()
try:
cors_origins = api_config['api']['cors']
except KeyError:
cors_origins = DEFAULTCONF['cors']
CORS(app, origins=cors_origins)
batch_size = api_config['api']['batch_size']
batch_interval = api_config['api']['batch_interval']
# Add `delete_after_scan = True` to api_config.ini to delete samples after scan has completed
delete_after_scan = api_config['api'].get('delete_after_scan', False)
def multiscanner_process(work_queue, exit_signal):
'''Not used in distributed mode.
'''
metadata_list = []
time_stamp = None
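    # Batching: accumulate queued files until batch_size items have arrived
    # or batch_interval seconds have elapsed since the first one, then run a
    # single multiscan over the whole batch.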
while True:
time.sleep(1)
try:
metadata_list.append(work_queue.get_nowait())
if not time_stamp:
time_stamp = time.time()
while len(metadata_list) < batch_size:
metadata_list.append(work_queue.get_nowait())
except queue.Empty:
if metadata_list and time_stamp:
if len(metadata_list) >= batch_size:
pass
elif time.time() - time_stamp > batch_interval:
pass
else:
continue
else:
continue
filelist = [item[0] for item in metadata_list]
#modulelist = [item[5] for item in metadata_list]
resultlist = multiscanner.multiscan(
filelist, configfile=multiscanner.CONFIG
#module_list
)
results = multiscanner.parse_reports(resultlist, python=True)
scan_time = datetime.now().isoformat()
if delete_after_scan:
for file_name in results:
os.remove(file_name)
for item in metadata_list:
# Use the original filename as the index instead of the full path
results[item[1]] = results[item[0]]
del results[item[0]]
results[item[1]]['Scan Time'] = scan_time
results[item[1]]['Metadata'] = item[4]
db.update_task(
task_id=item[2],
task_status='Complete',
timestamp=scan_time,
)
metadata_list = []
storage_handler.store(results, wait=False)
filelist = []
time_stamp = None
storage_handler.close()
@app.errorhandler(HTTP_BAD_REQUEST)
def invalid_request(error):
'''Return a 400 with the INVALID_REQUEST message.'''
return make_response(jsonify(INVALID_REQUEST), HTTP_BAD_REQUEST)
@app.errorhandler(HTTP_NOT_FOUND)
def not_found(error):
'''Return a 404 with a TASK_NOT_FOUND message.'''
return make_response(jsonify(TASK_NOT_FOUND), HTTP_NOT_FOUND)
@app.route('/')
def index():
'''
Return a default standard message
for testing connectivity.
'''
return jsonify({'Message': 'True'})
@app.route('/api/v1/modules', methods=['GET'])
def modules():
'''
Return a list of module names available for MultiScanner to use,
and whether or not they are enabled in the config.
'''
files = multiscanner.parseDir(multiscanner.MODULEDIR, True)
filenames = [os.path.splitext(os.path.basename(f)) for f in files]
module_names = [m[0] for m in filenames if m[1] == '.py']
ms_config = configparser.SafeConfigParser()
ms_config.optionxform = str
ms_config.read(multiscanner.CONFIG)
modules = {}
for module in module_names:
try:
modules[module] = ms_config.get(module, 'ENABLED')
except (configparser.NoSectionError, configparser.NoOptionError):
pass
return jsonify({'Modules': modules})
@app.route('/api/v1/tasks', methods=['GET'])
def task_list():
'''
Return a JSON dictionary containing all the tasks
in the tasks DB.
'''
return jsonify({'Tasks': db.get_all_tasks()})
def search(params, get_all=False):
# Pass search term to Elasticsearch, get back list of sample_ids
sample_id = params.get('sha256')
if sample_id:
task_id = db.exists(sample_id)
if task_id:
return { 'TaskID' : task_id }
else:
return TASK_NOT_FOUND
search_term = params.get('search[value]')
search_type = params.pop('search_type', 'default')
if not search_term:
es_result = None
else:
es_result = handler.search(search_term, search_type)
# Search the task db for the ids we got from Elasticsearch
if get_all:
return db.search(params, es_result, return_all=True)
else:
return db.search(params, es_result)
@app.route('/api/v1/tasks/search/history', methods=['GET'])
def task_search_history():
'''
Handle query between jQuery Datatables, the task DB, and Elasticsearch.
Return all reports for matching samples.
'''
params = request.args.to_dict()
resp = search(params, get_all=True)
return jsonify(resp)
@app.route('/api/v1/tasks/search', methods=['GET'])
def task_search():
'''
Handle query between jQuery Datatables, the task DB, and Elasticsearch.
Return only the most recent report for each of the matching samples.
'''
params = request.args.to_dict()
resp = search(params)
return jsonify(resp)
@app.route('/api/v1/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
'''
Return a JSON dictionary corresponding
to the given task ID.
'''
task = db.get_task(task_id)
if task:
return jsonify({'Task': task.to_dict()})
else:
abort(HTTP_NOT_FOUND)
@app.route('/api/v1/tasks/<int:task_id>', methods=['DELETE'])
def delete_task(task_id):
'''
Delete the specified task. Return deleted message.
'''
result = db.delete_task(task_id)
if not result:
abort(HTTP_NOT_FOUND)
return jsonify({'Message': 'Deleted'})
def save_hashed_filename(f, zipped=False):
'''
Save given file to the upload folder, with its SHA256 hash as its filename.
'''
f_name = hashlib.sha256(f.read()).hexdigest()
# Reset the file pointer to the beginning to allow us to save it
f.seek(0)
# TODO: should we check if the file is already there
# and skip this step if it is?
file_path = os.path.join(api_config['api']['upload_folder'], f_name)
full_path = os.path.join(MS_WD, file_path)
if zipped:
shutil.copy2(f.name, full_path)
else:
f.save(file_path)
return (f_name, full_path)
class InvalidScanTimeFormatError(ValueError):
pass
def import_task(file_):
'''
Import a JSON report that was downloaded from MultiScanner.
'''
report = json.loads(file_.read().decode('utf-8'))
try:
report['Scan Time'] = datetime.strptime(report['Scan Time'], '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
raise InvalidScanTimeFormatError()
task_id = db.add_task(
sample_id=report['SHA256'],
task_status='Complete',
timestamp=report['Scan Time'],
)
storage_handler.store({report['filename']: report}, wait=False)
return task_id
def queue_task(original_filename, f_name, full_path, metadata, rescan=False):
'''
Queue up a single new task, for a single non-archive file.
'''
# If option set, or no scan exists for this sample, skip and scan sample again
# Otherwise, pull latest scan for this sample
if (not rescan):
t_exists = db.exists(f_name)
if t_exists:
return t_exists
# Add task to sqlite DB
# Make the sample_id equal the sha256 hash
task_id = db.add_task(sample_id=f_name)
if DISTRIBUTED:
# Publish the task to Celery
multiscanner_celery.delay(full_path, original_filename,
task_id, f_name, metadata,
config=multiscanner.CONFIG)
else:
# Put the task on the queue
work_queue.put((full_path, original_filename, task_id, f_name, metadata))
return task_id
@app.route('/api/v1/tasks', methods=['POST'])
def create_task():
'''
Create a new task for a submitted file. Save the submitted file to
UPLOAD_FOLDER, optionally unzipping it. Return task id and 201 status.
'''
file_ = request.files['file']
if request.form.get('upload_type', None) == 'import':
try:
task_id = import_task(file_)
except KeyError:
return make_response(
jsonify({'Message': 'Cannot import report missing \'Scan Time\' field!'}),
HTTP_BAD_REQUEST)
except InvalidScanTimeFormatError:
return make_response(
jsonify({'Message': 'Cannot import report with \'Scan Time\' of invalid format!'}),
HTTP_BAD_REQUEST)
except (UnicodeDecodeError, ValueError):
return make_response(
jsonify({'Message': 'Cannot import non-JSON files!'}),
HTTP_BAD_REQUEST)
return make_response(
jsonify({'Message': {'task_ids': [task_id]}}),
HTTP_CREATED
)
original_filename = file_.filename
metadata = {}
task_id_list = []
extract_dir = None
rescan = False
for key in request.form.keys():
if key in ['file_id', 'archive-password', 'upload_type'] or request.form[key] == '':
continue
elif key == 'duplicate':
if request.form[key] == 'latest':
rescan = False
elif request.form[key] == 'rescan':
rescan = True
elif key == 'modules':
module_names = request.form[key]
files = multiscanner.parseDir(multiscanner.MODULEDIR, True)
modules = []
for f in files:
split = os.path.splitext(os.path.basename(f))
if split[0] in module_names and split[1] == '.py':
modules.append(f)
elif key == 'archive-analyze' and request.form[key] == 'true':
extract_dir = api_config['api']['upload_folder']
if not os.path.isdir(extract_dir):
return make_response(
jsonify({'Message': "'upload_folder' in API config is not "
"a valid folder!"}),
HTTP_BAD_REQUEST)
# Get password if present
if 'archive-password' in request.form:
password = request.form['archive-password']
if PY3:
password = bytes(password, 'utf-8')
else:
password = ''
else:
metadata[key] = request.form[key]
if extract_dir:
# Extract a zip
if zipfile.is_zipfile(file_):
z = zipfile.ZipFile(file_)
try:
# NOTE: zipfile module prior to Py 2.7.4 is insecure!
# https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
z.extractall(path=extract_dir, pwd=password)
for uzfile in z.namelist():
unzipped_file = open(os.path.join(extract_dir, uzfile))
f_name, full_path = save_hashed_filename(unzipped_file, True)
tid = queue_task(uzfile, f_name, full_path, metadata, rescan=rescan)
task_id_list.append(tid)
except RuntimeError as e:
msg = "ERROR: Failed to extract " + str(file_) + ' - ' + str(e)
return make_response(
jsonify({'Message': msg}),
HTTP_BAD_REQUEST)
# Extract a rar
elif rarfile.is_rarfile(file_):
r = rarfile.RarFile(file_)
try:
r.extractall(path=extract_dir, pwd=password)
for urfile in r.namelist():
unrarred_file = open(os.path.join(extract_dir, urfile))
f_name, full_path = save_hashed_filename(unrarred_file, True)
tid = queue_task(urfile, f_name, full_path, metadata, rescan=rescan)
task_id_list.append(tid)
except RuntimeError as e:
msg = "ERROR: Failed to extract " + str(file_) + ' - ' + str(e)
return make_response(
jsonify({'Message': msg}),
HTTP_BAD_REQUEST)
else:
# File was not an archive to extract
f_name, full_path = save_hashed_filename(file_)
tid = queue_task(original_filename, f_name, full_path, metadata, rescan=rescan)
task_id_list = [tid]
msg = {'task_ids': task_id_list}
return make_response(
jsonify({'Message': msg}),
HTTP_CREATED
)
@app.route('/api/v1/tasks/<task_id>/report', methods=['GET'])
def get_report(task_id):
'''
Return a JSON dictionary corresponding
to the given task ID.
'''
download = request.args.get('d', default='False', type=str)[0].lower()
report_dict, success = get_report_dict(task_id)
if success:
if (download == 't' or download == 'y' or download == '1'):
# raw JSON
response = make_response(jsonify(report_dict))
response.headers['Content-Type'] = 'application/json'
response.headers['Content-Disposition'] = 'attachment; filename=%s.json' % task_id
return response
else:
# processed JSON intended for web UI
report_dict = _pre_process(report_dict)
return jsonify(report_dict)
else:
return jsonify(report_dict)
def _pre_process(report_dict=None):
    '''
    Returns a JSON dictionary where a series of pre-processing steps are
    executed on report_dict.
    '''
    if report_dict is None:
        report_dict = {}
    # pop unnecessary keys
if report_dict.get('Report', {}).get('ssdeep', {}):
for k in ['chunksize', 'chunk', 'double_chunk']:
try:
report_dict['Report']['ssdeep'].pop(k)
except KeyError as e:
pass
report_dict = _add_links(report_dict)
return report_dict
def _add_links(report_dict):
'''
Returns a JSON dictionary where certain keys and/or values are replaced
with hyperlinks.
'''
web_loc = api_config['api']['web_loc']
# ssdeep matches
matches_dict = report_dict.get('Report', {}) \
.get('ssdeep', {}) \
.get('matches', {})
if matches_dict:
links_dict = {}
# k=SHA256, v=ssdeep.compare result
for k, v in matches_dict.items():
t_id = db.exists(k)
if t_id:
url = '{h}/report/{t_id}'.format(h=web_loc, t_id=t_id)
href = _linkify(k, url, True)
links_dict[href] = v
else:
links_dict[k] = v
# replace with updated dict
report_dict['Report']['ssdeep']['matches'] = links_dict
return report_dict
#TODO: should we move these helper functions to separate file?
def _linkify(s, url, new_tab=True):
'''
Return string s as HTML a tag with href pointing to url.
'''
return '<a{new_tab} href="{url}">{s}</a>'.format(
new_tab=' target="_blank"' if new_tab else '',
url=url,
s=s)
@app.route('/api/v1/tasks/<task_id>/file', methods=['GET'])
def files_get_task(task_id):
# try to get report dict
report_dict, success = get_report_dict(task_id)
if not success:
return jsonify(report_dict)
# okay, we have report dict; get sha256
sha256 = report_dict.get('Report', {}).get('SHA256')
if sha256:
return files_get_sha256_helper(
sha256,
request.args.get('raw', default=None))
else:
return jsonify({'Error': 'sha256 not in report!'})
@app.route('/api/v1/tasks/<task_id>/maec', methods=['GET'])
def get_maec_report(task_id):
# try to get report dict
report_dict, success = get_report_dict(task_id)
if not success:
return jsonify(report_dict)
# okay, we have report dict; get cuckoo task ID
try:
cuckoo_task_id = report_dict['Report']['Cuckoo Sandbox']['info']['id']
except KeyError:
return jsonify({'Error': 'No MAEC report found for that task!'})
# Get the MAEC report from Cuckoo
try:
maec_report = requests.get(
'{}/v1/tasks/report/{}/maec'.format(ms_config.get('Cuckoo', {}).get('API URL', ''), cuckoo_task_id)
)
    except requests.RequestException:
return jsonify({'Error': 'No MAEC report found for that task!'})
# raw JSON
response = make_response(jsonify(maec_report.json()))
response.headers['Content-Type'] = 'application/json'
response.headers['Content-Disposition'] = 'attachment; filename=%s.json' % task_id
return response
def get_report_dict(task_id):
task = db.get_task(task_id)
if not task:
abort(HTTP_NOT_FOUND)
if task.task_status == 'Complete':
return {'Report': handler.get_report(task.sample_id, task.timestamp)}, True
elif task.task_status == 'Pending':
return {'Report': 'Task still pending'}, False
else:
return {'Report': 'Task failed'}, False
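# NOTE (added): the DELETE rule below duplicates the one registered above for
# delete_task(); with identical rules and methods, Werkzeug appears to
# dispatch to the first registration, so delete_report() is likely shadowed.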
@app.route('/api/v1/tasks/<task_id>', methods=['DELETE'])
def delete_report(task_id):
'''
Delete the specified task. Return deleted message.
'''
task = db.get_task(task_id)
if not task:
abort(HTTP_NOT_FOUND)
if handler.delete(task.report_id):
return jsonify({'Message': 'Deleted'})
else:
abort(HTTP_NOT_FOUND)
@app.route('/api/v1/tags/', methods=['GET'])
def taglist():
'''
Return a list of all tags currently in use.
'''
response = handler.get_tags()
return jsonify({'Tags': response})
@app.route('/api/v1/tasks/<task_id>/tags', methods=['POST', 'DELETE'])
def tags(task_id):
'''
Add/Remove the specified tag to the specified task.
'''
task = db.get_task(task_id)
if not task:
abort(HTTP_NOT_FOUND)
tag = request.values.get('tag', '')
if request.method == 'POST':
response = handler.add_tag(task.sample_id, tag)
if not response:
abort(HTTP_BAD_REQUEST)
return jsonify({'Message': 'Tag Added'})
elif request.method == 'DELETE':
response = handler.remove_tag(task.sample_id, tag)
if not response:
abort(HTTP_BAD_REQUEST)
return jsonify({'Message': 'Tag Removed'})
@app.route('/api/v1/tasks/<task_id>/notes', methods=['GET'])
def get_notes(task_id):
'''
Get one or more analyst notes/comments associated with the specified task.
'''
task = db.get_task(task_id)
if not task:
abort(HTTP_NOT_FOUND)
if ('ts' in request.args and 'uid' in request.args):
ts = request.args.get('ts', '')
uid = request.args.get('uid', '')
response = handler.get_notes(task.sample_id, [ts, uid])
else:
response = handler.get_notes(task.sample_id)
if not response:
abort(HTTP_BAD_REQUEST)
if 'hits' in response and 'hits' in response['hits']:
response = response['hits']['hits']
try:
for hit in response:
hit['_source']['text'] = Markup.escape(hit['_source']['text'])
    except Exception:
pass
return jsonify(response)
@app.route('/api/v1/tasks/<task_id>/notes', methods=['POST'])
def add_note(task_id):
'''
Add an analyst note/comment to the specified task.
'''
task = db.get_task(task_id)
if not task:
abort(HTTP_NOT_FOUND)
response = handler.add_note(task.sample_id, request.form.to_dict())
if not response:
abort(HTTP_BAD_REQUEST)
return jsonify(response)
@app.route('/api/v1/tasks/<task_id>/notes/<note_id>', methods=['PUT', 'DELETE'])
def edit_note(task_id, note_id):
'''
Modify/remove the specified analyst note/comment.
'''
task = db.get_task(task_id)
if not task:
abort(HTTP_NOT_FOUND)
if request.method == 'PUT':
response = handler.edit_note(task.sample_id, note_id,
Markup(request.form.get('text', '')).striptags())
elif request.method == 'DELETE':
response = handler.delete_note(task.sample_id, note_id)
if not response:
abort(HTTP_BAD_REQUEST)
return jsonify(response)
@app.route('/api/v1/files/<sha256>', methods=['GET'])
# get raw file - /api/v1/files/get/<sha256>?raw=true
def files_get_sha256(sha256):
'''
Returns binary from storage. Defaults to password protected zipfile.
'''
# is there a robust way to just get this as a bool?
raw = request.args.get('raw', default='False', type=str)
return files_get_sha256_helper(sha256, raw)
def files_get_sha256_helper(sha256, raw=None):
'''
Returns binary from storage. Defaults to password protected zipfile.
'''
file_path = os.path.join(api_config['api']['upload_folder'], sha256)
if not os.path.exists(file_path):
abort(HTTP_NOT_FOUND)
with open(file_path, "rb") as fh:
fh_content = fh.read()
    raw = (raw or 'f')[0].lower()
if raw == 't' or raw == 'y' or raw == '1':
response = make_response(fh_content)
response.headers['Content-Type'] = 'application/octet-stream; charset=UTF-8'
response.headers['Content-Disposition'] = 'inline; filename={}.bin'.format(sha256) # better way to include fname?
else:
# ref: https://github.com/crits/crits/crits/core/data_tools.py#L122
rawname = sha256 + '.bin'
with open(os.path.join('/tmp/', rawname), 'wb') as raw_fh:
raw_fh.write(fh_content)
zipname = sha256 + '.zip'
args = ['/usr/bin/zip', '-j',
os.path.join('/tmp', zipname),
os.path.join('/tmp', rawname),
'-P', 'infected']
proc = subprocess.Popen(args)
wait_seconds = 30
while proc.poll() is None and wait_seconds:
time.sleep(1)
wait_seconds -= 1
if proc.returncode:
            return make_response(jsonify({'Error': 'Failed to create zip ({})'.format(proc.returncode)}))
elif not wait_seconds:
proc.terminate()
return make_response(jsonify({'Error': 'Process timed out'}))
else:
with open(os.path.join('/tmp', zipname), 'rb') as zip_fh:
zip_data = zip_fh.read()
if len(zip_data) == 0:
return make_response(jsonify({'Error': 'Zip file empty'}))
response = make_response(zip_data)
response.headers['Content-Type'] = 'application/zip; charset=UTF-8'
response.headers['Content-Disposition'] = 'inline; filename={}.zip'.format(sha256)
return response
@app.route('/api/v1/analytics/ssdeep_compare', methods=['GET'])
def run_ssdeep_compare():
'''
Runs ssdeep compare analytic and returns success / error message.
'''
try:
if DISTRIBUTED:
# Publish task to Celery
ssdeep_compare_celery.delay()
return make_response(jsonify({ 'Message': 'Success' }))
else:
ssdeep_analytic = SSDeepAnalytic()
ssdeep_analytic.ssdeep_compare()
return make_response(jsonify({ 'Message': 'Success' }))
except Exception as e:
return make_response(
jsonify({'Message': 'Unable to complete request.'}),
HTTP_BAD_REQUEST)
@app.route('/api/v1/analytics/ssdeep_group', methods=['GET'])
def run_ssdeep_group():
'''
Runs ssdeep group analytic and returns list of groups as a list.
'''
try:
ssdeep_analytic = SSDeepAnalytic()
groups = ssdeep_analytic.ssdeep_group()
return make_response(jsonify({ 'groups': groups }))
except Exception as e:
return make_response(
jsonify({'Message': 'Unable to complete request.'}),
HTTP_BAD_REQUEST)
@app.route('/api/v1/tasks/<task_id>/pdf', methods=['GET'])
def get_pdf_report(task_id):
'''
Generates a PDF version of a JSON report.
'''
report_dict, success = get_report_dict(task_id)
if not success:
return jsonify(report_dict)
pdf = create_pdf_document(MS_WD, report_dict)
response = make_response(pdf)
response.headers['Content-Type'] = 'application/pdf'
response.headers['Content-Disposition'] = 'attachment; filename=%s.pdf' % task_id
return response
if __name__ == '__main__':
if not os.path.isdir(api_config['api']['upload_folder']):
print('Creating upload dir')
os.makedirs(api_config['api']['upload_folder'])
if not DISTRIBUTED:
exit_signal = multiprocessing.Value('b')
exit_signal.value = False
ms_process = multiprocessing.Process(
target=multiscanner_process,
args=(work_queue, exit_signal)
)
ms_process.start()
app.run(host=api_config['api']['host'], port=api_config['api']['port'])
if not DISTRIBUTED:
ms_process.join()
| awest1339/multiscanner | utils/api.py | Python | mpl-2.0 | 30,509 |
#!/usr/bin/python3
from lxml import etree
import sys
class Word:
def __init__(self):
self.word = ''
self.pos = ''
self.props = []
def __hash__(self):
return (self.word + self.pos).__hash__()
    def __eq__(self, other):
        # Python 3: cmp()/__cmp__ are gone, and set membership needs
        # __eq__ alongside __hash__.
        if self.word != other.word or self.pos != other.pos:
            return False
        # FIXME: which of two "equal" words survives in the set is arbitrary;
        # there is no telling which one comes out of the set first.
        if self.pos == '명사':
            return True
        # props are sorted before insertion, so list equality suffices.
        return self.props == other.props
######################################################################
if len(sys.argv) < 2:
sys.exit(1)
filename = sys.argv[1]
doc = etree.parse(open(filename))
root = doc.getroot()
wordset = set()
for item in root:
w = Word()
for field in item:
if field.tag == 'word':
w.word = field.text
elif field.tag == 'pos':
w.pos = field.text
elif field.tag == 'props' and field.text:
w.props = field.text.split(',')
w.props.sort()
if w in wordset:
        sys.stderr.write('%s (%s)\n' % (w.word, w.pos))
else:
wordset.add(w)
| changwoo/hunspell-dict-ko | utils/findduplicates.py | Python | mpl-2.0 | 1,426 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Future Modules:
from __future__ import annotations
# Built-in Modules:
import threading
from collections.abc import Callable
from typing import Any, Union
class BaseDelay(threading.Thread):
"""
Implements the base delay class.
"""
_delays: list[threading.Thread] = []
def __init__(
self,
duration: float,
count: Union[int, None],
function: Callable[..., Any],
*args: Any,
**kwargs: Any,
) -> None:
"""
Defines the constructor for the object.
Args:
duration: The amount of time (in seconds) to delay between iterations.
count: The number of iterations to delay, or None to repeat indefinitely.
function: The function to be called at each iteration.
*args: Positional arguments to be passed to the called function.
**kwargs: Key-word only arguments to be passed to the called function.
"""
if count is not None and count < 0:
raise ValueError("count must be a positive number or None.")
super().__init__()
self.daemon: bool = True
self._duration: float = duration
self._count: Union[int, None] = count
self._function: Callable[..., Any] = function
self._args: tuple[Any, ...] = args
self._kwargs: dict[str, Any] = kwargs
self._finished: threading.Event = threading.Event()
def stop(self) -> None:
"""Stops an active delay."""
self._finished.set()
def run(self) -> None:
try:
self._delays.append(self)
while not self._finished.is_set() and self._count != 0:
self._finished.wait(self._duration)
if not self._finished.is_set():
self._function(*self._args, **self._kwargs)
if self._count is not None:
self._count -= 1
finally:
del self._function, self._args, self._kwargs
self._delays.remove(self)
if not self._finished.is_set():
self.stop()
class Delay(BaseDelay):
"""
Implements a delay which automatically starts upon creation.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.start()
class OneShot(Delay):
"""
Implements a delay which is run only once.
"""
def __init__(self, duration: float, function: Callable[..., Any], *args: Any, **kwargs: Any) -> None:
"""
Defines the constructor for the object.
Args:
duration: The amount of time (in seconds) to delay.
function: The function to be called when the delay completes.
*args: Positional arguments to be passed to the called function.
**kwargs: Key-word only arguments to be passed to the called function.
"""
super().__init__(duration, 1, function, *args, **kwargs)
class Repeating(Delay):
"""
Implements a delay which runs indefinitely.
"""
def __init__(self, duration: float, function: Callable[..., Any], *args: Any, **kwargs: Any) -> None:
"""
Defines the constructor for the object.
Args:
duration: The amount of time (in seconds) to delay between iterations.
function: The function to be called at each iteration.
*args: Positional arguments to be passed to the called function.
**kwargs: Key-word only arguments to be passed to the called function.
"""
super().__init__(duration, None, function, *args, **kwargs)
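# Illustrative usage sketch (added note, not part of the original module):
#
#     OneShot(2.0, print, "fires once, two seconds from now")
#     heartbeat = Repeating(1.0, print, "fires every second")
#     heartbeat.stop()  # ends the repeating delay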
| nstockton/mapperproxy-mume | mapper/delays.py | Python | mpl-2.0 | 3,316 |
from tqdm import tqdm
from django.core.management.base import BaseCommand
from django.db.models import Exists, OuterRef
from ...models import Locality, Operator, Service
class Command(BaseCommand):
def handle(self, *args, **options):
for locality in tqdm(Locality.objects.with_documents()):
locality.search_vector = locality.document
locality.save(update_fields=['search_vector'])
has_services = Exists(Service.objects.filter(current=True, operator=OuterRef('pk')))
for operator in tqdm(Operator.objects.with_documents().filter(has_services)):
operator.search_vector = operator.document
operator.save(update_fields=['search_vector'])
print(Operator.objects.filter(~has_services).update(search_vector=None))
for service in tqdm(Service.objects.with_documents().filter(current=True)):
service.search_vector = service.document
service.save(update_fields=['search_vector'])
print(Service.objects.filter(current=False).update(search_vector=None))
| jclgoodwin/bustimes.org.uk | busstops/management/commands/update_search_indexes.py | Python | mpl-2.0 | 1,072 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-01 08:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('os2webscanner', '0013_auto_20180501_1006'),
]
operations = [
migrations.AlterModelTable(
name='scan',
table='os2webscanner_scan',
),
]
| os2webscanner/os2webscanner | django-os2webscanner/os2webscanner/migrations/0014_auto_20180501_1010.py | Python | mpl-2.0 | 408 |
class A(object):
def something(self):
        return 3
| caterinaurban/Typpete | typpete/unittests/inference/explicit_object_superclass.py | Python | mpl-2.0 | 58 |
# -*- coding: utf-8 -*-
# (c) 2015 Incaser Informatica S.L. - Sergio Teruel
# (c) 2015 Incaser Informatica S.L. - Carlos Dauden
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
from openerp.tools.float_utils import float_round
import decimal
tipo_articulo = {'product': 'M', 'consu': 'M', 'service': 'I'}
codigo_empresa = 1
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
sagelc_export = fields.Boolean(string='Exported', default=False)
sagelc_code = fields.Char(string='SageLC Code', size=15)
def sanitize_arg(self, val):
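        # Sage/Logic appears to store these decimals as fixed-point values
        # scaled by 10**10 (an assumption inferred from the conversion below).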
if isinstance(val, decimal.Decimal):
            return float(val) / 10000000000.0
else:
return val
@api.multi
def export_records(self):
if self[0].type == 'out_invoice':
self.export_records_customer()
else:
self.export_records_supplier()
@api.multi
def export_records_customer(self):
db_obj = self.env['base.external.dbsource']
db_sage = db_obj.search([('name', '=', 'Logic')])
for invoice in self:
dic_equiv = {}
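            # 'WHERE 1=?' with parameter 2 (i.e. 1=2) matches no rows; these
            # queries only discover each table's column names via 'cols'.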
sql = 'SELECT * FROM CabeceraAlbaranCliente WHERE 1=?'
            cab_cols = db_sage.execute(sql, (2,), True)['cols']
            sql = 'SELECT * FROM LineasAlbaranCliente WHERE 1=?'
            lin_cols = db_sage.execute(sql, (2,), True)['cols']
sql = 'SELECT * FROM Clientes WHERE CodigoEmpresa=? AND CodigoCliente=?'
clientes = db_sage.execute(sql, (codigo_empresa, invoice.partner_id.sagelc_customer_ref))
cli = clientes[0]
for pos in range(len(cli)):
col = cli.cursor_description[pos][0]
if col in cab_cols:
dic_equiv['[%s]' % col] = self.sanitize_arg(cli[pos])
sql = 'SELECT * FROM ClientesConta WHERE CodigoEmpresa=? AND CodigoCuenta=?'
clientes_conta = db_sage.execute(sql, (codigo_empresa, cli.CodigoContable))
cli_conta = clientes_conta[0]
for pos in range(len(cli_conta)):
col = cli_conta.cursor_description[pos][0]
if col in cab_cols:
dic_equiv['[%s]' % col] = self.sanitize_arg(cli_conta[pos])
sql = 'SELECT * FROM ClientesProveedores WHERE SiglaNacion=? AND CifDni=?'
clientes_prov = db_sage.execute(sql, (cli.SiglaNacion, cli.CifDni))
cli_prov = clientes_prov[0]
for pos in range(len(cli_prov)):
col = cli_prov.cursor_description[pos][0]
if col in cab_cols:
dic_equiv['[%s]' % col] = self.sanitize_arg(cli_prov[pos])
dic_man = {
'[CodigoEmpresa]': codigo_empresa,
'[EjercicioAlbaran]': invoice.date_invoice[:4],
'[SerieAlbaran]': 'OD',
'[NumeroAlbaran]': invoice.id,
'[IdDelegacion]': 'CAS',
'[CodigoCliente]': invoice.partner_id.sagelc_customer_ref,
'[FechaAlbaran]': invoice.date_invoice,
'[NumeroLineas]': len(invoice.invoice_line),
}
dic_equiv.update(dic_man)
print(dic_equiv)
key_list = dic_equiv.keys()
params = '?,' * len(dic_equiv)
params = params[:-1]
sql = 'INSERT INTO CabeceraAlbaranCliente (%s) VALUES (%s)' % (
', '.join(key_list), params)
param_list = tuple([dic_equiv[key] for key in key_list])
db_sage.execute_void(sql, param_list)
vals = {'sagelc_export': True}
invoice.write(vals)
for line in invoice.invoice_line:
dic_equiv = {
'[CodigoEmpresa]': codigo_empresa,
'[EjercicioAlbaran]': invoice.date_invoice[:4],
'[SerieAlbaran]': 'OD',
'[NumeroAlbaran]': invoice.id,
'[Orden]': line.sequence,
'[FechaAlbaran]': invoice.date_invoice,
'[CodigoArticulo]': line.product_id.sagelc_code,
'[CodigoAlmacen]': '1',
'[DescripcionArticulo]':
line.product_id.name and
line.product_id.name[:40] or '',
'[DescripcionLinea]':
line.name and line.name.replace('\n', '\r\n') or '',
'[FactorConversion_]': 1,
'[AcumulaEstadistica_]': -1,
'[CodigoTransaccion]': 1,
'[GrupoIva]': 1,
'[CodigoIva]': 21,
'[UnidadesServidas]': line.quantity,
'[Unidades]': line.quantity,
'[Unidades2_]': line.quantity,
'[Precio]': line.price_unit,
'[PrecioRebaje]': line.price_unit,
'[PrecioCoste]': line.product_id.standard_price,
'[%Descuento]': line.discount,
'[%Iva]': 21,
'[TipoArticulo]': tipo_articulo[line.product_id.type],
}
print(dic_equiv)
key_list = dic_equiv.keys()
params = '?,' * len(dic_equiv)
params = params[:-1]
sql = 'INSERT INTO LineasAlbaranCliente (%s) VALUES (%s)' % (
', '.join(key_list), params)
param_list = tuple([dic_equiv[key] for key in key_list])
db_sage.execute_void(sql, param_list)
@api.multi
def export_records_supplier(self):
db_obj = self.env['base.external.dbsource']
db_sage = db_obj.search([('name', '=', 'Logic')])
codigo_empresa = 1
for invoice in self:
dic_equiv = {}
sql = 'SELECT * FROM CabeceraAlbaranProveedor WHERE 1=?'
            cab_cols = db_sage.execute(sql, (2,), True)['cols']
            sql = 'SELECT * FROM LineasAlbaranProveedor WHERE 1=?'
            lin_cols = db_sage.execute(sql, (2,), True)['cols']
sql = 'SELECT * FROM Proveedores WHERE CodigoEmpresa=? AND CodigoProveedor=?'
proveedores = db_sage.execute(sql, (codigo_empresa, invoice.partner_id.sagelc_supplier_ref))
prov = proveedores[0]
for pos in range(len(prov)):
col = prov.cursor_description[pos][0]
if col in cab_cols:
dic_equiv['[%s]' % col] = self.sanitize_arg(prov[pos])
sql = 'SELECT * FROM ClientesConta WHERE CodigoEmpresa=? AND CodigoCuenta=?'
clientes_conta = db_sage.execute(sql, (codigo_empresa, prov.CodigoContable))
cli_conta = clientes_conta[0]
for pos in range(len(cli_conta)):
col = cli_conta.cursor_description[pos][0]
if col in cab_cols:
dic_equiv['[%s]' % col] = self.sanitize_arg(cli_conta[pos])
sql = 'SELECT * FROM ClientesProveedores WHERE SiglaNacion=? AND CifDni=?'
clientes_prov = db_sage.execute(sql, (prov.SiglaNacion, prov.CifDni))
cli_prov = clientes_prov[0]
for pos in range(len(cli_prov)):
col = cli_prov.cursor_description[pos][0]
if col in cab_cols:
dic_equiv['[%s]' % col] = self.sanitize_arg(cli_prov[pos])
dic_man = {
'[CodigoEmpresa]': codigo_empresa,
'[EjercicioAlbaran]': invoice.date_invoice[:4],
'[SerieAlbaran]': 'OD',
'[NumeroAlbaran]': invoice.id,
'[IdDelegacion]': 'CAS',
'[CodigoProveedor]': invoice.partner_id.sagelc_supplier_ref,
'[FechaAlbaran]': invoice.date_invoice,
'[NumeroLineas]': len(invoice.invoice_line),
}
dic_equiv.update(dic_man)
print(dic_equiv)
key_list = dic_equiv.keys()
params = '?,' * len(dic_equiv)
params = params[:-1]
sql = 'INSERT INTO CabeceraAlbaranProveedor (%s) VALUES (%s)' % (', '.join(key_list), params)
param_list = tuple([dic_equiv[key] for key in key_list])
db_sage.execute_void(sql, param_list)
vals = {'sagelc_export': True}
invoice.write(vals)
for line in invoice.invoice_line:
dic_equiv = {
'[CodigoEmpresa]': codigo_empresa,
'[EjercicioAlbaran]': invoice.date_invoice[:4],
'[SerieAlbaran]': 'OD',
'[NumeroAlbaran]': invoice.id,
'[Orden]': line.sequence,
'[FechaAlbaran]': invoice.date_invoice,
'[CodigoArticulo]': line.product_id.sagelc_code,
'[CodigoAlmacen]': '1',
'[DescripcionArticulo]': line.product_id.name,
'[DescripcionLinea]': line.name and line.name.replace('\n', '\r\n') or '',
'[FactorConversion_]': 1,
'[AcumulaEstadistica_]': -1,
'[CodigoTransaccion]': 1,
'[GrupoIva]': 1,
'[CodigoIva]': 21,
'[UnidadesRecibidas]': line.quantity,
'[Unidades]': line.quantity,
'[Unidades2_]': line.quantity,
'[Precio]': line.price_unit,
'[PrecioRebaje]': line.price_unit,
'[%Descuento]': line.discount,
'[%Iva]': 21,
'[TipoArticulo]': tipo_articulo[line.product_id.type],
}
print(dic_equiv)
key_list = dic_equiv.keys()
params = '?,' * len(dic_equiv)
params = params[:-1]
sql = 'INSERT INTO LineasAlbaranProveedor (%s) VALUES (%s)' % (', '.join(key_list), params)
param_list = tuple([dic_equiv[key] for key in key_list])
db_sage.execute_void(sql, param_list)
| incaser/incaser-odoo-addons | export_sagelc/models/account_invoice.py | Python | agpl-3.0 | 10,115 |
from .sources import TestSources
__all__ = ["TestSources"]
| kaini/newslist | server/newslist/tests/__init__.py | Python | agpl-3.0 | 60 |
from django.apps import AppConfig
class ClassifConfig(AppConfig):
name = 'classif'
| bricaud/wevia | classif/apps.py | Python | agpl-3.0 | 89 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Product GTIN module for Odoo
# Copyright (C) 2004-2011 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 Camptocamp (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
from openerp.osv import orm, fields
import operator
CONSTRAINT_MESSAGE = 'Error: Invalid EAN/GTIN code'
HELP_MESSAGE = ("EAN8 EAN13 UPC JPC GTIN \n"
"http://en.wikipedia.org/wiki/Global_Trade_Item_Number")
def is_pair(x):
return not x % 2
def check_ean8(eancode):
"""Check if the given ean code answer ean8 requirements
For more details: http://en.wikipedia.org/wiki/EAN-8
:param eancode: string, ean-8 code
:return: boolean
"""
if not eancode or not eancode.isdigit():
return False
if not len(eancode) == 8:
_logger.warn('Ean8 code has to have a length of 8 characters.')
return False
sum = 0
ean_len = int(len(eancode))
for i in range(ean_len-1):
if is_pair(i):
sum += 3 * int(eancode[i])
else:
sum += int(eancode[i])
check = 10 - operator.mod(sum, 10)
if check == 10:
check = 0
return check == int(eancode[-1])
def check_upc(upccode):
"""Check if the given code answers upc requirements
For more details:
http://en.wikipedia.org/wiki/Universal_Product_Code
:param upccode: string, upc code
:return: bool
"""
if not upccode or not upccode.isdigit():
return False
if not len(upccode) == 12:
_logger.warn('UPC code has to have a length of 12 characters.')
return False
sum_pair = 0
ean_len = int(len(upccode))
for i in range(ean_len-1):
if is_pair(i):
sum_pair += int(upccode[i])
sum = sum_pair * 3
for i in range(ean_len-1):
if not is_pair(i):
sum += int(upccode[i])
    check = 10 - operator.mod(sum, 10)
    if check == 10:
        check = 0
return check == int(upccode[-1])
def check_ean13(eancode):
"""Check if the given ean code answer ean13 requirements
For more details:
http://en.wikipedia.org/wiki/International_Article_Number_%28EAN%29
:param eancode: string, ean-13 code
:return: boolean
"""
if not eancode or not eancode.isdigit():
return False
if not len(eancode) == 13:
_logger.warn('Ean13 code has to have a length of 13 characters.')
return False
sum = 0
ean_len = int(len(eancode))
for i in range(ean_len-1):
pos = int(ean_len-2-i)
if is_pair(i):
sum += 3 * int(eancode[pos])
else:
sum += int(eancode[pos])
check = 10 - operator.mod(sum, 10)
if check == 10:
check = 0
return check == int(eancode[-1])
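# Worked example (added note): for the EAN-13 code '4006381333931' the
# weighted sum of the first 12 digits is 89, giving check digit 1, so
# check_ean13('4006381333931') returns True.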
def check_ean11(eancode):
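    # Unimplemented stub: returns None, so 11-digit codes never validate
    # through check_ean().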
pass
def check_gtin14(eancode):
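    # Unimplemented stub: returns None, so 14-digit GTINs never validate
    # through check_ean().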
pass
DICT_CHECK_EAN = {8: check_ean8,
11: check_ean11,
12: check_upc,
13: check_ean13,
14: check_gtin14,
}
def check_ean(eancode):
if not eancode:
return True
if not len(eancode) in DICT_CHECK_EAN:
return False
try:
int(eancode)
except:
return False
return DICT_CHECK_EAN[len(eancode)](eancode)
class product_product(orm.Model):
_inherit = "product.product"
def _check_ean_key(self, cr, uid, ids):
for rec in self.browse(cr, uid, ids):
if not check_ean(rec.ean13):
return False
return True
_columns = {
'ean13': fields.char(
'EAN/GTIN', size=14,
help="Code for %s" % HELP_MESSAGE),
}
_constraints = [(_check_ean_key, CONSTRAINT_MESSAGE, ['ean13'])]
class product_packaging(orm.Model):
_inherit = "product.packaging"
def _check_ean_key(self, cr, uid, ids):
for rec in self.browse(cr, uid, ids):
if not check_ean(rec.ean):
return False
return True
_columns = {
'ean': fields.char(
'EAN', size=14,
help='Barcode number for %s' % HELP_MESSAGE),
}
_constraints = [(_check_ean_key, CONSTRAINT_MESSAGE, ['ean'])]
class res_partner(orm.Model):
_inherit = "res.partner"
def _check_ean_key(self, cr, uid, ids):
for rec in self.browse(cr, uid, ids):
if not check_ean(rec.ean13):
return False
return True
_columns = {
'ean13': fields.char(
'EAN', size=14,
help="Code for %s" % HELP_MESSAGE),
}
_constraints = [(_check_ean_key, CONSTRAINT_MESSAGE, ['ean13'])]
| cgstudiomap/cgstudiomap | main/parts/product-attribute/product_gtin/product_gtin.py | Python | agpl-3.0 | 5,465 |
from LiSE import Engine
from LiSE.examples import kobold
from .util import ELiDEAppTest, window_with_widget, idle_until
class TestPythonEditor(ELiDEAppTest):
def setUp(self):
super().setUp()
with Engine(self.prefix) as eng:
kobold.inittest(eng)
def _get_actions_box(self):
app = self.app
        idle_until(lambda: hasattr(app, 'mainscreen')
                   and app.mainscreen.mainview
                   and app.mainscreen.statpanel
                   and hasattr(app.mainscreen, 'gridview'))
app.funcs.toggle()
idle_until(lambda: 'actions' in app.funcs.ids, 100, 'Never got actions box')
actions_box = app.funcs.ids.actions
idle_until(lambda: actions_box.editor, 100, 'Never got FuncEditor')
idle_until(lambda: actions_box.storelist, 100, 'Never got StoreList')
idle_until(lambda: actions_box.storelist.data, 100, 'Never got StoreList data')
return actions_box
def test_show_code(self):
app = self.app
win = window_with_widget(app.build())
actions_box = self._get_actions_box()
last = actions_box.storelist.data[-1]['name']
actions_box.storelist.selection_name = last
idle_until(lambda: 'funname' in actions_box.editor.ids, 100, 'Never got function input widget')
idle_until(lambda: actions_box.editor.ids.funname.hint_text, 100, "Never got function name")
idle_until(lambda: 'code' in actions_box.editor.ids, 100, "Never got code editor widget")
idle_until(lambda: actions_box.editor.ids.code.text, 100, "Never got source code")
def test_create_action(self):
app = self.app
win = window_with_widget(app.build())
actions_box = self._get_actions_box()
actions_box.editor.ids.funname.text = 'new_func'
actions_box.editor.ids.code.text = 'return "Hello, world!"'
app.stop()
with Engine(self.prefix) as eng:
assert hasattr(eng.action, 'new_func')
| LogicalDash/LiSE | ELiDE/ELiDE/tests/test_python_editor.py | Python | agpl-3.0 | 1,944 |
# -*- coding: utf-8 -*-
import minimongo
import osm
import pymongo.collection
import logging
import re
import stitch
def is_area(way):
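    # Heuristic: a way is an area when it is closed (first node == last node)
    # and either tagged area=yes or free of highway/barrier tags, which would
    # indicate a linear feature instead.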
    return len(way.nodes)\
        and way.nodes[0] == way.nodes[-1]\
        and ('area' in way.tags and way.tags['area'] == 'yes'
             or ('highway' not in way.tags
                 and 'barrier' not in way.tags))
def get_poly(nodes):
"""Get poly from a list of node ids"""
poly = []
for nodeid in nodes:
for node in osm.Node.collection.find({"_id": nodeid}):
poly.append((node.lon, node.lat))
return poly
def truish(x):
return x == "yes" or x == "true" or x == "1"
def falsish(x):
return x == "no" or x == "false" or x == "0"
class Sink(object):
def processWay(self, way):
poly = get_poly(way.nodes)
if is_area(way):
if Area.connection.planet.multipolygon_ways.find({"_id": way._id}).count():
#logging.debug("Skipping way %d as it belongs to a multipolygon relation", way._id)
return
parea = Area()
parea.id = way._id
parea.outer = poly
parea.tags = way.tags
parea.save()
else:
typ = Way.type_from_tags(way.tags)
if not typ is None:
pway = Way(way._id, poly, typ)
pway.attr_from_tags(way.tags)
pway.save()
else:
pline = Line(way._id, poly, way.tags)
pline.save()
def processNode(self, node):
pass
def processRelation(self, relation):
typ = relation.tags.get('type', None)
if typ == 'multipolygon':
self.processRelationMultiPolygon(relation)
def processRelationMultiPolygon(self, rel):
"""http://wiki.openstreetmap.org/wiki/Relation:multipolygon"""
memids = []
#logging.debug("%d members", len(rel.members))
#logging.debug("relation %d", rel._id)
#if len(rel.members)> 50:
# logging.debug('processRelationMultiPolygon: big rel')
# logging.debug("processing %d members", len(rel.members))
# logging.debug(rel._id)
# logging.debug(rel.tags)
outer_stitch = stitch.Stitch(rel['_id'], True)
inner_stitch = stitch.Stitch(rel['_id'], True)
for m in rel.members:
try:
if m['type'] == 'way':
way = osm.Way.collection.find_one({"_id": m['ref']})
if way:
#logging.debug(way._id)
try:
if m['role'] == 'outer':
outer_stitch.add_id(get_poly(way.nodes), way._id)
elif m['role'] == 'inner':
inner_stitch.add_id(get_poly(way.nodes), way._id)
except RuntimeError,e:
print e, 'way id: ', way['_id'], 'relation id', rel['_id']
memids.append(way['_id'])
try:
Area.connection.planet.multipolygon_ways.insert({"_id": way._id}, safe=True)
except pymongo.errors.DuplicateKeyError:
pass
else:
logging.debug("cound't find way id: %d in multipolygon relation id %d", m['ref'], rel._id)
if m['role'] == 'outer':
outer_stitch.dontClose()
elif m['role'] == 'inner':
inner_stitch.autoClose()
except KeyError:
logging.warn("processRelationMultiPolygon: KeyError")
return
parea = Area()
parea.id = rel._id
try:
parea.outer = outer_stitch.getPolygons()
except RuntimeError, e:
logging.warn("processRelationMultiPolygon exception: rel {0}: {1}".format(rel["_id"], e))
logging.warn(memids)
try:
parea.inner = inner_stitch.getPolygons()
except RuntimeError, e:
logging.warn("processRelationMultiPolygon exception: rel {0}: {1}".format(rel["_id"], e))
logging.warn(memids)
parea.tags = rel.tags
parea.memids = memids
parea.save()
#logging.debug("done")
def processMember(self, member):
pass
class Area(minimongo.Model):
class Meta:
database = 'planet'
# def __init__(self, initial=None, **kw):
# super(Area, self).__init__(initial, **kw)
#
# def __init__(self, id, outer, inner, tags):
# # might not be unique, can't use _id as we save areas from Ways and from Relations
# self.id = id
# self.outer = outer
# self.inner = inner
# self.tags = tags
#
class Line(minimongo.Model):
class Meta:
database = 'planet'
def __init__(self, id, poly, tags):
self._id = id
self.poly = poly
self.tags = tags
class Way(minimongo.Model):
class Meta:
database = 'planet'
HW_MOTORWAY = 0
HW_MOTORWAY_LINK = 1
HW_TRUNK = 2
HW_TRUNK_LINK = 3
HW_PRIMARY = 4
HW_PRIMARY_LINK = 5
HW_SECONDARY = 6
HW_SECONDARY_LINK = 7
HW_TERTIARY = 8
HW_TERTIARY_LINK = 9
HW_LIVING_STREET = 10
HW_PEDESTRIAN = 11
HW_RESIDENTIAL = 12
HW_UNCLASSIFIED = 13
HW_SERVICE = 14
HW_TRACK = 15
HW_BUS_GUIDEWAY = 16
HW_RACEWAY = 17
HW_ROAD = 18
HW_PATH = 19
HW_FOOTWAY = 20
HW_CYCLEWAY = 21
HW_BRIDLEWAY = 22
HW_STEPS = 23
HW_PROPOSED = 24
HW_CONSTRUCTION = 25
def __init__(self, id, poly, typ):
self._id = id
self.poly = poly
self.t = typ
@staticmethod
def type_from_tags(tags):
if tags.has_key('highway'):
try:
val = tags['highway']
attr = 'HW_{0}'.format(val.upper())
hw = getattr(Way, attr)
return hw
except AttributeError:
pass
except UnicodeEncodeError:
pass
return None
def attr_from_tags(self, tags):
self.car_forward = True
self.car_backward = True
self.bike = True
        if tags.has_key('oneway'):
            ow = tags['oneway']
            if truish(ow):
                self.car_backward = False
            elif ow == "-1":
                # oneway=-1: traffic flows against the way's drawn direction
                self.car_forward = False
                self.car_backward = True
            #elif falsish(ow):
            #    pass
        if tags.has_key('roundabout'):
            self.roundabout = True
            if not tags.has_key('oneway'):
                self.car_backward = False
        # motorways and motorway links are implicitly oneway unless tagged
        if (self.t == Way.HW_MOTORWAY or self.t == Way.HW_MOTORWAY_LINK)\
                and not tags.has_key('oneway'):
            self.car_forward = True
            self.car_backward = False
if tags.has_key('maxspeed'):
s = tags['maxspeed']
            m = re.match(r'\s*(\d+)\s*(\w*)\s*', s)
if m:
self.speedlimit = float(m.group(1))
if m.group(2) == 'mph':
self.speedlimit *= 1.609344
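                # Example: maxspeed '30 mph' becomes 30 * 1.609344 = 48.28 km/h;
                # bare numeric values such as '50' are assumed to be km/h already.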
# TODO: no_thru
if self.t >= Way.HW_PATH:
self.car_forward = False
self.car_backward = False
# bikes
if self.t <= Way.HW_MOTORWAY_LINK\
or self.t == Way.HW_FOOTWAY\
or self.t == Way.HW_STEPS:
self.bike = False
if truish(tags.get('bicycle', False)):
self.bike = True
| larroy/osmtransform | planet/__init__.py | Python | agpl-3.0 | 7,567 |
# -*- coding: utf-8 -*-
#
#
# Copyright 2015 Camptocamp SA
# Author: Alexandre Fayolle
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, api
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def over_budget(self):
self.ensure_one()
if self.order_type == 'donation':
return False
else:
return super(SaleOrder, self).over_budget()
@api.multi
def has_budget_holder(self):
self.ensure_one()
if self.order_type == 'donation':
return True
else:
return super(SaleOrder, self).has_budget_holder()
| jorsea/vertical-ngo | logistic_order_donation_budget/model/sale_order.py | Python | agpl-3.0 | 1,300 |
from fabric.api import run, cd, env
from fabric import state
DISTANT_PATH = '/www-data/click-and-deploy'
def pull():
with cd(DISTANT_PATH):
run('git pull')
def restart_services():
run('sudo supervisorctl restart click-and-deploy')
def deploy():
pull()
restart_services()
| Cerkinfo/click-and-deploy | apps/recipies/self.py | Python | agpl-3.0 | 299 |
"""Offer Utility Methods. """
import logging
import string # pylint: disable=W0402
from decimal import Decimal
from urllib.parse import urlencode
import bleach
import waffle
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from ecommerce_worker.sailthru.v1.tasks import send_offer_assignment_email, send_offer_update_email
from oscar.core.loading import get_model
from ecommerce.core.constants import ENABLE_BRAZE
from ecommerce.core.url_utils import absolute_redirect
from ecommerce.extensions.checkout.utils import add_currency
from ecommerce.extensions.offer.constants import OFFER_ASSIGNED
logger = logging.getLogger(__name__)
def _remove_exponent_and_trailing_zeros(decimal):
"""
Remove exponent and trailing zeros.
Arguments:
decimal (Decimal): Decimal number that needs to be modified
Returns:
decimal (Decimal): Modified decimal number without exponent and trailing zeros.
"""
return decimal.quantize(Decimal(1)) if decimal == decimal.to_integral() else decimal.normalize()
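# Worked example (illustrative, not from the original source):
# _remove_exponent_and_trailing_zeros(Decimal('10.00')) -> Decimal('10')
# _remove_exponent_and_trailing_zeros(Decimal('10.50')) -> Decimal('10.5')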
def get_discount_percentage(discount_value, product_price):
"""
Get discount percentage of discount value applied to a product price.
Arguments:
discount_value (float): Discount value
product_price (float): Price of a product the discount is used on
Returns:
float: Discount percentage
"""
return discount_value / product_price * 100 if product_price > 0 else 0.0
def get_discount_value(discount_percentage, product_price):
"""
Get discount value of discount percentage applied to a product price.
Arguments:
discount_percentage (float): Discount percentage
product_price (float): Price of a product the discount is used on
Returns:
float: Discount value
"""
return discount_percentage * product_price / 100.0
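# Sanity check (illustrative): the two helpers above are inverses for a fixed
# price, e.g. get_discount_percentage(25.0, 100.0) == 25.0 and
# get_discount_value(25.0, 100.0) == 25.0.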
def get_benefit_type(benefit):
""" Returns type of benefit using 'type' or 'proxy_class' attributes of Benefit object"""
_type = benefit.type
if not _type:
_type = getattr(benefit.proxy(), 'benefit_class_type', None)
return _type
def get_quantized_benefit_value(benefit):
"""
Returns the rounded value of the given benefit, without any decimal points.
"""
value = getattr(benefit.proxy(), 'benefit_class_value', benefit.value)
return _remove_exponent_and_trailing_zeros(Decimal(str(value)))
def format_benefit_value(benefit):
"""
Format benefit value for display based on the benefit type
Arguments:
benefit (Benefit): Benefit to be displayed
Returns:
benefit_value (str): String value containing formatted benefit value and type.
"""
Benefit = get_model('offer', 'Benefit')
benefit_value = get_quantized_benefit_value(benefit)
benefit_type = get_benefit_type(benefit)
if benefit_type == Benefit.PERCENTAGE:
benefit_value = _('{benefit_value}%').format(benefit_value=benefit_value)
else:
converted_benefit = add_currency(Decimal(benefit.value))
benefit_value = _('${benefit_value}').format(benefit_value=converted_benefit)
return benefit_value
def get_redirect_to_email_confirmation_if_required(request, offer, product):
"""
Render the email confirmation template if email confirmation is
required to redeem the offer.
We require email confirmation via account activation before an offer
can be redeemed if the site is configured to require account activation
or if the offer is restricted for use to learners with a specific
email domain. The learner needs to activate their account before we allow
them to redeem email domain-restricted offers, otherwise anyone could create
an account using an email address with a privileged domain and use the coupon
code associated with the offer.
Arguments:
request (HttpRequest): The current HttpRequest.
offer (ConditionalOffer): The offer to be redeemed.
        product (Product): The product being purchased with the offer.
Returns:
HttpResponse or None: An HttpResponse that redirects to the email confirmation view if required.
"""
require_account_activation = request.site.siteconfiguration.require_account_activation or offer.email_domains
if require_account_activation and not request.user.account_details(request).get('is_active'):
response = absolute_redirect(request, 'offers:email_confirmation')
course_id = product.course and product.course.id
if course_id:
response['Location'] += '?{params}'.format(params=urlencode({'course_id': course_id}))
return response
return None
def format_assigned_offer_email(greeting, closing, learner_email, code, redemptions_remaining, code_expiration_date):
"""
Arguments:
greeting (String): Email greeting (prefix)
closing (String): Email closing (suffix)
learner_email (String): Email of the customer who will receive the code.
code (String): Code for the user.
redemptions_remaining (Integer): Number of times the code can be redeemed.
code_expiration_date(Datetime): Date till code is valid.
Return the formatted email body for offer assignment.
"""
email_template = settings.OFFER_ASSIGNMENT_EMAIL_TEMPLATE
placeholder_dict = SafeDict(
REDEMPTIONS_REMAINING=redemptions_remaining,
USER_EMAIL=learner_email,
CODE=code,
EXPIRATION_DATE=code_expiration_date
)
return format_email(email_template, placeholder_dict, greeting, closing)
def send_assigned_offer_email(
subject,
greeting,
closing,
offer_assignment_id,
learner_email,
code,
redemptions_remaining,
code_expiration_date,
sender_alias,
base_enterprise_url=''):
"""
Arguments:
*subject*
The email subject
*email_greeting*
The email greeting (prefix)
*email_closing*
The email closing (suffix)
*offer_assignment_id*
Primary key of the entry in the offer_assignment model.
*learner_email*
Email of the customer who will receive the code.
*code*
Code for the user.
*redemptions_remaining*
Number of times the code can be redeemed.
*code_expiration_date*
Date till code is valid.
"""
email_body = format_assigned_offer_email(
greeting,
closing,
learner_email,
code,
redemptions_remaining,
code_expiration_date
)
if settings.DEBUG: # pragma: no cover
# Avoid breaking devstack when no such service is available.
logger.warning("Skipping Sailthru task 'send_offer_assignment_email' because DEBUG=true.") # pragma: no cover
return # pragma: no cover
send_offer_assignment_email.delay(learner_email, offer_assignment_id, subject, email_body, sender_alias, None,
base_enterprise_url)
def send_revoked_offer_email(
subject,
greeting,
closing,
learner_email,
code,
sender_alias,
):
"""
Arguments:
*subject*
The email subject
*email_greeting*
The email greeting (prefix)
*email_closing*
The email closing (suffix)
*learner_email*
Email of the customer who will receive the code.
*code*
Code for the user.
"""
email_template = settings.OFFER_REVOKE_EMAIL_TEMPLATE
placeholder_dict = SafeDict(
USER_EMAIL=learner_email,
CODE=code,
)
email_body = format_email(email_template, placeholder_dict, greeting, closing)
send_offer_update_email.delay(learner_email, subject, email_body, sender_alias)
def send_assigned_offer_reminder_email(
subject,
greeting,
closing,
learner_email,
code,
redeemed_offer_count,
total_offer_count,
code_expiration_date,
sender_alias,
base_enterprise_url=''):
"""
Arguments:
*subject*
The email subject
*email_greeting*
The email greeting (prefix)
*email_closing*
The email closing (suffix)
*learner_email*
Email of the customer who will receive the code.
*code*
Code for the user.
*redeemed_offer_count*
Number of times the code has been redeemed.
*total_offer_count*
Total number of offer assignments for this (code,email) pair
*code_expiration_date*
Date till code is valid.
*sender_alias*
Enterprise customer sender alias.
*base_enterprise_url*
Url for the enterprise's learner portal
"""
email_template = settings.OFFER_REMINDER_EMAIL_TEMPLATE
placeholder_dict = SafeDict(
REDEEMED_OFFER_COUNT=redeemed_offer_count,
TOTAL_OFFER_COUNT=total_offer_count,
USER_EMAIL=learner_email,
CODE=code,
EXPIRATION_DATE=code_expiration_date
)
email_body = format_email(email_template, placeholder_dict, greeting, closing)
send_offer_update_email.delay(learner_email, subject, email_body, sender_alias, base_enterprise_url)
def format_email(template, placeholder_dict, greeting, closing):
"""
Arguments:
template (String): Email template body
placeholder_dict (SafeDict): Safe dictionary of placeholders and their values
greeting (String): Email greeting (prefix)
closing (String): Email closing (suffix)
Apply placeholders to the email template.
Safely handle placeholders in the template without matching tokens (just emit the placeholders).
Reference: https://stackoverflow.com/questions/17215400/python-format-string-unused-named-arguments
"""
if greeting is None:
greeting = ''
if closing is None:
closing = ''
greeting = bleach.clean(greeting)
closing = bleach.clean(closing)
email_body = string.Formatter().vformat(template, SafeTuple(), placeholder_dict)
if waffle.switch_is_active(ENABLE_BRAZE):
email_body = (greeting + email_body + closing).replace('\"', '\'')
return render_to_string('coupons/offer_email.html', {'body': email_body})
    # '\n\n' is rendered as a single line break rather than two in the HTML
    # template, so newlines are replaced with <br/> tags to render as expected.
return (greeting + email_body + closing).replace('\n', '<br/>')
class SafeDict(dict):
"""
Safely handle missing placeholder values.
"""
def __missing__(self, key):
return '{' + key + '}'
class SafeTuple(tuple):
"""
Safely handle missing unnamed placeholder values in python3.
"""
def __getitem__(self, value):
return '{}'
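# Illustrative behaviour of the two helpers above (not part of the original
# module): unknown named placeholders are kept verbatim and positional
# placeholders collapse to '{}':
#   string.Formatter().vformat('Hi {USER_EMAIL}, {UNKNOWN} {}', SafeTuple(),
#                              SafeDict(USER_EMAIL='user@example.com'))
#   -> 'Hi user@example.com, {UNKNOWN} {}'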
def update_assignments_for_multi_use_per_customer(voucher):
"""
Update `OfferAssignment` records for MULTI_USE_PER_CUSTOMER coupon type when max_uses changes for a coupon.
"""
if voucher.usage == voucher.MULTI_USE_PER_CUSTOMER:
OfferAssignment = get_model('offer', 'OfferAssignment')
offer = voucher.enterprise_offer
existing_offer_assignments = OfferAssignment.objects.filter(code=voucher.code, offer=offer).count()
if existing_offer_assignments == 0:
return
if existing_offer_assignments < offer.max_global_applications:
user_email = OfferAssignment.objects.filter(code=voucher.code, offer=offer).first().user_email
offer_assignments_available = offer.max_global_applications - existing_offer_assignments
assignments = [
OfferAssignment(offer=offer, code=voucher.code, user_email=user_email, status=OFFER_ASSIGNED)
for __ in range(offer_assignments_available)
]
OfferAssignment.objects.bulk_create(assignments)
| eduNEXT/edunext-ecommerce | ecommerce/extensions/offer/utils.py | Python | agpl-3.0 | 12,022 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alessandro Camilli ([email protected])
# Copyright (C) 2014
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, orm
from openerp.tools.translate import _
import decimal_precision as dp
import datetime, time
class res_country(orm.Model):
_inherit = "res.country"
_columns = {
'codice_stato_agenzia_entrate': fields.char('Codice stato Agenzia Entrate', size=3)
}
class account_tax_code(orm.Model):
_inherit = "account.tax.code"
_columns = {
'spesometro_escludi': fields.boolean('Escludi dalla dichiarazione'),
}
_defaults = {
'spesometro_escludi' : False,
}
class account_journal(orm.Model):
_inherit = "account.journal"
_columns = {
'spesometro': fields.boolean('Da includere'),
'spesometro_operazione': fields.selection((('FA','Operazioni documentate da fattura'),
('SA','Operazioni senza fattura'),
('BL1','Operazioni con paesi con fiscalità privilegiata'),
('BL2','Operazioni con soggetti non residenti'),
('BL3','Acquisti di servizi da soggetti non residenti'),
('DR','Documento Riepilogativo')),
'Operazione' ),
'spesometro_segno': fields.selection((('attiva','Attiva'),
('passiva','Passiva')),
'Segno operaz.' ),
'spesometro_IVA_non_esposta': fields.boolean('IVA non esposta')
}
class res_partner(orm.Model):
_inherit = "res.partner"
_columns = {
'spesometro_escludi': fields.boolean('Escludi'),
'spesometro_operazione': fields.selection((('FA','Operazioni documentate da fattura'),
('SA','Operazioni senza fattura'),
('BL1','Operazioni con paesi con fiscalità privilegiata'),
('BL2','Operazioni con soggetti non residenti'),
('BL3','Acquisti di servizi da soggetti non residenti'),
('DR','Documento Riepilogativo')),
'Operazione' ),
'spesometro_IVA_non_esposta': fields.boolean('IVA non esposta'),
'spesometro_leasing': fields.selection((('A','Autovettura'),
('B','Caravan'),
('C','Altri veicoli'),
('D','Unità da diporto'),
('E','Aeromobili')),
'Tipo Leasing' ),
'spesometro_tipo_servizio': fields.selection((('cessione','Cessione Beni'),
('servizi','Prestazione di servizi')),
'Tipo servizio', help="Specificare per 'Operazioni con paesi con fiscalità privilegiata' "),
'spesometro_indirizzo_estero': fields.many2one('res.partner.address', 'Indirizzo non residente'),
}
_defaults = {
'spesometro_escludi' : False,
}
class spesometro_configurazione(orm.Model):
def _check_one_year(self, cr, uid, ids, context=None):
for element in self.browse(cr, uid, ids, context=context):
element_ids = self.search(cr, uid, [('anno','=', element.anno)], context=context)
if len(element_ids) > 1:
return False
return True
_name = "spesometro.configurazione"
_description = "Spesometro - Configurazione"
_columns = {
'anno': fields.integer('Anno', size=4, required=True ),
'stato_san_marino': fields.many2one('res.country', 'Stato San Marino', required=True),
'quadro_fa_limite_importo': fields.float('Quadro FA - Limite importo'),
'quadro_fa_limite_importo_line': fields.float('Quadro FA - Limite importo singola operaz.'),
'quadro_sa_limite_importo': fields.float('Quadro SA - Limite importo'),
'quadro_sa_limite_importo_line': fields.float('Quadro SA - Limite importo singola operaz.'),
'quadro_bl_limite_importo': fields.float('Quadro BL - Limite importo'),
'quadro_bl_limite_importo_line': fields.float('Quadro BL - Limite importo singola operaz.'),
'quadro_se_limite_importo_line': fields.float('Quadro SE - Limite importo singola operaz.'),
}
_constraints = [
(_check_one_year, 'Error! Config for this year already exists.', ['anno']),
]
class spesometro_comunicazione(orm.Model):
_name = "spesometro.comunicazione"
_description = "Spesometro - Comunicazione "
def _tot_operation_number(self, cr, uid, ids, field_names, args, context=None):
res = {}
for com in self.browse(cr, uid, ids):
# Aggregate
tot_FA = len(com.line_FA_ids)
tot_SA = len(com.line_SA_ids)
tot_BL1 = 0
tot_BL2 = 0
tot_BL3 = 0
for line in com.line_BL_ids:
if line.operazione_fiscalita_privilegiata:
tot_BL1 += 1
elif line.operazione_con_soggetti_non_residenti:
tot_BL2 += 1
elif line.Acquisto_servizi_da_soggetti_non_residenti:
tot_BL3 += 1
#Analitiche
tot_FE = 0 # Fatture emesse
tot_FE_R = 0 # Doc riepilogativi
for line in com.line_FE_ids:
if line.documento_riepilogativo:
tot_FE_R += 1
else:
tot_FE += 1
tot_FR = 0 # Fatture ricevute
tot_FR_R = 0 # Doc riepilogativi ricevuti
for line in com.line_FR_ids:
if line.documento_riepilogativo:
tot_FR_R += 1
else:
tot_FR += 1
tot_NE = len(com.line_NE_ids)
tot_NR = len(com.line_NR_ids)
tot_DF = len(com.line_DF_ids)
tot_FN = len(com.line_FN_ids)
tot_SE = len(com.line_SE_ids)
tot_TU = len(com.line_TU_ids)
res[com.id] = {
'totale_FA' : tot_FA,
'totale_SA' : tot_SA,
'totale_BL1' : tot_BL1,
'totale_BL2' : tot_BL2,
'totale_BL3' : tot_BL3,
'totale_FE' : tot_FE,
'totale_FE_R' : tot_FE_R,
'totale_FR' : tot_FR,
'totale_FR_r' : tot_FR_R,
'totale_NE' : tot_NE,
'totale_NR' : tot_NR,
'totale_DF' : tot_DF,
'totale_FN' : tot_FN,
'totale_SE' : tot_SE,
'totale_TU' : tot_TU,
}
return res
_columns = {
'company_id': fields.many2one('res.company', 'Azienda', required=True ),
'periodo': fields.selection((('anno','Annuale'), ('trimestre','Trimestrale'), ('mese','Mensile')),
'Periodo', required=True),
'anno' : fields.integer('Anno', size=4, required=True),
'trimestre' : fields.integer('Trimestre', size=1 ),
'mese' : fields.selection((('1','Gennaio'), ('2','Febbraio'), ('3','Marzo'), ('4','Aprile'),
('5','Maggio'), ('6','Giugno'), ('7','Luglio'), ('8','Agosto'),
('9','Settembre'), ('10','Ottobre'), ('11','Novembre'), ('12','Dicembre'),
),'Mese'),
'tipo': fields.selection((('ordinaria','Ordinaria'), ('sostitutiva','Sostitutiva'), ('annullamento','Annullamento')),
'Tipo comunicazione', required=True),
'comunicazione_da_sostituire_annullare': fields.integer('Protocollo comunicaz. da sostituire/annullare'),
'documento_da_sostituire_annullare': fields.integer('Protocollo documento da sostituire/annullare'),
'formato_dati': fields.selection((('aggregati','Dati Aggregati'), ('analitici','Dati Analitici')),
'Formato dati', readonly=True ),
'codice_fornitura': fields.char('Codice fornitura', readonly=True, size=5, help='Impostare a "NSP00" '),
'tipo_fornitore': fields.selection((('01','Invio propria comunicazione'), ('10','Intermediario')),
'Tipo fornitore' ),
'codice_fiscale_fornitore': fields.char('Codice fiscale Fornitore', size=16,
help="Deve essere uguale al Codice fiscale dell'intermediario (campo 52 del record B) se presente, altrimenti al Codice fiscale del soggetto tenuto alla comunicazione (campo 41 del record B) se presente, altrimenti al Codice fiscale del soggetto obbligato (campo 2 del record B)"),
#
# Valori per comunicazione su più invii (non gestito)
'progressivo_telematico': fields.integer('Progressivo telematico', readonly=True),
'numero_totale_invii': fields.integer('Numero totale invii telematici', readonly=True),
#
# Soggetto a cui si riferisce la comunicazione
#
'soggetto_codice_fiscale': fields.char('Codice fiscale soggetto obbligato', size=16,
help="Soggetto cui si riferisce la comunicazione"),
'soggetto_partitaIVA': fields.char('Partita IVA', size=11),
'soggetto_codice_attivita': fields.char('Codice attività', size=6, help="Codice ATECO 2007"),
'soggetto_telefono': fields.char('Telefono', size=12),
'soggetto_fax': fields.char('Fax', size=12),
'soggetto_email': fields.char('E-mail', size=50),
'soggetto_forma_giuridica': fields.selection((('persona_giuridica','Persona Giuridica'), ('persona_fisica','Persona Fisica')),
'Forma Giuridica'),
'soggetto_pf_cognome': fields.char('Cognome', size=24, help=""),
'soggetto_pf_nome': fields.char('Nome', size=20, help=""),
'soggetto_pf_sesso': fields.selection((('M','M'), ('F','F')),'Sesso'),
'soggetto_pf_data_nascita': fields.date('Data di nascita'),
'soggetto_pf_comune_nascita': fields.char('Comune o stato estero di nascita', size=40),
'soggetto_pf_provincia_nascita': fields.char('Provincia', size=2),
'soggetto_pg_denominazione': fields.char('Denominazione', size=60),
# Soggetto tenuto alla comunicazione
'soggetto_cm_forma_giuridica': fields.selection((('persona_giuridica','Persona Giuridica'), ('persona_fisica','Persona Fisica')),
'Forma Giuridica'),
'soggetto_cm_codice_fiscale': fields.char('Codice Fiscale', size=16, help="Soggetto che effettua la comunicazione se diverso dal soggetto tenuto alla comunicazione"),
'soggetto_cm_pf_cognome': fields.char('Cognome', size=24, help=""),
'soggetto_cm_pf_nome': fields.char('Nome', size=20, help=""),
'soggetto_cm_pf_sesso': fields.selection((('M','M'), ('F','F')),'Sesso'),
'soggetto_cm_pf_data_nascita': fields.date('Data di nascita'),
'soggetto_cm_pf_comune_nascita': fields.char('Comune o stato estero di nascita', size=40),
'soggetto_cm_pf_provincia_nascita': fields.char('Provincia', size=2),
        'soggetto_cm_pf_codice_carica': fields.integer('Codice carica', size=2, help=""),
'soggetto_cm_pf_data_inizio_procedura': fields.date('Data inizio procedura'),
'soggetto_cm_pf_data_fine_procedura': fields.date('Data fine procedura'),
'soggetto_cm_pg_denominazione': fields.char('Denominazione', size=60),
# Soggetto incaricato alla trasmissione
'soggetto_trasmissione_codice_fiscale': fields.char('Codice Fiscale', size=16, help="Intermediario che effettua la trasmissione telematica"),
'soggetto_trasmissione_numero_CAF': fields.integer('Nr iscrizione albo del C.A.F.', size=5, help="Intermediario che effettua la trasmissione telematica"),
'soggetto_trasmissione_impegno': fields.selection((('1','Soggetto obbligato'), ('2','Intermediario')),'Impegno trasmissione'),
        'soggetto_trasmissione_data_impegno': fields.date('Data impegno'),
'line_FA_ids': fields.one2many('spesometro.comunicazione.line.fa', 'comunicazione_id', 'Quadri FA' ),
'line_SA_ids': fields.one2many('spesometro.comunicazione.line.sa', 'comunicazione_id', 'Quadri SA' ),
'line_BL_ids': fields.one2many('spesometro.comunicazione.line.bl', 'comunicazione_id', 'Quadri BL' ),
'line_FE_ids': fields.one2many('spesometro.comunicazione.line.fe', 'comunicazione_id', 'Quadri FE' ),
'line_FR_ids': fields.one2many('spesometro.comunicazione.line.fr', 'comunicazione_id', 'Quadri FR' ),
'line_NE_ids': fields.one2many('spesometro.comunicazione.line.ne', 'comunicazione_id', 'Quadri NE' ),
'line_NR_ids': fields.one2many('spesometro.comunicazione.line.nr', 'comunicazione_id', 'Quadri NR' ),
'line_DF_ids': fields.one2many('spesometro.comunicazione.line.df', 'comunicazione_id', 'Quadri DF' ),
'line_FN_ids': fields.one2many('spesometro.comunicazione.line.fn', 'comunicazione_id', 'Quadri FN' ),
'line_SE_ids': fields.one2many('spesometro.comunicazione.line.se', 'comunicazione_id', 'Quadri SE' ),
'line_TU_ids': fields.one2many('spesometro.comunicazione.line.tu', 'comunicazione_id', 'Quadri TU' ),
'totale_FA': fields.function(_tot_operation_number, string='Tot operazioni FA', type='integer', multi='operation_number'),
'totale_SA': fields.function(_tot_operation_number, string='Tot operazioni SA', type='integer', multi='operation_number'),
'totale_BL1': fields.function(_tot_operation_number, string='Tot operazioni BL - Paesi con fiscalita privilegiata', type='integer', multi='operation_number'),
'totale_BL2': fields.function(_tot_operation_number, string='Tot operazioni BL - Soggetti non residenti', type='integer', multi='operation_number'),
'totale_BL3': fields.function(_tot_operation_number, string='Tot operazioni BL - Acquisti servizi non soggetti non residenti', type='integer', multi='operation_number'),
'totale_FE': fields.function(_tot_operation_number, string='Tot operazioni FE', type='integer', multi='operation_number'),
'totale_FE_R': fields.function(_tot_operation_number, string='Tot operazioni FE doc riepil.', type='integer', multi='operation_number'),
'totale_FR': fields.function(_tot_operation_number, string='Tot operazioni FR', type='integer', multi='operation_number'),
'totale_FR_R': fields.function(_tot_operation_number, string='Tot operazioni FR doc riepil.', type='integer', multi='operation_number'),
'totale_NE': fields.function(_tot_operation_number, string='Tot operazioni NE', type='integer', multi='operation_number'),
'totale_NR': fields.function(_tot_operation_number, string='Tot operazioni NR', type='integer', multi='operation_number'),
'totale_DF': fields.function(_tot_operation_number, string='Tot operazioni DF', type='integer', multi='operation_number'),
'totale_FN': fields.function(_tot_operation_number, string='Tot operazioni FN', type='integer', multi='operation_number'),
'totale_SE': fields.function(_tot_operation_number, string='Tot operazioni SE', type='integer', multi='operation_number'),
'totale_TU': fields.function(_tot_operation_number, string='Tot operazioni TU', type='integer', multi='operation_number'),
}
    _defaults = {
'codice_fornitura': 'NSP00',
'tipo_fornitore': '01',
'formato_dati': 'aggregati',
}
def onchange_trasmissione_impegno(self, cr, uid, ids, type, context=None):
res = {}
fiscalcode = False
if type == '1': # soggetto obbligato
fiscalcode = context.get('soggetto_codice_fiscale', False)
res = {
'value' : {'soggetto_trasmissione_codice_fiscale' : fiscalcode}
}
return res
def partner_is_from_san_marino(self, cr, uid, move, invoice, arg):
# configurazione
anno_competenza = datetime.datetime.strptime(move.period_id.date_start, "%Y-%m-%d").year
configurazione_ids = self.pool.get('spesometro.configurazione').search(cr, uid, \
[('anno', '=', anno_competenza)])
if not configurazione_ids:
raise orm.except_orm(_('Configurazione mancante!'),_("Configurare l'anno relativo alla comunicazione") )
configurazione = self.pool.get('spesometro.configurazione').browse(cr, uid, configurazione_ids[0])
stato_estero = False
address = self._get_partner_address_obj(cr, uid, move, invoice, arg)
if address and address.country_id and configurazione.stato_san_marino.id == address.country_id.id:
return True
else:
return False
def _get_partner_address_obj(self, cr, uid, move, invoice, arg):
address = False
if move.partner_id.spesometro_indirizzo_estero:
address = move.partner_id.spesometro_indirizzo_estero
elif move.partner_id.address[0]:
address = move.partner_id.address[0]
return address
def compute_invoice_amounts(self, cr, uid, move, invoice, arg):
'''
Calcolo totali documento. Dall'imponibile vanno esclusi gli importi assoggettati ad un'imposta che ha l'esclusione sulla "Comunicazione art.21"
'''
res ={
'amount_untaxed' : 0,
'amount_tax' : 0,
'amount_total' : 0,
}
for line in invoice.tax_line:
if not line.tax_code_id.spesometro_escludi:
res['amount_untaxed'] += line.base
res['amount_tax'] += line.amount
res['amount_total'] += round(line.base + line.amount, 2)
return res
def truncate_values(self, cr, uid, ids, context=None):
for com in self.browse(cr, uid, ids):
for line in com.line_FA_ids:
vals = {
'attive_imponibile_non_esente': int(line.attive_imponibile_non_esente),
'attive_imposta': int(line.attive_imposta),
'attive_operazioni_iva_non_esposta': int(line.attive_operazioni_iva_non_esposta),
'attive_note_variazione': int(line.attive_note_variazione),
'attive_note_variazione_imposta': int(line.attive_note_variazione_imposta),
'passive_imponibile_non_esente': int(line.passive_imponibile_non_esente),
'passive_imposta': int(line.passive_imposta),
'passive_operazioni_iva_non_esposta': int(line.passive_operazioni_iva_non_esposta),
'passive_note_variazione': int(line.passive_note_variazione),
'passive_note_variazione_imposta': int(line.passive_note_variazione_imposta),
}
self.pool.get('spesometro.comunicazione.line.fa').write(cr, uid, [line.id], vals)
for line in com.line_SA_ids:
vals = {
'importo_complessivo': int(line.importo_complessivo),
}
self.pool.get('spesometro.comunicazione.line.sa').write(cr, uid, [line.id], vals)
for line in com.line_BL_ids:
vals = {
'attive_importo_complessivo': int(line.attive_importo_complessivo),
'attive_imposta': int(line.attive_imposta),
'attive_non_sogg_cessione_beni': int(line.attive_non_sogg_cessione_beni),
'attive_non_sogg_servizi': int(line.attive_non_sogg_servizi),
'attive_note_variazione': int(line.attive_note_variazione),
'attive_note_variazione_imposta': int(line.attive_note_variazione_imposta),
'passive_importo_complessivo': int(line.passive_importo_complessivo),
'passive_imposta': int(line.passive_imposta),
'passive_non_sogg_importo_complessivo': int(line.passive_non_sogg_importo_complessivo),
'passive_note_variazione': int(line.passive_note_variazione),
'passive_note_variazione_imposta': int(line.passive_note_variazione_imposta),
}
self.pool.get('spesometro.comunicazione.line.bl').write(cr, uid, [line.id], vals)
return True
def validate_lines(self, cr, uid, ids, context=None):
for com in self.browse(cr, uid, ids):
# configurazione
configurazione_ids = self.pool.get('spesometro.configurazione').search(cr, uid, \
[('anno', '=', com.anno)])
if not configurazione_ids:
raise orm.except_orm(_('Configurazione mancante!'),_("Configurare l'anno relativo alla comunicazione") )
configurazione = self.pool.get('spesometro.configurazione').browse(cr, uid, configurazione_ids[0])
for line in com.line_FA_ids:
if configurazione.quadro_fa_limite_importo :
if line.attive_imponibile_non_esente and \
line.attive_imponibile_non_esente < configurazione.quadro_fa_limite_importo:
self.pool.get('spesometro.comunicazione.line.fa').unlink(cr, uid, [line.id])
for line in com.line_SA_ids:
if configurazione.quadro_sa_limite_importo :
if line.importo_complessivo and \
line.importo_complessivo < configurazione.quadro_sa_limite_importo:
self.pool.get('spesometro.comunicazione.line.sa').unlink(cr, uid, [line.id])
for line in com.line_BL_ids:
if configurazione.quadro_bl_limite_importo :
importo_test = 0
if line.attive_importo_complessivo :
importo_test = line.attive_importo_complessivo
elif line.attive_non_sogg_cessione_beni :
importo_test = line.attive_non_sogg_cessione_beni
elif line.attive_non_sogg_servizi :
importo_test = line.attive_non_sogg_servizi
if importo_test and \
importo_test < configurazione.quadro_bl_limite_importo:
self.pool.get('spesometro.comunicazione.line.bl').unlink(cr, uid, [line.id])
# Controllo formale comunicazione
# ... periodo in presenza di linee nel quadro SE
if com.line_SE_ids and not com.trimestre and not com.mese:
            raise orm.except_orm(_('Periodo Errato!'),_("In presenza di operazioni nel quadro SE (Acquisti da San Marino) \
                sono ammessi solo periodi mensili/trimestrali") )
return True
def validate_operation(self, cr, uid, move, invoice, arg):
# configurazione
anno_competenza = datetime.datetime.strptime(move.period_id.date_start, "%Y-%m-%d").year
configurazione_ids = self.pool.get('spesometro.configurazione').search(cr, uid, \
[('anno', '=', anno_competenza)])
if not configurazione_ids:
raise orm.except_orm(_('Configurazione mancante!'),_("Configurare l'anno relativo alla comunicazione") )
configurazione = self.pool.get('spesometro.configurazione').browse(cr, uid, configurazione_ids[0])
doc_vals = self.pool.get('spesometro.comunicazione').compute_invoice_amounts(cr, uid, move, invoice, arg)
# Nessu quadro definito
if not arg['quadro']:
return False
# Quadro richiesto
if arg['quadro'] not in arg['quadri_richiesti']:
return False
# Valori minimi
if arg['quadro'] == 'FA':
if configurazione.quadro_fa_limite_importo_line :
if not doc_vals.get('amount_untaxed', 0) or doc_vals.get('amount_untaxed', 0) < configurazione.quadro_fa_limite_importo_line:
return False
if arg['quadro'] == 'SA':
if configurazione.quadro_sa_limite_importo_line :
if not doc_vals.get('amount_total', 0) or doc_vals.get('amount_total', 0) < configurazione.quadro_sa_limite_importo_line:
return False
if arg['quadro'] == 'BL':
if configurazione.quadro_bl_limite_importo_line :
if not doc_vals.get('amount_total', 0) or doc_vals.get('amount_total', 0) < configurazione.quadro_bl_limite_importo_line:
return False
if arg['quadro'] == 'SE':
if configurazione.quadro_se_limite_importo_line :
if not doc_vals.get('amount_untaxed', 0) or doc_vals.get('amount_untaxed', 0) < configurazione.quadro_se_limite_importo_line:
return False
# Operazioni con San Marino Escluse se richiesta forma aggregata
if arg['formato_dati'] == 'aggregati' and self.partner_is_from_san_marino(cr, uid, move, invoice, arg):
return False
return True
def get_define_quadro(self, cr, uid, move, invoice, arg):
quadro = False
operazione = arg.get('operazione')
# Forma aggregata
if arg['formato_dati'] == 'aggregati':
if operazione == 'FA' or operazione == 'DR':
quadro = 'FA'
elif operazione == 'SA': # Operazioni senza fattura
quadro = 'SA'
            elif (operazione == 'BL1') or (operazione == 'BL2') or (operazione == 'BL3'):
quadro = 'BL'
# Forma analitica
if arg['formato_dati'] == 'analitici':
# Priorità x San Marino -> quadro SE
if self.partner_is_from_san_marino(cr, uid, move, invoice, arg):
operazione = 'BL3'
# Impostazioni anagrafiche partner
if operazione == 'FA' or operazione == 'DR':
if arg.get('segno') == 'attiva':
quadro = 'FE'
elif arg.get('segno') == 'passiva':
quadro = 'FR'
elif operazione == 'SA': # Operazioni senza fattura
quadro = 'DF'
elif operazione == 'BL2': #Operazioni con soggetti non residenti
quadro = 'FN'
elif operazione == 'BL1' or operazione == 'BL3': #Operazioni con paesi con fiscalità privilegiata - Acquisti di servizi da soggetti non residenti
quadro = 'SE'
        # Note di variazione: i quadri NE/NR sostituiscono FE/FR per le note di credito
        if quadro == 'FE' and 'refund' in move.journal_id.type:
            quadro = 'NE'
        elif quadro == 'FR' and 'refund' in move.journal_id.type:
            quadro = 'NR'
return quadro
def genera_comunicazione(self, cr, uid, params, context=None):
def _get_periods(cr, uid, params, context=None):
'''
Definizione periodi di competenza
'''
sql_select = "SELECT p.id FROM account_period p "
sql_where = " WHERE p.special = False "
search_params = {}
# Periodo annuale
if params.get('periodo') == 'anno':
period_date_start = datetime.date(params.get('anno') , 1, 1)
period_date_stop = datetime.date(params.get('anno') , 12, 31)
sql_where += " AND p.date_start >= date(%(period_date_start)s) AND p.date_stop <=date(%(period_date_stop)s) "
search_params.update({
'period_date_start' : period_date_start,
'period_date_stop' : period_date_stop
})
# Periodo mensile
if params.get('periodo') == 'mese':
period_date_start = datetime.date(params.get('anno') , int(params.get('mese')), 1)
sql_where += " AND p.date_start = date(%(period_date_start)s) "
search_params.update({
'period_date_start' : period_date_start,
})
# Periodo trimestrale
if params.get('periodo') == 'trimestre':
                if params.get('trimestre') == 1:
                    period_date_start = datetime.date(params.get('anno') , 1, 1)
                    period_date_stop = datetime.date(params.get('anno') , 3, 31)
                elif params.get('trimestre') == 2:
                    period_date_start = datetime.date(params.get('anno') , 4, 1)
                    period_date_stop = datetime.date(params.get('anno') , 6, 30)
                elif params.get('trimestre') == 3:
                    period_date_start = datetime.date(params.get('anno') , 7, 1)
                    period_date_stop = datetime.date(params.get('anno') , 9, 30)
                elif params.get('trimestre') == 4:
                    period_date_start = datetime.date(params.get('anno') , 10, 1)
                    period_date_stop = datetime.date(params.get('anno') , 12, 31)
                else:
                    raise orm.except_orm(_('Dato errato!'),_("Errore nel valore del trimestre") )
sql_where += " AND p.date_start >= date(%(period_date_start)s) AND p.date_stop <=date(%(period_date_stop)s) "
search_params.update({
'period_date_start' : period_date_start,
'period_date_stop' : period_date_stop
})
sql = sql_select + sql_where
cr.execute(sql, search_params)
periods = [i[0] for i in cr.fetchall()]
return periods
def _genera_testata(cr, uid, params, context=None):
'''
Generazione testata dichiarazione
'''
company = self.pool.get('res.company').browse(cr, uid, params['company_id'])
# progressivo telematico :" il progressivo deve essere univoco e crescente (con incrementi di una unità per ogni file prodotto)"
            progressivo_telematico = False
            if params['tipo'] == 'ordinaria':
com_search = [('tipo', '=', 'ordinaria')]
com_last_ids = self.search(cr, uid, com_search, order='progressivo_telematico desc', limit=1)
com_next_prg = 1
if com_last_ids:
com_next_prg = self.browse(cr, uid, com_last_ids[0]).progressivo_telematico + 1
progressivo_telematico = com_next_prg
# vat
if company.partner_id.vat:
partita_iva = company.partner_id.vat[2:]
else:
partita_iva = '{:11s}'.format("".zfill(11))
# codice fiscale soggetto incaricato alla trasmissione
codice_fiscale_incaricato_trasmissione=''
if params.get('tipo_fornitore') == '10' and params.get('partner_intermediario', False):
partner_intermediario = self.pool.get('res.partner').browse(cr, uid, params.get('partner_intermediario'))
codice_fiscale_incaricato_trasmissione = partner_intermediario.fiscalcode or False
# Soggetto con impegno alla trasmissione
if params.get('tipo_fornitore') == '10':
soggetto_trasmissione_impegno = '2'
else:
soggetto_trasmissione_impegno = '1'
# Persona fisica o giuridica
            # Considerazione: se la lunghezza del codice fiscale è < 16 allora c'è la P.Iva e quindi trattasi di soggetto giuridico
tipo_persona = 'persona_fisica'
if len(company.partner_id.fiscalcode) < 16:
tipo_persona = 'persona_giuridica'
values = {
'company_id' : company.id,
'codice_fiscale_fornitore' : company.partner_id.fiscalcode,
'tipo' : params.get('tipo', False),
'periodo' : params.get('periodo', False),
'anno' : params.get('anno', False),
'mese' : params.get('mese', False),
'trimestre' : params.get('trimestre', False),
'progressivo_telematico' : progressivo_telematico or False,
'tipo_fornitore' : params.get('tipo_fornitore', False),
'formato_dati' : params.get('formato_dati', False),
'soggetto_codice_fiscale' : company.partner_id and company.partner_id.fiscalcode or '',
'soggetto_partitaIVA' : partita_iva,
'soggetto_telefono' : company.partner_id and company.partner_id.address[0].phone or '',
'soggetto_fax' : company.partner_id and company.partner_id.address[0].fax or '',
'soggetto_email' : company.partner_id and company.partner_id.address[0].email or '',
'soggetto_forma_giuridica' : tipo_persona,
'soggetto_pg_denominazione' : company.partner_id and company.partner_id.name or company.name or '',
'soggetto_cm_forma_giuridica' : tipo_persona,
'soggetto_cm_pg_denominazione' : company.partner_id and company.partner_id.name or company.name or '',
'soggetto_trasmissione_codice_fiscale' : codice_fiscale_incaricato_trasmissione,
'soggetto_trasmissione_impegno' : soggetto_trasmissione_impegno,
}
comunicazione_id = self.create(cr, uid, values)
return comunicazione_id
# Esistenza record di configurazione per l'anno della comunicazione
configurazione_ids = self.pool.get('spesometro.configurazione').search(cr, uid, [('anno', '=', params.get('anno'))])
if not configurazione_ids:
raise orm.except_orm(_('Configurazione mancante!'),_("Configurare l'anno relativo alla comunicazione") )
configurazione = self.pool.get('spesometro.configurazione').browse(cr, uid, configurazione_ids[0])
# Testata comunicazione
comunicazione_id = _genera_testata(cr, uid, params, context=None)
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
partner_obj = self.pool.get('res.partner')
account_move_obj = self.pool.get('account.move')
invoice_obj = self.pool.get('account.invoice')
# periods
period_ids = _get_periods(cr, uid, params, context=None)
# journal
journal_search = [('spesometro','=', True)]
journal_ids = journal_obj.search(cr, uid, journal_search, context=context)
# Partners to exclude
partner_search = [('spesometro_escludi','=', True)]
partner_to_exclude_ids = partner_obj.search(cr, uid, partner_search, context=context)
move_search = [('company_id', '=', params['company_id']),('period_id','in', period_ids), ('journal_id','in', journal_ids), ('partner_id','not in', partner_to_exclude_ids)]
move_ids = account_move_obj.search(cr, uid, move_search, context=context)
for move in self.pool.get('account.move').browse(cr, uid, move_ids):
# Test move validate
if not move.partner_id:
continue
# Invoice
invoice_search = [('move_id','=', move.id)]
invoice_ids = invoice_obj.search(cr, uid, invoice_search, context=context)
if not invoice_ids:
continue
invoice = invoice_obj.browse(cr,uid, invoice_ids[0])
# Config spesometro
operazione = False
operazione_iva_non_esposta = False
operazione = move.journal_id.spesometro_operazione
operazione_iva_non_esposta = move.journal_id.spesometro_IVA_non_esposta
segno = move.journal_id.spesometro_segno
if move.partner_id.spesometro_operazione:
operazione = move.partner_id.spesometro_operazione
operazione_iva_non_esposta = move.partner_id.spesometro_IVA_non_esposta
arg = {
'comunicazione_id' : comunicazione_id,
'segno' : segno,
'operazione_iva_non_esposta' : operazione_iva_non_esposta,
'operazione' : operazione,
'formato_dati' : params['formato_dati'],
'quadri_richiesti' : params['quadri_richiesti'],
}
# Quadro di competenza
quadro = self.get_define_quadro(cr, uid, move, invoice, arg)
arg.update({'quadro': quadro})
# Test operazione da includere nella comunicazione
if not self.validate_operation(cr, uid, move, invoice, arg):
continue
if quadro == 'FA':
line_id = self.pool.get('spesometro.comunicazione.line.fa').add_line(cr, uid, move, invoice, arg)
if quadro == 'SA':
line_id = self.pool.get('spesometro.comunicazione.line.sa').add_line(cr, uid, move, invoice, arg)
if quadro == 'BL':
line_id = self.pool.get('spesometro.comunicazione.line.bl').add_line(cr, uid, move, invoice, arg)
if quadro == 'SE':
line_id = self.pool.get('spesometro.comunicazione.line.se').add_line(cr, uid, move, invoice, arg)
# Arrotonda importi su valori raggruppati -> troncare i decimali
if params['formato_dati'] == 'aggregati':
self.truncate_values(cr, uid, [comunicazione_id])
# Rimuove le linee che non rientrano nei limiti ed effettua un controllo formale sull'intera comunicazione
self.validate_lines(cr, uid, [comunicazione_id])
# Update for compute totals
self.write(cr, uid, [comunicazione_id],{})
return True
class spesometro_comunicazione_line_FA(orm.Model):
'''
QUADRO FA - Operazioni documentate da fattura esposte in forma aggregata
'''
_name = "spesometro.comunicazione.line.fa"
_description = "Spesometro - Comunicazione linee quadro FA"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'partita_iva': fields.char('Partita IVA', size=11),
'codice_fiscale': fields.char('Codice Fiscale', size=16),
'documento_riepilogativo': fields.boolean('Documento Riepilogativo'),
        'noleggio': fields.selection((('A','Autovettura'), ('B','Caravan'), ('C','Altri Veicoli'), ('D','Unità da diporto'), ('E','Aeromobili')),'Leasing'),
'numero_operazioni_attive_aggregate': fields.integer('Nr op. attive', size=16),
'numero_operazioni_passive_aggregate': fields.integer('Nr op. passive', size=16),
'attive_imponibile_non_esente': fields.float('Tot impon., non impon ed esenti', digits_compute=dp.get_precision('Account'), help="Totale operazioni imponibili, non imponibili ed esenti"),
'attive_imposta': fields.float(' Tot imposta', digits_compute=dp.get_precision('Account'), help="Totale imposta"),
'attive_operazioni_iva_non_esposta': fields.float('Totale operaz. IVA non esposta', digits_compute=dp.get_precision('Account'), help="Totale operazioni con IVA non esposta"),
'attive_note_variazione': fields.float('Totale note variazione', digits_compute=dp.get_precision('Account'), help="Totale note di variazione a debito per la controparte"),
'attive_note_variazione_imposta': fields.float('Totale imposta note variazione', digits_compute=dp.get_precision('Account'), help="Totale imposta sulle note di variazione a debito"),
'passive_imponibile_non_esente': fields.float('Tot impon., non impon ed esenti', digits_compute=dp.get_precision('Account'), help="Totale operazioni imponibili, non imponibili ed esenti"),
'passive_imposta': fields.float('Totale imposta', digits_compute=dp.get_precision('Account'), help="Totale imposta"),
'passive_operazioni_iva_non_esposta': fields.float('Totale operaz. IVA non esposta', digits_compute=dp.get_precision('Account'), help="Totale operazioni con IVA non esposta"),
'passive_note_variazione': fields.float('Totale note variazione', digits_compute=dp.get_precision('Account'), help="Totale note di variazione a credito per la controparte"),
'passive_note_variazione_imposta': fields.float('Totale imposta note variazione', digits_compute=dp.get_precision('Account'), help="Totale imposta sulle note di variazione a credito"),
}
def add_line(self, cr, uid, move, invoice, arg):
comunicazione_lines_obj = self.pool.get('spesometro.comunicazione.line.fa')
comunicazione_id = arg.get('comunicazione_id', False)
com_line_search = [('comunicazione_id','=',comunicazione_id), ('partner_id', '=', move.partner_id.id)]
com_line_ids = self.search(cr, uid, com_line_search)
val = {}
# Valori documento
doc_vals = self.pool.get('spesometro.comunicazione').compute_invoice_amounts(cr, uid, move, invoice, arg)
# New partner
if not com_line_ids:
partita_iva =''
if move.partner_id.vat:
partita_iva = move.partner_id.vat[2:]
documento_riepilogativo = False
if arg['operazione'] == 'DR':
documento_riepilogativo = True
val = {
'comunicazione_id' : comunicazione_id,
'partner_id' : move.partner_id.id,
'partita_iva' : partita_iva,
'codice_fiscale' : move.partner_id.fiscalcode or '',
'noleggio' : move.partner_id.spesometro_leasing or '',
'documento_riepilogativo' : documento_riepilogativo,
}
# attive
if arg.get('segno', False) == 'attiva':
val['numero_operazioni_attive_aggregate'] = 1
if 'refund' in move.journal_id.type:
val['attive_note_variazione'] = doc_vals.get('amount_untaxed', 0)
val['attive_note_variazione_imposta'] = doc_vals.get('amount_tax', 0)
else:
if arg.get('operazione_iva_non_esposta', False):
val['attive_operazioni_iva_non_esposta' ] = doc_vals.get('amount_total', 0)
else:
val['attive_imponibile_non_esente' ] = doc_vals.get('amount_untaxed', 0)
val['attive_imposta'] =doc_vals.get('amount_tax', 0)
# passive
else:
val['numero_operazioni_passive_aggregate'] = 1
if 'refund' in move.journal_id.type:
val['passive_note_variazione'] = doc_vals.get('amount_untaxed', 0)
val['passive_note_variazione_imposta'] = doc_vals.get('amount_tax', 0)
else:
if arg.get('operazione_iva_non_esposta', False):
val['passive_operazioni_iva_non_esposta' ] = doc_vals.get('amount_total', 0)
else:
val['passive_imponibile_non_esente' ] = doc_vals.get('amount_untaxed', 0)
val['passive_imposta' ] = doc_vals.get('amount_tax', 0)
# Partner already exists
if com_line_ids:
for com_line in self.browse(cr, uid, com_line_ids):
# attive
if arg.get('segno', False) == 'attiva':
val['numero_operazioni_attive_aggregate'] = com_line.numero_operazioni_attive_aggregate + 1
if 'refund' in move.journal_id.type:
val['attive_note_variazione'] = com_line.attive_note_variazione + doc_vals.get('amount_untaxed', 0)
val['attive_note_variazione_imposta'] = com_line.attive_note_variazione_imposta + doc_vals.get('amount_tax', 0)
else:
if arg.get('operazione_iva_non_esposta', False):
val['attive_operazioni_iva_non_esposta' ] = com_line.attive_operazioni_iva_non_esposta + doc_vals.get('amount_total', 0)
else:
val['attive_imponibile_non_esente' ] = com_line.attive_imponibile_non_esente + doc_vals.get('amount_untaxed', 0)
val['attive_imposta' ] = com_line.attive_imposta + doc_vals.get('amount_tax', 0)
# passive
else:
val['numero_operazioni_passive_aggregate'] = com_line.numero_operazioni_passive_aggregate + 1
if 'refund' in move.journal_id.type:
val['passive_note_variazione'] = com_line.passive_note_variazione + doc_vals.get('amount_untaxed', 0)
val['passive_note_variazione_imposta'] = com_line.passive_note_variazione_imposta + doc_vals.get('amount_tax', 0)
else:
if arg.get('operazione_iva_non_esposta', False):
val['passive_operazioni_iva_non_esposta' ] = com_line.passive_operazioni_iva_non_esposta + doc_vals.get('amount_total', 0)
else:
val['passive_imponibile_non_esente' ] = com_line.passive_imponibile_non_esente + doc_vals.get('amount_untaxed', 0)
val['passive_imposta' ] = com_line.passive_imposta + doc_vals.get('amount_tax', 0)
if com_line_ids:
line_id = com_line.id
self.write(cr, uid, [com_line.id], val)
else:
line_id = self.create(cr, uid, val)
return line_id
class spesometro_comunicazione_line_SA(orm.Model):
'''
QUADRO SA - Operazioni senza fattura esposte in forma aggregata
'''
_name = "spesometro.comunicazione.line.sa"
_description = "Spesometro - Comunicazione linee quadro SA"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione' , ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'codice_fiscale': fields.char('Codice Fiscale', size=16),
'numero_operazioni': fields.integer('Numero operazioni'),
'importo_complessivo': fields.float('Importo complessivo', digits_compute=dp.get_precision('Account')),
        'noleggio': fields.selection((('A','Autovettura'), ('B','Caravan'), ('C','Altri Veicoli'), ('D','Unità da diporto'), ('E','Aeromobili')),'Leasing'),
}
def add_line(self, cr, uid, move, invoice, arg):
        comunicazione_lines_obj = self.pool.get('spesometro.comunicazione.line.sa')
comunicazione_id = arg.get('comunicazione_id', False)
com_line_search = [('comunicazione_id','=',comunicazione_id), ('partner_id', '=', move.partner_id.id)]
com_line_ids = self.search(cr, uid, com_line_search)
val = {}
        # Document values
doc_vals = self.pool.get('spesometro.comunicazione').compute_invoice_amounts(cr, uid, move, invoice, arg)
# New partner
if not com_line_ids:
val = {
'comunicazione_id' : comunicazione_id,
'partner_id' : move.partner_id.id,
'codice_fiscale' : move.partner_id.fiscalcode or False,
'noleggio' : move.partner_id.spesometro_leasing or False,
'numero_operazioni' : 1,
'importo_complessivo' : doc_vals.get('amount_total', 0),
}
# Partner already exists
if com_line_ids:
for com_line in self.browse(cr, uid, com_line_ids):
val['numero_operazioni'] = com_line.numero_operazioni + 1
val['importo_complessivo'] = com_line.importo_complessivo + doc_vals.get('amount_total', 0)
if com_line_ids:
line_id = com_line.id
self.write(cr, uid, [com_line.id], val)
else:
line_id = self.create(cr, uid, val)
return line_id
class spesometro_comunicazione_line_BL(orm.Model):
'''
    QUADRO BL
    - Transactions with countries with privileged tax regimes (sections BL001 and BL002
    plus at least one field among sections BL003, BL004, BL005, BL006, BL007, BL008 are mandatory)
    - Transactions with non-resident parties (sections BL001 and BL002 plus at least
    one field among sections BL003 and BL006 are mandatory)
    - Purchases of services from non-resident parties (sections BL001 and BL002 plus
    at least one field of section BL006 are mandatory)
'''
_name = "spesometro.comunicazione.line.bl"
_description = "Spesometro - Comunicazione linee quadro BL"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'codice_fiscale': fields.char('Codice Fiscale', size=16),
'numero_operazioni': fields.integer('Numero operazioni'),
'importo_complessivo': fields.integer('Importo complessivo', digits_compute=dp.get_precision('Account')),
        'noleggio': fields.selection((('A','Autovettura'), ('B','Caravan'), ('C','Altri Veicoli'), ('D','Unità da diporto'), ('E','Aeromobili')),'Leasing'),
'pf_cognome': fields.char('Cognome', size=24, help=""),
'pf_nome': fields.char('Nome', size=20, help=""),
'pf_data_nascita': fields.date('Data di nascita'),
'pf_comune_stato_nascita': fields.char('Comune o stato estero di nascita', size=40),
'pf_provincia_nascita': fields.char('Provincia', size=2),
'pf_codice_stato_estero': fields.char('Codice Stato Estero', size=3, help="Deve essere uno di quelli presenti nella tabella 'elenco dei paesi e\
territori esteri' pubblicata nelle istruzioni del modello Unico"),
'pg_denominazione': fields.char('Denominazione/Ragione sociale', size=60),
        'pg_citta_estera_sede_legale': fields.char('Città estera della Sede legale', size=40),
'pg_codice_stato_estero': fields.char('Codice Stato Estero', size=3, help="Deve essere uno di quelli presenti nella tabella 'elenco dei paesi e\
territori esteri' pubblicata nelle istruzioni del modello Unico"),
'pg_indirizzo_sede_legale': fields.char('Indirizzo sede legale', size=60),
'codice_identificativo_IVA': fields.char('Codice identificativo IVA', size=16),
        'operazione_fiscalita_privilegiata': fields.boolean('Operazione con paesi con fiscalità privilegiata'),
'operazione_con_soggetti_non_residenti': fields.boolean('Operazione con soggetto non residente'),
'Acquisto_servizi_da_soggetti_non_residenti': fields.boolean('Acquisto di servizi da soggetti non residenti'),
'attive_importo_complessivo': fields.float('Tot operaz. attive impon., non impon ed esenti', digits_compute=dp.get_precision('Account'), help="Totale operazioni imponibili, non imponibili ed esenti"),
'attive_imposta': fields.float('Tot operaz. attive imposta', digits_compute=dp.get_precision('Account'), help="Totale imposta"),
'attive_non_sogg_cessione_beni': fields.float('Operaz.attive non soggette ad IVA - Cessione beni', digits_compute=dp.get_precision('Account'), help="Totale operazioni imponibili, non imponibili ed esenti"),
'attive_non_sogg_servizi': fields.float('Operaz.attive non soggette ad IVA - Servizi', digits_compute=dp.get_precision('Account'), help="Totale operazioni imponibili, non imponibili ed esenti"),
'attive_note_variazione': fields.float('Totale note variazione', digits_compute=dp.get_precision('Account'), help="Totale note di variazione a debito per la controparte"),
'attive_note_variazione_imposta': fields.float('Totale imposta note variazione', digits_compute=dp.get_precision('Account'), help="Totale imposta sulle note di variazione a debito"),
'passive_importo_complessivo': fields.float('Tot operaz. passive impon., non impon ed esenti', digits_compute=dp.get_precision('Account'), help="Totale operazioni imponibili, non imponibili ed esenti"),
'passive_imposta': fields.float('Tot operaz. passive imposta', digits_compute=dp.get_precision('Account'), help="Totale imposta"),
'passive_non_sogg_importo_complessivo': fields.float('Operaz.passive non soggette ad IVA', digits_compute=dp.get_precision('Account'), help="Totale operazioni imponibili, non imponibili ed esenti"),
'passive_note_variazione': fields.float('Totale note variazione', digits_compute=dp.get_precision('Account'), help="Totale note di variazione a debito per la controparte"),
'passive_note_variazione_imposta': fields.float('Totale imposta note variazione', digits_compute=dp.get_precision('Account'), help="Totale imposta sulle note di variazione a debito"),
}
def add_line(self, cr, uid, move, invoice, arg):
comunicazione_lines_obj = self.pool.get('spesometro.comunicazione.line.bl')
comunicazione_id = arg.get('comunicazione_id', False)
com_line_search = [('comunicazione_id','=',comunicazione_id), ('partner_id', '=', move.partner_id.id)]
com_line_ids = self.search(cr, uid, com_line_search)
val = {}
        # Document values
doc_vals = self.pool.get('spesometro.comunicazione').compute_invoice_amounts(cr, uid, move, invoice, arg)
# New partner
if not com_line_ids:
            # VAT number (p.iva)
if move.partner_id.vat:
partita_iva = move.partner_id.vat[2:]
else:
partita_iva = '{:11s}'.format("".zfill(11))
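                # Note (added for clarity): the line above builds a
                # placeholder "partita IVA" of eleven zeros for partners
                # without a VAT number, presumably to fill a fixed-width
                # field in the export file.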
            # birth province
prov_code = False
if move.partner_id.birth_city.name:
city_data = move.partner_id.address[0]._set_vals_city_data(cr, uid, {'city' : move.partner_id.birth_city.name})
prov_id = city_data.get('province_id', False)
if prov_id:
                    prov = self.pool.get('res.province').browse(cr, uid, prov_id)
                    prov_code = prov.code
val = {
'comunicazione_id' : comunicazione_id,
'partner_id' : move.partner_id.id,
'codice_fiscale' : move.partner_id.fiscalcode or False,
'noleggio' : move.partner_id.spesometro_leasing or False,
'pf_cognome' : move.partner_id.fiscalcode_surname or False,
'pf_nome' : move.partner_id.fiscalcode_firstname or False,
'pf_data_nascita' : move.partner_id.birth_date or False,
'pf_comune_stato_nascita' : move.partner_id.birth_city.name or False,
'pf_provincia_nascita' : prov_code or False,
'pf_codice_stato_estero' : move.partner_id.address[0].country_id.codice_stato_agenzia_entrate or '',
'pg_denominazione' : move.partner_id.name or False,
'pg_citta_estera_sede_legale' : move.partner_id.address[0].city or False,
'pg_codice_stato_estero' : move.partner_id.address[0].country_id.codice_stato_agenzia_entrate or '',
'pg_indirizzo_sede_legale' : move.partner_id.address[0].street or False,
'operazione_fiscalita_privilegiata' : False,
'operazione_con_soggetti_non_residenti' : False,
'Acquisto_servizi_da_soggetti_non_residenti' : False,
}
if move.partner_id.spesometro_operazione == 'BL1':
val['operazione_fiscalita_privilegiata'] = True
elif move.partner_id.spesometro_operazione == 'BL2':
val['operazione_con_soggetti_non_residenti'] = True
elif move.partner_id.spesometro_operazione == 'BL3':
val['Acquisto_servizi_da_soggetti_non_residenti'] = True
# attive
if arg.get('segno', False) == 'attiva':
if val['operazione_fiscalita_privilegiata'] or val['operazione_con_soggetti_non_residenti']:
val['attive_importo_complessivo'] = doc_vals.get('amount_total', 0)
val['attive_imposta'] = doc_vals.get('amount_tax', 0)
if val['operazione_fiscalita_privilegiata'] == True:
if move.partner_id.spesometro_operazione == 'cessioni':
val['attive_non_sogg_cessione_beni'] = doc_vals.get('amount_total', 0)
else:
val['attive_non_sogg_servizi'] = doc_vals.get('amount_total', 0)
if 'refund' in move.journal_id.type:
val['attive_note_variazione'] = doc_vals.get('amount_untaxed', 0)
val['attive_note_variazione_imposta'] = doc_vals.get('amount_tax', 0)
# passive
else:
if val['operazione_fiscalita_privilegiata'] or val['operazione_con_soggetti_non_residenti'] or val['Acquisto_servizi_da_soggetti_non_residenti']:
val['passive_importo_complessivo'] = doc_vals.get('amount_total', 0)
val['passive_imposta'] = doc_vals.get('amount_tax', 0)
if val['operazione_fiscalita_privilegiata'] == True:
val['passive_non_sogg_importo_complessivo'] = doc_vals.get('amount_total', 0)
if 'refund' in move.journal_id.type:
val['passive_note_variazione'] = doc_vals.get('amount_untaxed', 0)
val['passive_note_variazione_imposta'] = doc_vals.get('amount_tax', 0)
# Partner already exists
if com_line_ids:
for com_line in self.browse(cr, uid, com_line_ids):
# attive
if arg.get('segno', False) == 'attiva':
if val['operazione_fiscalita_privilegiata'] or val['operazione_con_soggetti_non_residenti']:
val['attive_importo_complessivo'] = com_line.attive_importo_complessivo + doc_vals.get('amount_total', 0)
val['attive_imposta'] = com_line.attive_imposta + doc_vals.get('amount_tax', 0)
if val['operazione_fiscalita_privilegiata'] == True:
if move.partner_id.spesometro_operazione == 'cessioni':
val['attive_non_sogg_cessione_beni'] = com_line.attive_non_sogg_cessione_beni + doc_vals.get('amount_total', 0)
else:
val['attive_non_sogg_servizi'] = com_line.attive_non_sogg_servizi + doc_vals.get('amount_total', 0)
if 'refund' in move.journal_id.type:
val['attive_note_variazione'] = com_line.attive_note_variazione + doc_vals.get('amount_untaxed', 0)
val['attive_note_variazione_imposta'] = com_line.attive_note_variazione_imposta + doc_vals.get('amount_tax', 0)
# passive
else:
if val['operazione_fiscalita_privilegiata'] or val['operazione_con_soggetti_non_residenti'] or val['Acquisto_servizi_da_soggetti_non_residenti']:
val['passive_importo_complessivo'] = com_line.passive_importo_complessivo + doc_vals.get('amount_total', 0)
val['passive_imposta'] = com_line.passive_imposta + doc_vals.get('amount_tax', 0)
if val['operazione_fiscalita_privilegiata'] == True:
val['passive_non_sogg_importo_complessivo'] = com_line.passive_non_sogg_importo_complessivo + doc_vals.get('amount_total', 0)
if 'refund' in move.journal_id.type:
val['passive_note_variazione'] = com_line.passive_note_variazione + doc_vals.get('amount_untaxed', 0)
val['passive_note_variazione_imposta'] = com_line.passive_note_variazione_imposta + doc_vals.get('amount_tax', 0)
if com_line_ids:
line_id = com_line.id
self.write(cr, uid, [com_line.id], val)
else:
line_id = self.create(cr, uid, val)
return line_id
class spesometro_comunicazione_line_FE(orm.Model):
_name = "spesometro.comunicazione.line.fe"
_description = "Spesometro - Comunicazione linee quadro FE"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'partita_iva': fields.char('Partita IVA', size=11),
'codice_fiscale': fields.char('Codice Fiscale', size=16),
'documento_riepilogativo': fields.boolean('Documento Riepilogativo'),
        'noleggio': fields.selection((('A','Autovettura'), ('B','Caravan'), ('C','Altri Veicoli'), ('D','Unità da diporto'), ('E','Aeromobili')),'Leasing'),
'autofattura': fields.boolean('Autofattura'),
'data_documento': fields.date('Data documento'),
'data_registrazione': fields.date('Data registrazione'),
'numero_fattura': fields.char('Numero Fattura - Doc riepilog.', size=16),
'importo': fields.float('Importo', digits_compute=dp.get_precision('Account')),
'imposta': fields.float('Imposta', digits_compute=dp.get_precision('Account')),
}
class spesometro_comunicazione_line_FR(orm.Model):
_name = "spesometro.comunicazione.line.fr"
_description = "Spesometro - Comunicazione linee quadro FR"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'partita_iva': fields.char('Partita IVA', size=11),
'documento_riepilogativo': fields.boolean('Documento Riepilogativo'),
'data_documento': fields.date('Data documento'),
'data_registrazione': fields.date('Data registrazione'),
'iva_non_esposta': fields.boolean('IVA non esposta'),
'reverse_charge': fields.boolean('Reverse charge'),
'autofattura': fields.boolean('Autofattura'),
'importo': fields.float('Importo', digits_compute=dp.get_precision('Account')),
'imposta': fields.float('Imposta', digits_compute=dp.get_precision('Account')),
}
class spesometro_comunicazione_line_NE(orm.Model):
_name = "spesometro.comunicazione.line.ne"
_description = "Spesometro - Comunicazione linee quadro NE"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'partita_iva': fields.char('Partita IVA', size=11),
'codice_fiscale': fields.char('Codice Fiscale', size=16),
'data_emissione': fields.date('Data emissione'),
'data_registrazione': fields.date('Data registrazione'),
'numero_nota': fields.char('Numero Nota', size=16),
'importo': fields.float('Importo', digits_compute=dp.get_precision('Account')),
'imposta': fields.float('Imposta', digits_compute=dp.get_precision('Account')),
}
class spesometro_comunicazione_line_NR(orm.Model):
_name = "spesometro.comunicazione.line.nr"
_description = "Spesometro - Comunicazione linee quadro NR"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'partita_iva': fields.char('Partita IVA', size=11),
'data_documento': fields.date('Data documento'),
'data_registrazione': fields.date('Data registrazione'),
'importo': fields.float('Importo', digits_compute=dp.get_precision('Account')),
'imposta': fields.float('Imposta', digits_compute=dp.get_precision('Account')),
}
class spesometro_comunicazione_line_DF(orm.Model):
_name = "spesometro.comunicazione.line.df"
_description = "Spesometro - Comunicazione linee quadro DF"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'codice_fiscale': fields.char('Codice Fiscale', size=16),
'data_operazione': fields.date('Data operazione'),
'importo': fields.float('Importo', digits_compute=dp.get_precision('Account')),
        'noleggio': fields.selection((('A','Autovettura'), ('B','Caravan'), ('C','Altri Veicoli'), ('D','Unità da diporto'), ('E','Aeromobili')),'Leasing'),
}
class spesometro_comunicazione_line_FN(orm.Model):
_name = "spesometro.comunicazione.line.fn"
_description = "Spesometro - Comunicazione linee quadro FN"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'pf_cognome': fields.char('Cognome', size=24, help=""),
'pf_nome': fields.char('Nome', size=20, help=""),
'pf_data_nascita': fields.date('Data di nascita'),
'pf_comune_stato_nascita': fields.char('Comune o stato estero di nascita', size=40),
'pf_provincia_nascita': fields.char('Provincia', size=2),
'pf_codice_stato_estero_domicilio': fields.char('Codice Stato Estero del Domicilio', size=3, help="Deve essere uno di quelli presenti nella tabella 'elenco dei paesi e\
territori esteri' pubblicata nelle istruzioni del modello Unico"),
'pg_denominazione': fields.char('Denominazione/Ragione sociale', size=60),
        'pg_citta_estera_sede_legale': fields.char('Città estera della Sede legale', size=40),
'pg_codice_stato_estero_domicilio': fields.char('Codice Stato Estero del Domicilio', size=3, help="Deve essere uno di quelli presenti nella tabella 'elenco dei paesi e\
territori esteri' pubblicata nelle istruzioni del modello Unico"),
'pg_indirizzo_sede_legale': fields.char('Indirizzo legale', size=40),
'data_emissione': fields.date('Data emissione'),
'data_registrazione': fields.date('Data registrazione'),
'numero_fattura': fields.char('Numero Fattura/Doc riepilog.', size=16),
        'noleggio': fields.selection((('A','Autovettura'), ('B','Caravan'), ('C','Altri Veicoli'), ('D','Unità da diporto'), ('E','Aeromobili')),'Leasing'),
'importo': fields.float('Importo', digits_compute=dp.get_precision('Account')),
'imposta': fields.float('Imposta', digits_compute=dp.get_precision('Account')),
}
class spesometro_comunicazione_line_SE(orm.Model):
'''
    QUADRO SE - Purchases of services from non-residents and purchases from San Marino operators
'''
_name = "spesometro.comunicazione.line.se"
_description = "Spesometro - Comunicazione linee quadro SE"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'pf_cognome': fields.char('Cognome', size=24, help=""),
'pf_nome': fields.char('Nome', size=20, help=""),
'pf_data_nascita': fields.date('Data di nascita'),
'pf_comune_stato_nascita': fields.char('Comune o stato estero di nascita', size=40),
'pf_provincia_nascita': fields.char('Provincia', size=2),
'pf_codice_stato_estero_domicilio': fields.char('Codice Stato Estero del Domicilio', size=3, help="Deve essere uno di quelli presenti nella tabella 'elenco dei paesi e\
territori esteri' pubblicata nelle istruzioni del modello Unico"),
'pg_denominazione': fields.char('Denominazione/Ragione sociale', size=60),
        'pg_citta_estera_sede_legale': fields.char('Città estera della Sede legale', size=40),
'pg_codice_stato_estero_domicilio': fields.char('Codice Stato Estero del Domicilio', size=3, help="Deve essere uno di quelli presenti nella tabella 'elenco dei paesi e\
territori esteri' pubblicata nelle istruzioni del modello Unico"),
'pg_indirizzo_sede_legale': fields.char('Indirizzo legale', size=40),
'codice_identificativo_IVA': fields.char('Codice Identificativo IVA (037=San Marino)', size=3),
'data_emissione': fields.date('Data emissione'),
'data_registrazione': fields.date('Data registrazione'),
'numero_fattura': fields.char('Numero Fattura/Doc riepilog.', size=16),
'importo': fields.float('Importo/imponibile', digits_compute=dp.get_precision('Account')),
'imposta': fields.float('Imposta', digits_compute=dp.get_precision('Account')),
}
def add_line(self, cr, uid, move, invoice, arg):
comunicazione_lines_obj = self.pool.get('spesometro.comunicazione.line.se')
comunicazione_id = arg.get('comunicazione_id', False)
com_line_search = [('comunicazione_id','=',comunicazione_id), ('partner_id', '=', move.partner_id.id)]
com_line_ids = self.search(cr, uid, com_line_search)
val = {}
        # Document values
doc_vals = self.pool.get('spesometro.comunicazione').compute_invoice_amounts(cr, uid, move, invoice, arg)
        # VAT number (p.iva)
if move.partner_id.vat:
partita_iva = move.partner_id.vat[2:]
else:
partita_iva = '{:11s}'.format("".zfill(11))
        # birth province
prov_code = False
if move.partner_id.birth_city.name:
city_data = move.partner_id.address[0]._set_vals_city_data(cr, uid, {'city' : move.partner_id.birth_city.name})
prov_id = city_data.get('province_id', False)
if prov_id:
                prov = self.pool.get('res.province').browse(cr, uid, prov_id)
                prov_code = prov.code
        # Address
address = self.pool.get('spesometro.comunicazione')._get_partner_address_obj(cr, uid, move, invoice, arg)
        # Codice identificativo IVA - to be filled in only for transactions with San Marino (country code = 037)
codice_identificativo_iva=''
if self.pool.get('spesometro.comunicazione').partner_is_from_san_marino(cr, uid, move, invoice, arg):
codice_identificativo_iva = '037'
val = {
'comunicazione_id' : comunicazione_id,
'partner_id' : move.partner_id.id,
'codice_fiscale' : move.partner_id.fiscalcode or False,
'noleggio' : move.partner_id.spesometro_leasing or False,
'pf_cognome' : move.partner_id.fiscalcode_surname or False,
'pf_nome' : move.partner_id.fiscalcode_firstname or False,
'pf_data_nascita' : move.partner_id.birth_date or False,
'pf_comune_stato_nascita' : move.partner_id.birth_city.name or False,
'pf_provincia_nascita' : prov_code or False,
'pf_codice_stato_estero_domicilio' : address.country_id.codice_stato_agenzia_entrate or codice_identificativo_iva or '',
'pg_denominazione' : move.partner_id.name or False,
'pg_citta_estera_sede_legale' : address.city or False,
'pg_codice_stato_estero_domicilio' : address.country_id.codice_stato_agenzia_entrate or codice_identificativo_iva or '',
'pg_indirizzo_sede_legale' : address.street or False,
'codice_identificativo_IVA' : codice_identificativo_iva,
'data_emissione': move.date,
'data_registrazione': invoice.date_invoice or move.date,
'numero_fattura': move.name,
'importo': doc_vals.get('amount_untaxed', 0),
'imposta': doc_vals.get('amount_tax', 0)
}
line_id = self.create(cr, uid, val)
return line_id
class spesometro_comunicazione_line_TU(orm.Model):
_name = "spesometro.comunicazione.line.tu"
_description = "Spesometro - Comunicazione linee quadro TU"
_columns = {
'comunicazione_id': fields.many2one('spesometro.comunicazione', 'Comunicazione', ondelete='cascade'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'cognome': fields.char('Cognome', size=24, help=""),
'nome': fields.char('Nome', size=20, help=""),
'data_nascita': fields.date('Data di nascita'),
'comune_stato_nascita': fields.char('Comune o stato estero di nascita', size=40),
'provincia_nascita': fields.char('Provincia', size=2),
'citta_estera_residenza': fields.char('Città Estera di residenza', size=40),
'codice_stato_estero': fields.char('Codice Stato Estero', size=3, help="Deve essere uno di quelli presenti nella tabella 'elenco dei paesi e\
territori esteri' pubblicata nelle istruzioni del modello Unico"),
'indirizzo_estero_residenza': fields.char('Indirizzo Estero di residenza', size=40),
'data_emissione': fields.date('Data emissione'),
'data_registrazione': fields.date('Data registrazione'),
'numero_fattura': fields.char('Numero Fattura/Doc riepilog.', size=16),
'importo': fields.float('Importo/imponibile', digits_compute=dp.get_precision('Account')),
'imposta': fields.float('Imposta', digits_compute=dp.get_precision('Account')),
}
| dhp-denero/LibrERP | l10n_it_spesometro/spesometro.py | Python | agpl-3.0 | 75,593 |
# coding: utf-8
import sys
sys.path.append(".")
from workshop.en.i import *
DISCLOSE_SECRET_WORD = TRUE
"""
Some variables are now handled by the student. Names are free.
"""
"""
Can be omitted, as 'reset(…)' will be called before the variables
are used.
"""
goodGuesses = ""
errorsAmount = 0
"""
NOTE: the four following functions are not called outside this file.
"""
def pickWord(*args):
return workshop.rfPickWord(*args)
def isLetterInWord(*args):
return workshop.rfIsLetterInWord(*args)
def getMask(*args):
return workshop.rfGetMask(*args)
def updateBody(*args):
return workshop.rfUpdateBody(*args)
"""
Reset the variables and the display for a new round and
return the secret word.
"""
def reset(suggestion,randomWord):
global goodGuesses,errorsAmount
secretWord = pickWord(suggestion,randomWord)
goodGuesses = ""
errorsAmount = 0
print(secretWord)
display(getMask(secretWord,""))
return secretWord
"""
N.B.: NOT THREAD-SAFE!!!
Multiple instances can be launched to show
why this is a problem.
"""
"""
- 'guess': the letter chosen by the player.
If 'guess' is in 'secretWord', the mask must be updated; otherwise
the drawing of the body must be updated.
"""
def handleGuess(guess,secretWord):
global goodGuesses,errorsAmount
if isLetterInWord(guess,secretWord): # Test is not mandatory
if not isLetterInWord(guess,goodGuesses):
goodGuesses += guess
display(getMask(secretWord,goodGuesses))
else:
errorsAmount += 1
updateBody(errorsAmount)
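# --- Illustrative sketch (added for clarity; not called by this workshop
# file). One way to avoid the shared-state problem noted above is to key
# the state by a per-player identifier instead of module-level globals;
# 'playerId' is a hypothetical handle a multi-player framework would supply.
_sessions = {}  # playerId -> {'goodGuesses': str, 'errorsAmount': int}
def handleGuessPerPlayer(playerId, guess, secretWord):
    state = _sessions.setdefault(playerId, {'goodGuesses': "", 'errorsAmount': 0})
    if isLetterInWord(guess, secretWord):
        if not isLetterInWord(guess, state['goodGuesses']):
            state['goodGuesses'] += guess
            display(getMask(secretWord, state['goodGuesses']))
    else:
        state['errorsAmount'] += 1
        updateBody(state['errorsAmount'])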
go(globals())
| epeios-q37/epeios | other/exercises/Hangman/en/i.py | Python | agpl-3.0 | 1,604 |
#!/usr/bin/env python
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from infomap import infomap
"""
Generate and draw a network with NetworkX, colored
according to the community structure found by Infomap.
"""
def findCommunities(G):
"""
Partition network with the Infomap algorithm.
    Annotates nodes with 'community' id and returns the number of communities found.
"""
    conf = infomap.init("--two-level")
    # Input data
    network = infomap.Network(conf)
# Output data
tree = infomap.HierarchicalNetwork(conf)
print "Building network..."
for e in G.edges_iter():
network.addLink(*e)
    network.finalizeAndCheckNetwork(True, nx.number_of_nodes(G))
# Cluster network
    infomap.run(network, tree)
print "Found %d top modules with codelength: %f" % (tree.numTopModules(), tree.codelength())
communities = {}
clusterIndexLevel = 1 # 1, 2, ... or -1 for top, second, ... or lowest cluster level
for node in tree.leafIter(clusterIndexLevel):
communities[node.originalLeafIndex] = node.clusterIndex()
nx.set_node_attributes(G, 'community', communities)
return tree.numTopModules()
def drawNetwork(G):
# position map
pos = nx.spring_layout(G)
# community ids
communities = [v for k,v in nx.get_node_attributes(G, 'community').items()]
numCommunities = max(communities) + 1
# color map from http://colorbrewer2.org/
cmapLight = colors.ListedColormap(['#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6'], 'indexed', numCommunities)
cmapDark = colors.ListedColormap(['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a'], 'indexed', numCommunities)
# edges
nx.draw_networkx_edges(G, pos)
# nodes
nodeCollection = nx.draw_networkx_nodes(G,
pos = pos,
node_color = communities,
cmap = cmapLight
)
# set node border color to the darker shade
darkColors = [cmapDark(v) for v in communities]
nodeCollection.set_edgecolor(darkColors)
# Print node labels separately instead
for n in G.nodes_iter():
plt.annotate(n,
xy = pos[n],
textcoords = 'offset points',
horizontalalignment = 'center',
verticalalignment = 'center',
xytext = [0, 2],
color = cmapDark(communities[n])
)
plt.axis('off')
# plt.savefig("karate.png")
plt.show()
G=nx.karate_club_graph()
numCommunities = findCommunities(G)
print "Number of communities found:", numCommunities
drawNetwork(G)
| nicktimko/infomap | examples/python/example-networkx.py | Python | agpl-3.0 | 2,366 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Label for Easylabel',
'version': '0.1',
'category': 'Generic/Label',
'author': "Micronaet S.r.l. - Nicola Riolini",
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
"depends": [
'base',
'product',
'sale',
'report_aeroo',
],
"data": [
'security/easylabel_group.xml',
'security/ir.model.access.csv',
'easylabel.xml',
'wizard/view_wizard.xml',
'report/report_easylabel.xml',
],
"qweb": [],
"demo": [],
"test": [],
"active": False,
"installable": True,
"application": False,
}
| Micronaet/micronaet-migration | label_easy/__openerp__.py | Python | agpl-3.0 | 1,694 |
#!/usr/bin/python3
###### Status reference #####
# OK -- the service is online, handles requests, accepts and returns flags.
# MUMBLE -- the service is online, but behaves incorrectly
# CORRUPT -- the service is online, but flags that were planted cannot be retrieved.
# DOWN -- the service is offline.
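#
# The dummy service below speaks a line-oriented protocol (inferred from
# the checker code; shown here for reference):
#   client: REG\n / PUT\n / GET\n      server: OK\n
#   client: <login>                    server: OK\n
#   client: <password>                 server: OK\n
#   PUT only: client sends <flag>      server: OK\n
#   GET only: server replies "<flag> <status>\n"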
from sys import argv
from socket import socket, AF_INET, SOCK_STREAM
from string import ascii_letters
from random import randint, shuffle
from time import sleep
from tinfoilhat import Checker, \
ServiceMumbleException, \
ServiceCorruptException, \
ServiceDownException
class DummyChecker(Checker):
BUFSIZE = 1024
"""
    Generate a login
    @return a login string of 10 letters of the English alphabet
"""
def random_login(self):
symbols = list(ascii_letters)
shuffle(symbols)
return ''.join(symbols[0:10])
"""
    Generate a password
    @return a password string of 10 digits
"""
def random_password(self):
return str(randint(100500**2, 100500**3))[0:10]
"""
    Send the login and password to the service.
    @param s socket
    @param login login
    @param password password
"""
def send_cred(self, s, login, password):
s.send(login.encode('utf-8'))
if b'OK\n' != s.recv(self.BUFSIZE):
raise ServiceMumbleException()
s.send(password.encode('utf-8'))
if b'OK\n' != s.recv(self.BUFSIZE):
raise ServiceMumbleException()
"""
    Put a flag into the service
    @param host host address
    @param port service port
    @param flag flag
    @return the state needed to retrieve the flag later
"""
def put(self, host, port, flag):
try:
s = socket(AF_INET, SOCK_STREAM)
s.connect((host, port))
s.send(b'REG\n')
if b'OK\n' != s.recv(self.BUFSIZE):
raise ServiceMumbleException()
login = self.random_login()
password = self.random_password()
self.send_cred(s, login, password)
s.close()
s = socket(AF_INET, SOCK_STREAM)
s.connect((host, port))
s.send(b'PUT\n')
if b'OK\n' != s.recv(self.BUFSIZE):
raise ServiceMumbleException()
self.send_cred(s, login, password)
s.send(flag.encode('utf-8'))
if b'OK\n' != s.recv(self.BUFSIZE):
raise ServiceMumbleException()
return login + ":" + password
except (OSError, IOError) as e:
if e.errno == 111: # ConnectionRefusedError
raise ServiceDownException()
else:
raise ServiceMumbleException()
"""
    Get a flag from the service
    @param host host address
    @param port service port
    @param state state
    @return flag
"""
def get(self, host, port, state):
login, password = state.split(':')
s = socket(AF_INET, SOCK_STREAM)
s.connect((host, port))
s.send(b'GET\n')
if b'OK\n' != s.recv(self.BUFSIZE):
raise ServiceMumbleException()
try:
self.send_cred(s, login, password)
except ServiceMumbleException:
raise ServiceCorruptException()
try:
flag, ret = s.recv(self.BUFSIZE).split()
return flag.decode('utf-8')
except ValueError:
raise ServiceCorruptException()
"""
    Check the service state
    @param host host address
    @param port service port
"""
def chk(self, host, port):
        # Since the service implements only the storage logic,
        # that is what we check here.
        # Unlike put and get, this happens at a single moment in time,
        # so the persistence of the data over time is not verified.
data = self.random_password()
try:
state = self.put(host, port, data)
new_data = self.get(host, port, state)
except (OSError, IOError) as e:
if e.errno == 111: # ConnectionRefusedError
raise ServiceDownException()
else:
raise ServiceMumbleException()
if data != new_data:
raise ServiceMumbleException()
if __name__ == '__main__':
DummyChecker(argv)
| jollheef/tin_foil_hat | checker/python-api/dummy_checker.py | Python | agpl-3.0 | 4,904 |
from django.db import models
from submissions.models.utils import strip_punc
from django.contrib.auth.models import User
from submissions.models.album import Album
from django.contrib.contenttypes.models import ContentType
class Track(models.Model):
"""
A single track(song) for an album.
"""
URL_CHOICES = (
('download', 'download'),
('stream','stream'),
('buy','buy'),
)
title = models.CharField(max_length=255)
cleaned_name = models.CharField(max_length=255, blank=True, null=True, editable=False, help_text="A cleaned name without punctuation or weird stuff.")
track_number = models.IntegerField(blank=True, null=True)
    # TODO: replace with a RegexField (r'[0-9:]+') in forms
duration = models.CharField(max_length=255, blank=True, null=True)
url = models.URLField('URL', blank=True, null=True)
url_type = models.CharField('Type', max_length=255, choices=URL_CHOICES, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
uploader = models.ForeignKey(User, blank=True, null=True, help_text="The uploader of this track.")
album = models.ForeignKey(Album, related_name="tracks", help_text="The album to which this track belongs.")
mbid = models.CharField(max_length=255, blank=True, null=True)
class Meta:
app_label = "submissions"
ordering = ('track_number', 'title')
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return ('album-detail', (), {'artist': self.album.artist.slug, 'album': self.album.slug })
def save(self, *args, **kwargs):
if not self.cleaned_name:
self.cleaned_name = strip_punc(self.title)
super(Track, self).save(*args, **kwargs)
| tsoporan/tehorng | submissions/models/track.py | Python | agpl-3.0 | 1,835 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Deck'
db.create_table(u'djecks_anki_deck', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('apkg', self.gf('django.db.models.fields.files.FileField')(max_length=100, blank=True)),
))
db.send_create_signal(u'djecks_anki', ['Deck'])
def backwards(self, orm):
# Deleting model 'Deck'
db.delete_table(u'djecks_anki_deck')
models = {
u'djecks_anki.deck': {
'Meta': {'object_name': 'Deck'},
'apkg': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['djecks_anki'] | bdunnette/djecks_anki | migrations/0002_auto__add_deck.py | Python | agpl-3.0 | 1,388 |
from bottle import request
class BottleHelper:
    """Map Apify methods to Bottle's routing magic,
    and provide other request helpers."""
def __init__(self, app):
self.app = app
def route(self, path, function, methods=['GET']):
self.app.route(path, method=methods, callback=function)
def request(self, items=None):
if request.json is not None:
datas = request.json
else:
if hasattr(request, 'params'):
datas = request.params.dict
else:
return None
if items is None:
return datas
elif isinstance(items, list):
ret = {}
for i in items:
data = datas.get(i)
ret[i] = data
return ret
else:
return datas.get(items)
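# --- Hedged usage sketch (added for illustration; not part of the original
# module). The '/hello' path and 'hello' handler are made up for the example.
if __name__ == '__main__':
    from bottle import Bottle
    demo_app = Bottle()
    helper = BottleHelper(demo_app)
    def hello():
        # Reads the JSON body if present, otherwise query/form parameters.
        name = helper.request('name')
        return {'greeting': 'Hello, %s!' % (name or 'world')}
    helper.route('/hello', hello, methods=['GET', 'POST'])
    demo_app.run(host='localhost', port=8080)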
| abondis/api-hi | api_hi/helpers/bottle.py | Python | agpl-3.0 | 816 |
from django.core.management.base import BaseCommand, CommandError
from voty.initproc.models import Quorum, IssueSupportersQuorum, IssueVotersQuorum
from django.contrib.auth import get_user_model
from django.utils import timezone
from datetime import date
from math import ceil
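# Illustrative sketch (added for clarity; not used by the command in this
# file): the threshold table quoted below, expressed as a pure function that
# mirrors the if/elif chain in handle() so the mapping can be unit-tested
# in isolation.
def initiative_quorum_for(total_active_users):
    if total_active_users < 100:
        return 10
    elif total_active_users < 300:
        return 15
    elif total_active_users < 600:
        return 20
    elif total_active_users < 1000:
        return 30
    elif total_active_users < 2000:
        return 35
    elif total_active_users < 5000:
        return 50
    return int(ceil(total_active_users / 100.0))  # 1% from 5000 upward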
"""
- Up to 99 eligible voters: 10 people
- From 100 to 299 eligible voters: 15 people
- From 300 to 599 eligible voters: 20 people
- From 600 to 999 eligible voters: 30 people
- From 1000 to 1999 eligible voters: 35 people
- From 2000 to 4999 eligible voters: 50 people
- From 5000 eligible voters: 1% of the eligible voters
"""
class Command(BaseCommand):
help = "Calculate the next quorum and set it"
def handle(self, *args, **options):
now = timezone.now()
year = now.year
month = now.month
# round to turn of month
if now.day > 15:
month += 1
month -= 6
if month < 1:
year -= 1
month += 12
threshold = timezone.datetime(year=year, month=month, day=1, tzinfo=now.tzinfo)
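        # Example (added for clarity): run on Aug 20 -> day > 15, so the
        # month rounds up to Sep and minus six months gives a threshold of
        # Mar 1; run on Aug 10 -> threshold Feb 1. Only users with activity
        # after the threshold count as active below.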
total = get_user_model().objects.filter(is_active=True, config__last_activity__gt=threshold).count()
totalpartymembers = get_user_model().objects.filter(is_active=True, config__is_party_member=True, config__last_activity__gt=threshold).count()
print("Total active users: {}".format(total))
print("Total active party members: {}".format(totalpartymembers))
#Quorum for Issue Support
quorum = ceil(total / 20.0)
if quorum < 5:
quorum = 5
IssueSupportersQuorum(value=quorum).save()
print("Issue Support Quorum set to {}".format(quorum))
#Quorum for Issue Voting
quorum = ceil(totalpartymembers / 10.0)
if quorum < 5:
quorum = 5
#commented out because it is now set manually, 10% of all party members, not only of those who have a login or are active in Plenum
#IssueVotersQuorum(value=quorum).save()
#print("Issue Voting Quorum set to {}".format(quorum))
quorum = ceil(total / 100.0)
if total < 100:
quorum = 10
elif total < 300:
quorum = 15
elif total < 600:
quorum = 20
elif total < 1000:
quorum = 30
elif total < 2000:
quorum = 35
elif total < 5000:
quorum = 50
Quorum(quorum=quorum).save()
        print("Initiatives Quorum set to {}".format(quorum))
| DemokratieInBewegung/abstimmungstool | voty/initproc/management/commands/set_quorum.py | Python | agpl-3.0 | 2,590 |
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.Scene.SceneNode import SceneNode
from UM.Resources import Resources
from UM.Math.Color import Color
from UM.Math.Vector import Vector
from UM.Mesh.MeshData import MeshData
import numpy
class ConvexHullNode(SceneNode):
def __init__(self, node, hull, parent = None):
super().__init__(parent)
self.setCalculateBoundingBox(False)
self._material = None
self._original_parent = parent
self._inherit_orientation = False
self._inherit_scale = False
self._node = node
self._node.transformationChanged.connect(self._onNodePositionChanged)
self._node.parentChanged.connect(self._onNodeParentChanged)
#self._onNodePositionChanged(self._node)
self._hull = hull
hull_points = self._hull.getPoints()
center = (hull_points.min(0) + hull_points.max(0)) / 2.0
mesh = MeshData()
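        # Build a triangle fan over the 2D convex hull: vertex 0 is the
        # center of the hull's bounding box, vertices 1..N are the hull
        # points, all lifted slightly above the build plate (y = 0.1,
        # presumably to avoid z-fighting with the plate).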
mesh.addVertex(center[0], 0.1, center[1])
for point in hull_points:
mesh.addVertex(point[0], 0.1, point[1])
indices = []
for i in range(len(hull_points) - 1):
indices.append([0, i + 1, i + 2])
indices.append([0, mesh.getVertexCount() - 1, 1])
mesh.addIndices(numpy.array(indices, numpy.int32))
self.setMeshData(mesh)
def getWatchedNode(self):
return self._node
def render(self, renderer):
if not self._material:
self._material = renderer.createMaterial(Resources.getPath(Resources.ShadersLocation, "basic.vert"), Resources.getPath(Resources.ShadersLocation, "color.frag"))
self._material.setUniformValue("u_color", Color(35, 35, 35, 128))
renderer.queueNode(self, material = self._material, transparent = True)
return True
def _onNodePositionChanged(self, node):
#self.setPosition(node.getWorldPosition())
if hasattr(node, "_convex_hull"):
delattr(node, "_convex_hull")
self.setParent(None)
#self._node.transformationChanged.disconnect(self._onNodePositionChanged)
#self._node.parentChanged.disconnect(self._onNodeParentChanged)
def _onNodeParentChanged(self, node):
if node.getParent():
self.setParent(self._original_parent)
else:
self.setParent(None)
| quillford/Cura | cura/ConvexHullNode.py | Python | agpl-3.0 | 2,406 |
# Copyright (C) 2015 Linaro Limited
#
# Author: Stevan Radakovic <[email protected]>
#
# This file is part of Lava Server.
#
# Lava Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# Lava Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Lava Server. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from lava_results_app.models import (
Chart,
ChartQuery,
ChartQueryUser,
TestCase,
)
class ChartForm(forms.ModelForm):
class Meta:
model = Chart
exclude = ('is_published', 'chart_group', 'group', 'queries')
widgets = {'owner': forms.HiddenInput}
def __init__(self, owner, *args, **kwargs):
super(ChartForm, self).__init__(*args, **kwargs)
def save(self, commit=True, **kwargs):
instance = super(ChartForm, self).save(commit=commit, **kwargs)
return instance
class ChartQueryForm(forms.ModelForm):
class Meta:
model = ChartQuery
exclude = ()
widgets = {'chart': forms.HiddenInput,
'query': forms.HiddenInput,
'relative_index': forms.HiddenInput}
def __init__(self, user, *args, **kwargs):
super(ChartQueryForm, self).__init__(*args, **kwargs)
def save(self, commit=True, **kwargs):
instance = super(ChartQueryForm, self).save(commit=commit, **kwargs)
return instance
def clean(self):
form_data = self.cleaned_data
try:
# Chart type validation.
if form_data["query"].content_type.model_class() == TestCase and \
form_data["chart_type"] == "pass/fail":
self.add_error(
"chart_type",
"Pass/fail is incorrect value for 'chart_type' with TestCase based queries.")
except KeyError:
# form_data will pick up the rest of validation errors.
pass
return form_data
class ChartQueryUserForm(forms.ModelForm):
class Meta:
model = ChartQueryUser
exclude = ['user', 'chart_query']
def __init__(self, user, *args, **kwargs):
super(ChartQueryUserForm, self).__init__(*args, **kwargs)
| Linaro/lava-server | lava_results_app/views/chart/forms.py | Python | agpl-3.0 | 2,583 |
# coding: utf-8
from django.contrib.auth.models import User, Permission
from django.urls import reverse
from django.utils import timezone
from rest_framework import status
from kpi.constants import (
ASSET_TYPE_COLLECTION,
PERM_CHANGE_ASSET,
PERM_MANAGE_ASSET,
PERM_VIEW_ASSET,
)
from kpi.models import Asset, ObjectPermission
from kpi.tests.kpi_test_case import KpiTestCase
from kpi.urls.router_api_v2 import URL_NAMESPACE as ROUTER_URL_NAMESPACE
from kpi.utils.object_permission import get_anonymous_user
class ApiAnonymousPermissionsTestCase(KpiTestCase):
URL_NAMESPACE = ROUTER_URL_NAMESPACE
def setUp(self):
self.anon = get_anonymous_user()
self.someuser = User.objects.get(username='someuser')
self.someuser_password = 'someuser'
# This was written when we allowed anons to create assets, but I'll
# leave it here just to make sure it has no effect
permission = Permission.objects.get(codename='add_asset')
self.anon.user_permissions.add(permission)
# Log in and create an asset that anon can access
self.client.login(username=self.someuser.username,
password=self.someuser_password)
self.anon_accessible = self.create_asset('Anonymous can access this!')
self.add_perm(self.anon_accessible, self.anon, 'view_')
# Log out and become anonymous again
self.client.logout()
response = self.client.get(reverse('currentuser-detail'))
self.assertFalse('username' in response.data)
def test_anon_list_assets(self):
# `view_` granted to anon means detail access, NOT list access
self.assert_object_in_object_list(self.anon_accessible, in_list=False)
def test_anon_asset_detail(self):
self.assert_detail_viewable(self.anon_accessible)
def test_cannot_create_asset(self):
url = reverse(self._get_endpoint('asset-list'))
data = {'name': 'my asset', 'content': ''}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN,
                         msg="anonymous user cannot create an asset")
class ApiPermissionsPublicAssetTestCase(KpiTestCase):
URL_NAMESPACE = ROUTER_URL_NAMESPACE
def setUp(self):
KpiTestCase.setUp(self)
self.anon = get_anonymous_user()
self.admin = User.objects.get(username='admin')
self.admin_password = 'pass'
self.someuser = User.objects.get(username='someuser')
self.someuser_password = 'someuser'
self.login(self.admin.username, self.admin_password)
self.admins_public_asset = self.create_asset('admins_public_asset')
self.add_perm(self.admins_public_asset, self.anon, 'view')
self.login(self.someuser.username, self.someuser_password)
self.someusers_public_asset = self.create_asset('someusers_public_asset')
self.add_perm(self.someusers_public_asset, self.anon, 'view')
def test_user_can_view_public_asset(self):
self.assert_detail_viewable(self.admins_public_asset, self.someuser, self.someuser_password)
def test_public_asset_not_in_list_user(self):
self.assert_object_in_object_list(self.admins_public_asset, self.someuser, self.someuser_password,
in_list=False)
def test_public_asset_not_in_list_admin(self):
self.assert_object_in_object_list(self.someusers_public_asset, self.admin, self.admin_password,
in_list=False)
def test_revoke_anon_from_asset_in_public_collection(self):
self.login(self.someuser.username, self.someuser_password)
public_collection = self.create_collection('public_collection')
child_asset = self.create_asset('child_asset_in_public_collection')
self.add_to_collection(child_asset, public_collection)
child_asset.refresh_from_db()
# Anon should have no access at this point
self.client.logout()
self.assert_viewable(child_asset, viewable=False)
# Grant anon access to the parent collection
self.login(self.someuser.username, self.someuser_password)
self.add_perm(public_collection, self.anon, 'view_')
# Verify anon can access the child asset
self.client.logout()
# Anon can only see a public asset by accessing the detail view
        # directly; `assert_viewable()` will always fail because it expects the
# asset to be in the list view as well
self.assert_detail_viewable(child_asset)
# Revoke anon's access to the child asset
self.login(self.someuser.username, self.someuser_password)
self.remove_perm_v2_api(child_asset, self.anon, PERM_VIEW_ASSET)
# Make sure anon cannot access the child asset any longer
self.client.logout()
self.assert_viewable(child_asset, viewable=False)
class ApiPermissionsTestCase(KpiTestCase):
fixtures = ['test_data']
URL_NAMESPACE = ROUTER_URL_NAMESPACE
def setUp(self):
self.admin = User.objects.get(username='admin')
self.admin_password = 'pass'
self.someuser = User.objects.get(username='someuser')
self.someuser_password = 'someuser'
self.anotheruser = User.objects.get(username='anotheruser')
self.anotheruser_password = 'anotheruser'
self.assertTrue(self.client.login(username=self.admin.username,
password=self.admin_password))
self.admin_asset = self.create_asset('admin_asset')
self.admin_collection = self.create_collection('admin_collection')
self.child_collection = self.create_collection('child_collection')
self.add_to_collection(self.child_collection, self.admin_collection)
self.client.logout()
################# Asset tests #####################
def test_own_asset_in_asset_list(self):
self.assert_viewable(self.admin_asset, self.admin,
self.admin_password)
def test_viewable_asset_in_asset_list(self):
# Give "someuser" view permissions on an asset owned by "admin".
self.add_perm(self.admin_asset, self.someuser, 'view_')
# Test that "someuser" can now view the asset.
self.assert_viewable(self.admin_asset, self.someuser,
self.someuser_password)
def test_non_viewable_asset_not_in_asset_list(self):
# Wow, that's quite a function name...
# Ensure that "someuser" doesn't have permission to view the survey
# asset owned by "admin".
perm_name = self._get_perm_name('view_', self.admin_asset)
self.assertFalse(self.someuser.has_perm(perm_name, self.admin_asset))
# Verify they can't view the asset through the API.
self.assert_viewable(self.admin_asset, self.someuser,
self.someuser_password, viewable=False)
def test_inherited_viewable_assets_in_asset_list(self):
# Give "someuser" view permissions on a collection owned by "admin" and
# add an asset also owned by "admin" to that collection.
self.add_perm(self.admin_asset, self.someuser, 'view_')
self.add_to_collection(self.admin_asset, self.admin_collection,
self.admin, self.admin_password)
# Test that "someuser" can now view the asset.
self.assert_viewable(self.admin_asset, self.someuser,
self.someuser_password)
def test_viewable_asset_inheritance_conflict(self):
# Log in as "admin", create a new child collection, and add an asset to
# that collection.
self.add_to_collection(self.admin_asset, self.child_collection,
self.admin, self.admin_password)
# Give "someuser" view permission on 'child_collection'.
self.add_perm(self.child_collection, self.someuser, 'view_')
# Give "someuser" view permission on the parent collection.
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Revoke the view permissions of "someuser" on the parent collection.
self.remove_perm(self.admin_collection, self.admin,
self.admin_password, self.someuser,
self.someuser_password, 'view_')
# Confirm that "someuser" can view the contents of 'child_collection'.
self.assert_viewable(self.admin_asset, self.someuser,
self.someuser_password)
def test_non_viewable_asset_inheritance_conflict(self):
# Log in as "admin", create a new child collection, and add an asset to
# that collection.
self.add_to_collection(self.admin_asset, self.child_collection,
self.admin, self.admin_password)
# Give "someuser" view permission on the parent collection.
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Revoke the view permissions of "someuser" on the child collection.
self.remove_perm(self.child_collection, self.admin, self.admin_password,
self.someuser, self.someuser_password, 'view_')
# Confirm that "someuser" can't view the contents of 'child_collection'.
self.assert_viewable(self.admin_asset, self.someuser,
self.someuser_password, viewable=False)
def test_viewable_asset_not_deletable(self):
# Give "someuser" view permissions on an asset owned by "admin".
self.add_perm(self.admin_asset, self.someuser, 'view_')
# Confirm that "someuser" is not allowed to delete the asset.
delete_perm = self._get_perm_name('delete_', self.admin_asset)
self.assertFalse(self.someuser.has_perm(delete_perm, self.admin_asset))
# Test that "someuser" can't delete the asset.
self.client.login(username=self.someuser.username,
password=self.someuser_password)
url = reverse(self._get_endpoint('asset-detail'), kwargs={'uid': self.admin_asset.uid})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_inherited_viewable_asset_not_deletable(self):
# Give "someuser" view permissions on a collection owned by "admin" and
# add an asset also owned by "admin" to that collection.
self.add_perm(self.admin_asset, self.someuser, 'view_')
self.add_to_collection(self.admin_asset, self.admin_collection,
self.admin, self.admin_password)
# Confirm that "someuser" is not allowed to delete the asset.
delete_perm = self._get_perm_name('delete_', self.admin_asset)
self.assertFalse(self.someuser.has_perm(delete_perm, self.admin_asset))
# Test that "someuser" can't delete the asset.
self.client.login(username=self.someuser.username,
password=self.someuser_password)
url = reverse(self._get_endpoint('asset-detail'), kwargs={'uid': self.admin_asset.uid})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_shared_asset_remove_own_permissions_allowed(self):
"""
        Ensure that a non-owner with whom an asset has been shared is able to
        remove themselves from that asset if they want.
"""
self.client.login(
username=self.someuser.username,
password=self.someuser_password,
)
new_asset = self.create_asset(
name='a new asset',
owner=self.someuser,
)
perm = new_asset.assign_perm(self.anotheruser, 'view_asset')
kwargs = {
'parent_lookup_asset': new_asset.uid,
'uid': perm.uid,
}
url = reverse(
'api_v2:asset-permission-assignment-detail', kwargs=kwargs
)
self.client.logout()
self.client.login(
username=self.anotheruser.username,
password=self.anotheruser_password,
)
assert self.anotheruser.has_perm(PERM_VIEW_ASSET, new_asset)
# `anotheruser` attempting to remove themselves from the asset
res = self.client.delete(url)
assert res.status_code == status.HTTP_204_NO_CONTENT
assert not self.anotheruser.has_perm(PERM_VIEW_ASSET, new_asset)
assert len(new_asset.get_perms(self.anotheruser)) == 0
def test_shared_asset_non_owner_remove_owners_permissions_not_allowed(self):
"""
        Ensure that a non-owner with whom an asset has been shared is not
        able to remove permissions from the owner of that asset.
"""
self.client.login(
username=self.someuser.username,
password=self.someuser_password,
)
new_asset = self.create_asset(
name='a new asset',
owner=self.someuser,
)
# Getting existing permission for the owner of the asset
perm = ObjectPermission.objects.filter(asset=new_asset).get(
user=self.someuser, permission__codename=PERM_VIEW_ASSET
)
new_asset.assign_perm(self.anotheruser, PERM_VIEW_ASSET)
kwargs = {
'parent_lookup_asset': new_asset.uid,
'uid': perm.uid,
}
url = reverse(
'api_v2:asset-permission-assignment-detail', kwargs=kwargs
)
self.client.logout()
self.client.login(
username=self.anotheruser.username,
password=self.anotheruser_password,
)
assert self.someuser.has_perm(PERM_VIEW_ASSET, new_asset)
# `anotheruser` attempting to remove `someuser` from the asset
res = self.client.delete(url)
assert res.status_code == status.HTTP_403_FORBIDDEN
assert self.someuser.has_perm(PERM_VIEW_ASSET, new_asset)
def test_shared_asset_non_owner_remove_another_non_owners_permissions_not_allowed(self):
"""
        Ensure that a non-owner who has an asset shared with them cannot
remove permissions from another non-owner with that same asset shared
with them.
"""
yetanotheruser = User.objects.create(
username='yetanotheruser',
)
self.client.login(
username=self.someuser.username,
password=self.someuser_password,
)
new_asset = self.create_asset(
name='a new asset',
owner=self.someuser,
owner_password=self.someuser_password,
)
new_asset.assign_perm(self.anotheruser, PERM_VIEW_ASSET)
perm = new_asset.assign_perm(yetanotheruser, PERM_VIEW_ASSET)
kwargs = {
'parent_lookup_asset': new_asset.uid,
'uid': perm.uid,
}
url = reverse(
'api_v2:asset-permission-assignment-detail', kwargs=kwargs
)
self.client.logout()
self.client.login(
username=self.anotheruser.username,
password=self.anotheruser_password,
)
assert yetanotheruser.has_perm(PERM_VIEW_ASSET, new_asset)
# `anotheruser` attempting to remove `yetanotheruser` from the asset
res = self.client.delete(url)
assert res.status_code == status.HTTP_404_NOT_FOUND
assert yetanotheruser.has_perm(PERM_VIEW_ASSET, new_asset)
def test_shared_asset_manage_asset_remove_another_non_owners_permissions_allowed(self):
"""
Ensure that a non-owner who has an asset shared with them and has
`manage_asset` permissions is able to remove permissions from another
non-owner with that same asset shared with them.
"""
yetanotheruser = User.objects.create(
username='yetanotheruser',
)
self.client.login(
username=self.someuser.username,
password=self.someuser_password,
)
new_asset = self.create_asset(
name='a new asset',
owner=self.someuser,
owner_password=self.someuser_password,
)
new_asset.assign_perm(self.anotheruser, PERM_MANAGE_ASSET)
perm = new_asset.assign_perm(yetanotheruser, PERM_VIEW_ASSET)
kwargs = {
'parent_lookup_asset': new_asset.uid,
'uid': perm.uid,
}
url = reverse(
'api_v2:asset-permission-assignment-detail', kwargs=kwargs
)
self.client.logout()
self.client.login(
username=self.anotheruser.username,
password=self.anotheruser_password,
)
assert yetanotheruser.has_perm(PERM_VIEW_ASSET, new_asset)
# `anotheruser` attempting to remove `yetanotheruser` from the asset
res = self.client.delete(url)
assert res.status_code == status.HTTP_204_NO_CONTENT
assert not yetanotheruser.has_perm(PERM_VIEW_ASSET, new_asset)
def test_copy_permissions_between_assets(self):
# Give "someuser" edit permissions on an asset owned by "admin"
self.add_perm(self.admin_asset, self.someuser, 'change_')
# Confirm that "someuser" has received the implied permissions
expected_perms = [PERM_CHANGE_ASSET, PERM_VIEW_ASSET]
self.assertListEqual(
sorted(self.admin_asset.get_perms(self.someuser)),
expected_perms
)
# Create another asset to receive the copied permissions
new_asset = self.create_asset(
name='destination asset', owner=self.admin,
owner_password=self.admin_password
)
# Add some extraneous permissions to the destination asset; these
# should be removed by the copy operation
self.add_perm(new_asset, self.anotheruser, 'view_')
self.assertTrue(self.anotheruser.has_perm(PERM_VIEW_ASSET, new_asset))
# Perform the permissions copy via the API endpoint
self.client.login(
username=self.admin.username, password=self.admin_password
)
if self.URL_NAMESPACE is None:
dest_asset_perm_url = reverse(
'asset-permissions', kwargs={'uid': new_asset.uid}
)
else:
dest_asset_perm_url = reverse(
'api_v2:asset-permission-assignment-clone',
kwargs={'parent_lookup_asset': new_asset.uid}
)
# TODO: check that `clone_from` can also be a URL.
# You know, Roy Fielding and all that.
self.client.patch(
dest_asset_perm_url, data={'clone_from': self.admin_asset.uid}
)
# Check the result; since the source and destination have the same
# owner, the permissions should be identical
self.assertDictEqual(
self.admin_asset.get_users_with_perms(attach_perms=True),
new_asset.get_users_with_perms(attach_perms=True)
)
def test_cannot_copy_permissions_between_non_owned_assets(self):
# Give "someuser" view permissions on an asset owned by "admin"
self.add_perm(self.admin_asset, self.someuser, 'view_')
self.assertTrue(self.someuser.has_perm(PERM_VIEW_ASSET, self.admin_asset))
# Create another asset to receive the copied permissions
new_asset = self.create_asset(
name='destination asset', owner=self.admin,
owner_password=self.admin_password
)
# Give "someuser" edit permissions on the new asset owned by "admin"
self.add_perm(new_asset, self.someuser, 'change_')
self.assertTrue(self.someuser.has_perm(PERM_CHANGE_ASSET, new_asset))
new_asset_perms_before_copy_attempt = new_asset.get_users_with_perms(
attach_perms=True
)
# Perform the permissions copy via the API endpoint
self.client.login(
username=self.someuser.username, password=self.someuser_password
)
if self.URL_NAMESPACE is None:
dest_asset_perm_url = reverse(
'asset-permissions', kwargs={'uid': new_asset.uid}
)
else:
dest_asset_perm_url = reverse(
'api_v2:asset-permission-assignment-clone',
kwargs={'parent_lookup_asset': new_asset.uid}
)
response = self.client.patch(
dest_asset_perm_url, data={'clone_from': self.admin_asset.uid}
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# Check the result; nothing should have changed
self.assertDictEqual(
new_asset_perms_before_copy_attempt,
new_asset.get_users_with_perms(attach_perms=True)
)
def test_user_cannot_copy_permissions_from_non_viewable_asset(self):
# Make sure "someuser" cannot view the asset owned by "admin"
self.assertFalse(
self.someuser.has_perm(PERM_VIEW_ASSET, self.admin_asset)
)
# Create another asset to receive the copied permissions
new_asset = self.create_asset(
name='destination asset', owner=self.admin,
owner_password=self.admin_password
)
# Take note of the destination asset's permissions to make sure they
# are *not* changed later
dest_asset_original_perms = new_asset.get_users_with_perms(
attach_perms=True
)
# Perform the permissions copy via the API endpoint
self.client.login(
username=self.someuser.username, password=self.someuser_password
)
if self.URL_NAMESPACE is None:
dest_asset_perm_url = reverse(
'asset-permissions', kwargs={'uid': new_asset.uid}
)
else:
dest_asset_perm_url = reverse(
'api_v2:asset-permission-assignment-clone',
kwargs={'parent_lookup_asset': new_asset.uid}
)
response = self.client.patch(
dest_asset_perm_url, data={'clone_from': self.admin_asset.uid}
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# Make sure no permissions were changed on the destination asset
self.assertDictEqual(
dest_asset_original_perms,
new_asset.get_users_with_perms(attach_perms=True)
)
def test_user_cannot_copy_permissions_to_non_editable_asset(self):
# Give "someuser" view permissions on an asset owned by "admin"
self.add_perm(self.admin_asset, self.someuser, 'view_')
self.assertTrue(self.someuser.has_perm(PERM_VIEW_ASSET, self.admin_asset))
# Create another asset to receive the copied permissions
new_asset = self.create_asset(
name='destination asset', owner=self.admin,
owner_password=self.admin_password
)
# Give "someuser" view permissions on the new asset owned by "admin"
self.add_perm(new_asset, self.someuser, 'view_')
self.assertTrue(self.someuser.has_perm(PERM_VIEW_ASSET, new_asset))
# Take note of the destination asset's permissions to make sure they
# are *not* changed later
dest_asset_original_perms = new_asset.get_users_with_perms(
attach_perms=True
)
# Perform the permissions copy via the API endpoint
self.client.login(
username=self.someuser.username, password=self.someuser_password
)
if self.URL_NAMESPACE is None:
dest_asset_perm_url = reverse(
'asset-permissions', kwargs={'uid': new_asset.uid}
)
else:
dest_asset_perm_url = reverse(
'api_v2:asset-permission-assignment-clone',
kwargs={'parent_lookup_asset': new_asset.uid}
)
response = self.client.patch(
dest_asset_perm_url, data={'clone_from': self.admin_asset.uid}
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# Make sure no permissions were changed on the destination asset
self.assertDictEqual(
dest_asset_original_perms,
new_asset.get_users_with_perms(attach_perms=True)
)
############# Collection tests ###############
def test_own_collection_in_collection_list(self):
self.assert_viewable(self.admin_collection, self.admin,
self.admin_password)
def test_viewable_collection_in_collection_list(self):
# Give "someuser" view permissions on a collection owned by "admin".
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Test that "someuser" can now view the collection.
self.assert_viewable(self.admin_collection, self.someuser,
self.someuser_password)
def test_non_viewable_collection_not_in_collection_list(self):
# Wow, that's quite a function name...
# Ensure that "someuser" doesn't have permission to view the survey
# collection owned by "admin".
perm_name = self._get_perm_name('view_', self.admin_collection)
self.assertFalse(self.someuser.has_perm(perm_name, self.admin_collection))
# Verify they can't view the collection through the API.
self.assert_viewable(self.admin_collection, self.someuser,
self.someuser_password, viewable=False)
def test_inherited_viewable_collections_in_collection_list(self):
# Give "someuser" view permissions on the parent collection.
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Test that "someuser" can now view the child collection.
self.assert_viewable(self.child_collection, self.someuser,
self.someuser_password)
def test_viewable_collection_inheritance_conflict(self):
grandchild_collection = self.create_collection('grandchild_collection',
self.admin, self.admin_password)
self.add_to_collection(grandchild_collection, self.child_collection,
self.admin, self.admin_password)
# Give "someuser" view permission on 'child_collection'.
self.add_perm(self.child_collection, self.someuser, 'view_')
# Give "someuser" view permission on the parent collection.
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Revoke the view permissions of "someuser" on 'parent_collection'.
self.remove_perm(self.admin_collection, self.admin,
self.admin_password, self.someuser,
self.someuser_password, 'view_')
# Confirm that "someuser" can view 'grandchild_collection'.
self.assert_viewable(grandchild_collection, self.someuser,
self.someuser_password)
def test_non_viewable_collection_inheritance_conflict(self):
grandchild_collection = self.create_collection('grandchild_collection',
self.admin, self.admin_password)
self.add_to_collection(grandchild_collection, self.child_collection,
self.admin, self.admin_password)
# Give "someuser" view permission on the parent collection.
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Revoke the view permissions of "someuser" on the child collection.
self.remove_perm(self.child_collection, self.admin,
self.admin_password, self.someuser,
self.someuser_password, 'view_')
# Confirm that "someuser" can't view 'grandchild_collection'.
self.assert_viewable(grandchild_collection, self.someuser,
self.someuser_password, viewable=False)
def test_viewable_collection_not_deletable(self):
# Give "someuser" view permissions on a collection owned by "admin".
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Confirm that "someuser" is not allowed to delete the collection.
delete_perm = self._get_perm_name('delete_', self.admin_collection)
self.assertFalse(self.someuser.has_perm(delete_perm,
self.admin_collection))
# Test that "someuser" can't delete the collection.
self.client.login(username=self.someuser.username,
password=self.someuser_password)
url = reverse(self._get_endpoint('asset-detail'),
kwargs={'uid': self.admin_collection.uid})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_inherited_viewable_collection_not_deletable(self):
# Give "someuser" view permissions on a collection owned by "admin".
self.add_perm(self.admin_collection, self.someuser, 'view_')
# Confirm that "someuser" is not allowed to delete the child collection.
delete_perm = self._get_perm_name('delete_', self.child_collection)
self.assertFalse(self.someuser.has_perm(delete_perm, self.child_collection))
# Test that "someuser" can't delete the child collection.
self.client.login(username=self.someuser.username,
password=self.someuser_password)
url = reverse(self._get_endpoint('asset-detail'), kwargs={'uid':
self.child_collection.uid})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class ApiAssignedPermissionsTestCase(KpiTestCase):
"""
An obnoxiously large amount of code to test that the endpoint for listing
assigned permissions complies with the following rules:
* Superusers see it all (and there is *no* pagination)
* Anonymous users see nothing
* Regular users see everything that concerns them, namely all
their own permissions and all the owners' permissions for all objects
to which they have been assigned any permission
See also
kpi.utils.object_permission.get_user_permission_assignments_queryset
"""
# TODO: does this duplicate stuff in
# test_api_asset_permission_assignment.py / should it be moved there?
URL_NAMESPACE = ROUTER_URL_NAMESPACE
def setUp(self):
super().setUp()
self.anon = get_anonymous_user()
self.super = User.objects.get(username='admin')
self.super_password = 'pass'
self.someuser = User.objects.get(username='someuser')
self.someuser_password = 'someuser'
self.anotheruser = User.objects.get(username='anotheruser')
self.anotheruser_password = 'anotheruser'
self.collection = Asset.objects.create(
asset_type=ASSET_TYPE_COLLECTION, owner=self.someuser
)
self.asset = Asset.objects.create(owner=self.someuser)
def test_anon_only_sees_owner_and_anon_permissions(self):
self.asset.assign_perm(self.anon, PERM_VIEW_ASSET)
self.assertTrue(self.anon.has_perm(PERM_VIEW_ASSET, self.asset))
url = self.get_asset_perm_assignment_list_url(self.asset)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user_urls = []
for username in [self.asset.owner.username, self.anon.username]:
user_urls.append(
self.absolute_reverse(
self._get_endpoint('user-detail'),
kwargs={'username': username},
)
)
self.assertSetEqual(
set((a['user'] for a in response.data)), set(user_urls)
)
def test_user_sees_relevant_permissions_on_assigned_objects(self):
# A user with explicitly-assigned permissions should see their
# own permissions and the owner's permissions, but not permissions
# assigned to other users
self.asset.assign_perm(self.anotheruser, PERM_VIEW_ASSET)
self.assertTrue(self.anotheruser.has_perm(PERM_VIEW_ASSET, self.asset))
irrelevant_user = User.objects.create(username='mindyourown')
self.asset.assign_perm(irrelevant_user, PERM_VIEW_ASSET)
self.client.login(username=self.anotheruser.username,
password=self.anotheruser_password)
url = self.get_asset_perm_assignment_list_url(self.asset)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
returned_urls = [r['url'] for r in response.data]
all_obj_perms = self.asset.permissions.all()
relevant_obj_perms = all_obj_perms.filter(
user__in=(self.asset.owner, self.anotheruser),
permission__codename__in=self.asset.get_assignable_permissions(
with_partial=False
),
)
self.assertListEqual(
sorted(returned_urls),
sorted(
self.get_urls_for_asset_perm_assignment_objs(
relevant_obj_perms, asset=self.asset
)
),
)
def test_user_cannot_see_permissions_on_unassigned_objects(self):
self.asset.assign_perm(self.anotheruser, PERM_VIEW_ASSET)
self.assertTrue(self.anotheruser.has_perm(PERM_VIEW_ASSET, self.asset))
self.client.login(username=self.anotheruser.username,
password=self.anotheruser_password)
url = self.get_asset_perm_assignment_list_url(self.collection)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_superuser_sees_all_permissions(self):
self.asset.assign_perm(self.anotheruser, PERM_VIEW_ASSET)
self.assertTrue(self.anotheruser.has_perm(PERM_VIEW_ASSET, self.asset))
self.client.login(username=self.super.username,
password=self.super_password)
url = self.get_asset_perm_assignment_list_url(self.asset)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
returned_urls = [r['url'] for r in response.data]
all_obj_perms = self.asset.permissions.all()
self.assertListEqual(
sorted(returned_urls),
sorted(
self.get_urls_for_asset_perm_assignment_objs(
all_obj_perms, asset=self.asset
)
),
)
| kobotoolbox/kpi | kpi/tests/api/v2/test_api_permissions.py | Python | agpl-3.0 | 34,731 |
__author__ = 'c.brett'
| Spycho/aimmo | aimmo-game/simulation/test/__init__.py | Python | agpl-3.0 | 23 |
# -*- coding: utf-8 -*-
# setup.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
setup file for leap.mx
"""
import os
from setuptools import setup, find_packages
import versioneer
versioneer.versionfile_source = 'src/leap/mx/_version.py'
versioneer.versionfile_build = 'leap/mx/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'leap.mx-'
from pkg.utils.reqs import parse_requirements
trove_classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: No Input/Output (Daemon)',
'Framework :: Twisted',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3'
' or later (AGPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Communications :: Email',
'Topic :: Security :: Cryptography',
]
if os.environ.get("VIRTUAL_ENV", None):
data_files = None
else:
    # XXX use a script entrypoint for mx instead; it will be automatically
    # placed by distutils, using whatever interpreter is available.
data_files = [("/usr/local/bin/", ["pkg/mx.tac"]),
("/etc/init.d/", ["pkg/leap_mx"])]
setup(
name='leap.mx',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
url="http://github.com/leapcode/leap_mx",
license='AGPLv3+',
author='The LEAP Encryption Access Project',
author_email='[email protected]',
description=("An asynchronous, transparently-encrypting remailer "
"for the LEAP platform"),
long_description=(
"An asynchronous, transparently-encrypting remailer "
"using BigCouch/CouchDB and PGP/GnuPG, written in Twisted Python."
),
namespace_packages=["leap"],
package_dir={'': 'src'},
packages=find_packages('src'),
#test_suite='leap.mx.tests',
install_requires=parse_requirements(),
classifiers=trove_classifiers,
data_files=data_files
)
| kalikaneko/leap_mx | setup.py | Python | agpl-3.0 | 2,678 |
from django import template
register = template.Library()
@register.simple_tag
def keyvalue(d, key):
    # parameter named `d` rather than `dict` to avoid shadowing the built-in
    return d[key]
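# Illustrative template usage (assumed template code, not part of this
# module): after `{% load keyvalue %}`, a dict entry can be rendered with
# `{% keyvalue my_dict my_key %}`.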
| routetopa/tet | tet/browser/templatetags/keyvalue.py | Python | agpl-3.0 | 127 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import re
def camelcase_to_snakecase(string_to_convert):
"""
Convert CamelCase string to snake_case
Original solution in
http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string_to_convert)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
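# Illustrative doctest-style examples (added for clarity, not in the
# original module):
# >>> camelcase_to_snakecase('CamelCase')
# 'camel_case'
# >>> camelcase_to_snakecase('HTTPResponseCode')
# 'http_response_code'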
| suutari/shoop | shuup/admin/utils/str_utils.py | Python | agpl-3.0 | 605 |
# -*- coding: utf-8 -*-
import os.path
import posixpath
import re
import urllib
from docutils import nodes
from sphinx import addnodes, util
from sphinx.locale import admonitionlabels
def _parents(node):
while node.parent:
node = node.parent
yield node
class BootstrapTranslator(nodes.NodeVisitor, object):
head_prefix = 'head_prefix'
head = 'head'
stylesheet = 'stylesheet'
body_prefix = 'body_prefix'
body_pre_docinfo = 'body_pre_docinfo'
docinfo = 'docinfo'
body_suffix = 'body_suffix'
subtitle = 'subtitle'
header = 'header'
footer = 'footer'
html_prolog = 'html_prolog'
html_head = 'html_head'
html_title = 'html_title'
html_subtitle = 'html_subtitle'
# <meta> tags
meta = [
'<meta http-equiv="X-UA-Compatible" content="IE=edge">',
'<meta name="viewport" content="width=device-width, initial-scale=1">'
]
def __init__(self, builder, document):
super(BootstrapTranslator, self).__init__(document)
self.builder = builder
self.body = []
self.fragment = self.body
self.html_body = self.body
# document title
self.title = []
self.start_document_title = 0
self.first_title = False
self.context = []
self.section_level = 0
self.highlightlang = self.highlightlang_base = self.builder.config.highlight_language
self.highlightopts = getattr(builder.config, 'highlight_options', {})
        self.first_param = True
self.optional_param_level = 0
self.required_params_left = 0
self.param_separator = ','
def encode(self, text):
return unicode(text).translate({
ord('&'): u'&',
ord('<'): u'<',
ord('"'): u'"',
ord('>'): u'>',
0xa0: u' '
})
def starttag(self, node, tagname, **attributes):
tagname = unicode(tagname).lower()
# extract generic attributes
attrs = {name.lower(): value for name, value in attributes.iteritems()}
attrs.update(
(name, value) for name, value in node.attributes.iteritems()
if name.startswith('data-')
)
prefix = []
postfix = []
# handle possibly multiple ids
assert 'id' not in attrs, "starttag can't be passed a single id attribute, use a list of ids"
ids = node.get('ids', []) + attrs.pop('ids', [])
if ids:
_ids = iter(ids)
attrs['id'] = next(_ids)
postfix.extend(u'<i id="{}"></i>'.format(_id) for _id in _ids)
# set CSS class
classes = set(node.get('classes', []) + attrs.pop('class', '').split())
if classes:
attrs['class'] = u' '.join(classes)
return u'{prefix}<{tag} {attrs}>{postfix}'.format(
prefix=u''.join(prefix),
tag=tagname,
attrs=u' '.join(u'{}="{}"'.format(name, self.attval(value))
for name, value in attrs.iteritems()),
postfix=u''.join(postfix),
)
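    # Illustrative output of starttag() (assumed node state; attribute order
    # may vary because `attrs` is a plain dict): for a node with
    # ids=['a', 'b'] and classes=['x'],
    # starttag(node, 'div') -> u'<div id="a" class="x"><i id="b"></i>'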
# only "space characters" SPACE, CHARACTER TABULATION, LINE FEED,
    # FORM FEED and CARRIAGE RETURN should be collapsed, not all White_Space
def attval(self, value, whitespace=re.compile(u'[ \t\n\f\r]')):
return self.encode(whitespace.sub(u' ', unicode(value)))
def astext(self):
return u''.join(self.body)
def unknown_visit(self, node):
print "unknown node", node.__class__.__name__
self.body.append(u'[UNKNOWN NODE {}]'.format(node.__class__.__name__))
raise nodes.SkipNode
def visit_highlightlang(self, node):
self.highlightlang = node['lang']
def depart_highlightlang(self, node):
pass
def visit_document(self, node):
self.first_title = True
def depart_document(self, node):
pass
def visit_section(self, node):
# close "parent" or preceding section, unless this is the opening of
# the first section
if self.section_level:
self.body.append(u'</section>')
self.section_level += 1
self.body.append(self.starttag(node, 'section'))
def depart_section(self, node):
self.section_level -= 1
# close last section of document
if not self.section_level:
self.body.append(u'</section>')
def is_compact_paragraph(self, node):
parent = node.parent
if isinstance(parent, (nodes.document, nodes.compound,
addnodes.desc_content,
addnodes.versionmodified)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
# we can ignore a few specific classes, all other non-default
# attributes require that a <p> node remains
if key != 'classes' or value not in ([], ['first'], ['last'], ['first', 'last']):
return False
first = isinstance(node.parent[0], nodes.label)
for child in parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([
1 for n in parent
if not isinstance(n, (nodes.Invisible, nodes.label))
])
return parent_length == 1
def visit_paragraph(self, node):
if self.is_compact_paragraph(node):
self.context.append(u'')
return
self.body.append(self.starttag(node, 'p'))
self.context.append(u'</p>')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_literal_block(self, node):
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.body.append(self.starttag(node, 'pre'))
return
lang = self.highlightlang
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
linenos = node.get('linenos', False)
if lang is self.highlightlang_base:
# only pass highlighter options for original language
opts = self.highlightopts
else:
opts = {}
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.builder.highlighter.highlight_block(
node.rawsource, lang, opts=opts, warn=warner, linenos=linenos,
**highlight_args)
self.body.append(self.starttag(node, 'div', CLASS='highlight-%s' % lang))
self.body.append(highlighted)
self.body.append(u'</div>\n')
raise nodes.SkipNode
def depart_literal_block(self, node):
self.body.append(u'</pre>')
def visit_bullet_list(self, node):
self.body.append(self.starttag(node, 'ul'))
def depart_bullet_list(self, node):
self.body.append(u'</ul>')
def visit_enumerated_list(self, node):
self.body.append(self.starttag(node, 'ol'))
def depart_enumerated_list(self, node):
self.body.append(u'</ol>')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li'))
def depart_list_item(self, node):
self.body.append(u'</li>')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl'))
def depart_definition_list(self, node):
self.body.append(u'</dl>')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.body.append(self.starttag(node, 'dt'))
def depart_term(self, node):
self.body.append(u'</dt>')
def visit_termsep(self, node):
self.body.append(self.starttag(node, 'br'))
raise nodes.SkipNode
def visit_definition(self, node):
self.body.append(self.starttag(node, 'dd'))
def depart_definition(self, node):
self.body.append(u'</dd>')
def visit_admonition(self, node, type=None):
clss = {
# ???: 'alert-success',
'note': 'alert-info',
'hint': 'alert-info',
'tip': 'alert-info',
'seealso': 'alert-info',
'warning': 'alert-warning',
'attention': 'alert-warning',
'caution': 'alert-warning',
'important': 'alert-warning',
'danger': 'alert-danger',
'error': 'alert-danger',
'exercise': 'alert-exercise',
}
self.body.append(self.starttag(node, 'div', role='alert', CLASS='alert {}'.format(
clss.get(type, '')
)))
if 'alert-dismissible' in node.get('classes', []):
self.body.append(
u'<button type="button" class="close" data-dismiss="alert" aria-label="Close">'
u'<span aria-hidden="true">×</span>'
u'</button>')
if type:
node.insert(0, nodes.title(type, admonitionlabels[type]))
def depart_admonition(self, node):
self.body.append(u'</div>')
visit_note = lambda self, node: self.visit_admonition(node, 'note')
visit_warning = lambda self, node: self.visit_admonition(node, 'warning')
visit_attention = lambda self, node: self.visit_admonition(node, 'attention')
visit_caution = lambda self, node: self.visit_admonition(node, 'caution')
visit_danger = lambda self, node: self.visit_admonition(node, 'danger')
visit_error = lambda self, node: self.visit_admonition(node, 'error')
visit_hint = lambda self, node: self.visit_admonition(node, 'hint')
visit_important = lambda self, node: self.visit_admonition(node, 'important')
visit_tip = lambda self, node: self.visit_admonition(node, 'tip')
visit_exercise = lambda self, node: self.visit_admonition(node, 'exercise')
visit_seealso = lambda self, node: self.visit_admonition(node, 'seealso')
depart_note = depart_admonition
depart_warning = depart_admonition
depart_attention = depart_admonition
depart_caution = depart_admonition
depart_danger = depart_admonition
depart_error = depart_admonition
depart_hint = depart_admonition
depart_important = depart_admonition
depart_tip = depart_admonition
depart_exercise = depart_admonition
depart_seealso = depart_admonition
def visit_versionmodified(self, node):
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
self.body.append(u'</div>')
def visit_title(self, node):
parent = node.parent
closing = u'</p>'
if isinstance(parent, nodes.Admonition):
self.body.append(self.starttag(node, 'p', CLASS='alert-title'))
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1'))
closing = u'</h1>'
self.start_document_title = len(self.body)
else:
assert isinstance(parent, nodes.section), "expected a section node as parent to the title, found {}".format(parent)
if self.first_title:
self.first_title = False
raise nodes.SkipNode()
nodename = 'h{}'.format(self.section_level)
self.body.append(self.starttag(node, nodename))
closing = u'</{}>'.format(nodename)
self.context.append(closing)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.start_document_title:
self.title = self.body[self.start_document_title:-1]
self.start_document_title = 0
del self.body[:]
# the rubric should be a smaller heading than the current section, up to
# h6... maybe "h7" should be a ``p`` instead?
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'h{}'.format(min(self.section_level + 1, 6))))
def depart_rubric(self, node):
self.body.append(u'</h{}>'.format(min(self.section_level + 1, 6)))
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append(u'</blockquote>')
def visit_attribution(self, node):
self.body.append(self.starttag(node, 'footer'))
def depart_attribution(self, node):
self.body.append(u'</footer>')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_container(self, node):
self.body.append(u'</div>')
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_compound(self, node):
self.body.append(u'</div>')
def visit_image(self, node):
uri = node['uri']
if uri in self.builder.images:
uri = posixpath.join(self.builder.imgpath,
self.builder.images[uri])
attrs = {'src': uri, 'class': 'img-responsive'}
if 'alt' in node:
attrs['alt'] = node['alt']
# todo: explicit width/height/scale?
self.body.append(self.starttag(node, 'img', **attrs))
def depart_image(self, node): pass
def visit_figure(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_figure(self, node):
self.body.append(u'</div>')
def visit_caption(self, node):
# first paragraph of figure content
self.body.append(self.starttag(node, 'h4'))
def depart_caption(self, node):
self.body.append(u'</h4>')
def visit_legend(self, node): pass
def depart_legend(self, node): pass
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line'))
# ensure the line still takes the room it needs
if not len(node): self.body.append(u'<br />')
def depart_line(self, node):
self.body.append(u'</div>')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append(u'</div>')
def visit_table(self, node):
self.body.append(self.starttag(node, 'table', CLASS='table'))
def depart_table(self, node):
self.body.append(u'</table>')
def visit_tgroup(self, node): pass
def depart_tgroup(self, node): pass
def visit_colspec(self, node): raise nodes.SkipNode
def visit_thead(self, node):
self.body.append(self.starttag(node, 'thead'))
def depart_thead(self, node):
self.body.append(u'</thead>')
def visit_tbody(self, node):
self.body.append(self.starttag(node, 'tbody'))
def depart_tbody(self, node):
self.body.append(u'</tbody>')
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr'))
def depart_row(self, node):
self.body.append(u'</tr>')
def visit_entry(self, node):
if isinstance(node.parent.parent, nodes.thead):
tagname = 'th'
else:
tagname = 'td'
self.body.append(self.starttag(node, tagname))
self.context.append(tagname)
def depart_entry(self, node):
self.body.append(u'</{}>'.format(self.context.pop()))
def visit_Text(self, node):
self.body.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_literal(self, node):
self.body.append(self.starttag(node, 'code'))
def depart_literal(self, node):
self.body.append(u'</code>')
visit_literal_emphasis = visit_literal
depart_literal_emphasis = depart_literal
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_emphasis(self, node):
self.body.append(u'</em>')
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong'))
def depart_strong(self, node):
self.body.append(u'</strong>')
visit_literal_strong = visit_strong
depart_literal_strong = depart_strong
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span'))
def depart_inline(self, node):
self.body.append(u'</span>')
def visit_abbreviation(self, node):
attrs = {}
if 'explanation' in node:
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', **attrs))
def depart_abbreviation(self, node):
self.body.append(u'</abbr>')
def visit_reference(self, node):
attrs = {
'class': 'reference',
'href': node['refuri'] if 'refuri' in node else '#' + node['refid']
}
attrs['class'] += ' internal' if (node.get('internal') or 'refuri' not in node) else ' external'
if any(isinstance(ancestor, nodes.Admonition) for ancestor in _parents(node)):
attrs['class'] += ' alert-link'
if 'reftitle' in node:
attrs['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', **attrs))
def depart_reference(self, node):
self.body.append(u'</a>')
def visit_target(self, node): pass
def depart_target(self, node): pass
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'div', CLASS='footnote'))
self.footnote_backrefs(node)
def depart_footnote(self, node):
self.body.append(u'</div>')
def visit_footnote_reference(self, node):
self.body.append(self.starttag(
node, 'a', href='#' + node['refid'], CLASS="footnote-ref"))
def depart_footnote_reference(self, node):
self.body.append(u'</a>')
def visit_label(self, node):
self.body.append(self.starttag(node, 'span', CLASS='footnote-label'))
self.body.append(u'%s[' % self.context.pop())
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(u']%s</span> %s' % (self.context.pop(), self.context.pop()))
def footnote_backrefs(self, node):
# should store following data on context stack (in that order since
# they'll be popped so LIFO)
#
# * outside (after) label
# * after label text
# * before label text
backrefs = node['backrefs']
if not backrefs:
self.context.extend(['', '', ''])
elif len(backrefs) == 1:
self.context.extend([
'',
'</a>',
'<a class="footnote-backref" href="#%s">' % backrefs[0]
])
else:
backlinks = (
'<a class="footnote-backref" href="#%s">%s</a>' % (backref, i)
for i, backref in enumerate(backrefs, start=1)
)
self.context.extend([
'<em class="footnote-backrefs">(%s)</em> ' % ', '.join(backlinks),
'',
''
])
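    # Illustrative result for a single backref 'id1' (assumed input): the
    # label renders as
    #   <span class="footnote-label"><a class="footnote-backref"
    #   href="#id1">[1]</a></span>
    # because visit_label pops the "before" string and depart_label pops the
    # "after" and "outside" strings in LIFO order.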
def visit_desc(self, node):
self.body.append(self.starttag(node, 'section', CLASS='code-' + node['objtype']))
def depart_desc(self, node):
self.body.append(u'</section>')
def visit_desc_signature(self, node):
self.body.append(self.starttag(node, 'h6'))
self.body.append(u'<code>')
def depart_desc_signature(self, node):
self.body.append(u'</code>')
self.body.append(u'</h6>')
def visit_desc_addname(self, node): pass
def depart_desc_addname(self, node): pass
def visit_desc_type(self, node): pass
def depart_desc_type(self, node): pass
def visit_desc_returns(self, node):
self.body.append(u' → ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node): pass
def depart_desc_name(self, node): pass
def visit_desc_parameterlist(self, node):
self.body.append(u'(')
self.first_param = True
self.optional_param_level = 0
# How many required parameters are left.
self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(u')')
# If required parameters are still to come, then put the comma after
# the parameter. Otherwise, put the comma before. This ensures that
# signatures like the following render correctly (see issue #1001):
#
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
if self.first_param:
            self.first_param = False
elif not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
if 'noemph' not in node: self.body.append(u'<em>')
def depart_desc_parameter(self, node):
if 'noemph' not in node: self.body.append(u'</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
self.optional_param_level += 1
self.body.append(u'[')
def depart_desc_optional(self, node):
self.optional_param_level -= 1
self.body.append(u']')
def visit_desc_annotation(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_desc_annotation(self, node):
self.body.append(u'</em>')
def visit_desc_content(self, node): pass
def depart_desc_content(self, node): pass
def visit_field_list(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-fields'))
def depart_field_list(self, node):
self.body.append(u'</div>')
def visit_field(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field'))
def depart_field(self, node):
self.body.append(u'</div>')
def visit_field_name(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-name'))
def depart_field_name(self, node):
self.body.append(u'</div>')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-body'))
def depart_field_body(self, node):
self.body.append(u'</div>')
def visit_glossary(self, node): pass
def depart_glossary(self, node): pass
def visit_comment(self, node): raise nodes.SkipNode
def visit_toctree(self, node):
# div class=row {{ section_type }}
# h2 class=col-sm-12
# {{ section title }}
# div class=col-sm-6 col-md-3
# figure class=card
# a href=current_link style=background-image: document-image-attribute class=card-img
# figcaption
# {{ card title }}
env = self.builder.env
conf = self.builder.app.config
for title, ref in ((e[0], e[1]) for e in node['entries']):
# external URL, no toc, can't recurse into
if ref not in env.tocs:
continue
toc = env.tocs[ref].traverse(addnodes.toctree)
classes = env.metadata[ref].get('types', 'tutorials')
classes += ' toc-single-entry' if not toc else ' toc-section'
self.body.append(self.starttag(node, 'div', CLASS="row " + classes))
self.body.append(u'<h2 class="col-sm-12">')
self.body.append(title if title else util.nodes.clean_astext(env.titles[ref]))
self.body.append(u'</h2>')
entries = [(title, ref)] if not toc else ((e[0], e[1]) for e in toc[0]['entries'])
for subtitle, subref in entries:
baseuri = self.builder.get_target_uri(node['parent'])
if subref in env.metadata:
cover = env.metadata[subref].get('banner', conf.odoo_cover_default)
elif subref in conf.odoo_cover_external:
cover = conf.odoo_cover_external[subref]
else:
cover = conf.odoo_cover_default_external
if cover:
banner = '_static/' + cover
base, ext = os.path.splitext(banner)
small = "{}.small{}".format(base, ext)
if os.path.isfile(urllib.url2pathname(small)):
banner = small
style = u"background-image: url('{}')".format(
util.relative_uri(baseuri, banner) or '#')
else:
style = u''
self.body.append(u"""
<div class="col-sm-6 col-md-3">
<figure class="card">
<a href="{link}" class="card-img">
<span style="{style}"></span>
<figcaption>{title}</figcaption>
</a>
</figure>
</div>
""".format(
link=subref if util.url_re.match(subref) else util.relative_uri(
baseuri, self.builder.get_target_uri(subref)),
style=style,
title=subtitle if subtitle else util.nodes.clean_astext(env.titles[subref]),
))
self.body.append(u'</div>')
raise nodes.SkipNode
def visit_index(self, node): raise nodes.SkipNode
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = 'span' if isinstance(node.parent, nodes.TextElement) else 'div'
if node['classes']:
self.body.append(self.starttag(node, t))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
| xujb/odoo | doc/_extensions/odoo/translator.py | Python | agpl-3.0 | 26,143 |
"""Laposte XML -> Python."""
from datetime import datetime
from lxml import objectify
from ...codec import DecoderGetLabel
from ...codec import DecoderGetPackingSlip
import base64
class _UNSPECIFIED:
pass
def _get_text(xml, tag, default=_UNSPECIFIED):
"""
    Return the text content of a tag rather than an lxml instance.
    If no default is specified, accessing a nonexistent tag raises the
    usual AttributeError.
"""
if not hasattr(xml, tag):
if default is _UNSPECIFIED:
            # let getattr raise the usual AttributeError
            return getattr(xml, tag)
return default
return getattr(xml, tag).text
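# Illustrative usage (assumed XML payload, not from the real web service):
# xml = objectify.fromstring('<r><parcelNumber>8R0001</parcelNumber></r>')
# _get_text(xml, 'parcelNumber') -> '8R0001'
# _get_text(xml, 'missing', '') -> ''
# _get_text(xml, 'missing') raises AttributeError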
def _get_cid(tag, tree):
element = tree.find(tag)
if element is None:
return None
href = element.getchildren()[0].attrib["href"]
# href contains cid:[email protected]
return href[len("cid:") :] # remove prefix
class LaposteFrDecoderGetLabel(DecoderGetLabel):
"""Laposte XML -> Python."""
def decode(self, response, input_payload):
"""Laposte XML -> Python."""
body = response["body"]
parts = response["parts"]
output_format = input_payload["output_format"]
xml = objectify.fromstring(body)
msg = xml.xpath("//return")[0]
rep = msg.labelV2Response
cn23_cid = _get_cid("cn23", rep)
label_cid = _get_cid("label", rep)
annexes = []
if cn23_cid:
data = parts.get(cn23_cid)
annexes.append(
{"name": "cn23", "data": base64.b64encode(data), "type": "pdf"}
)
if rep.find("pdfUrl"):
annexes.append({"name": "label", "data": rep.find("pdfUrl"), "type": "url"})
parcel = {
"id": 1, # no multi parcel management for now.
"reference": self._get_parcel_number(input_payload),
"tracking": {
                # we need to force a real string here because this data can
                # be reused, and cerberus won't accept an ElementString
                # instead of a string.
"number": _get_text(rep, "parcelNumber"),
"url": "",
"partner": _get_text(rep, "parcelNumberPartner", ""),
},
"label": {
"data": base64.b64encode(parts.get(label_cid)),
"name": "label_1",
"type": output_format,
},
}
if hasattr(rep, "fields") and hasattr(rep.fields, "field"):
for field in rep.fields.field:
parcel["tracking"][_get_text(field, "key")] = _get_text(field, "value")
self.result["parcels"].append(parcel)
self.result["annexes"] += annexes
class LaposteFrDecoderGetPackingSlip(DecoderGetPackingSlip):
"""Laposte Bordereau Response XML -> Python."""
def decode(self, response, input_payload):
body = response["body"]
parts = response["parts"]
xml = objectify.fromstring(body)
msg = xml.xpath("//return")[0]
header = msg.bordereau.bordereauHeader
published_dt = _get_text(header, "publishingDate", None)
if published_dt:
if "." in published_dt:
                # fetching a packing slip by its number does not return
                # microseconds, but creating a new one does... We strip the
                # microseconds here so both results stay homogeneous.
published_dt = published_dt.split(".")
published_dt = "%s+%s" % (
published_dt[0],
published_dt[1].split("+")[1],
)
published_datetime = datetime.strptime(published_dt, "%Y-%m-%dT%H:%M:%S%z")
self.result["packing_slip"] = {
"number": _get_text(header, "bordereauNumber", None),
"published_datetime": published_datetime,
"number_of_parcels": int(_get_text(header, "numberOfParcels", 0)),
"site_pch": {
"code": _get_text(header, "codeSitePCH", None),
"name": _get_text(header, "nameSitePCH", None),
},
"client": {
"number": _get_text(header, "clientNumber", None),
"adress": _get_text(header, "Address", None),
"company": _get_text(header, "Company", None),
},
}
packing_slip_cid = _get_cid("bordereauDataHandler", msg.bordereau)
if packing_slip_cid:
self.result["annexes"].append(
{
"name": "packing_slip",
"data": base64.b64encode(parts.get(packing_slip_cid)),
"type": "pdf",
}
)
return self.result
| akretion/roulier | roulier/carriers/laposte_fr/decoder.py | Python | agpl-3.0 | 4,729 |
"""alloccli subcommand for editing alloc reminders."""
from alloc import alloc
import re
class reminder(alloc):
"""Add or edit a reminder."""
# Setup the options that this cli can accept
ops = []
ops.append(('', 'help ', 'Show this help.'))
ops.append(('q', 'quiet ', 'Run with less output.\n'))
ops.append(('r.', ' ', 'Edit a reminder. Specify an ID or omit -r to create.'))
ops.append(('t.', 'task=ID|NAME ', 'A task ID, or a fuzzy match for a task name.'))
ops.append(('p.', 'project=ID|NAME', 'A project ID, or a fuzzy match for a project name.'))
ops.append(('c.', 'client=ID|NAME ', 'A client ID, or a fuzzy match for a client name.'))
ops.append(('s.', 'subject=TEXT ', 'The subject line of the reminder.'))
ops.append(('b.', 'body=TEXT ', 'The text body of the reminder.'))
ops.append(('', 'frequency=FREQ ', 'How often this reminder is to recur.\n'
'Specify as [number][unit], where unit is one of:\n'
'[h]our, [d]ay, [w]eek, [m]onth, [y]ear.'))
ops.append(('', 'notice=WARNING ', 'Advance warning for this reminder. Same format as frequency.'))
ops.append(('d.', 'date=DATE ', 'When this reminder is to trigger.'))
ops.append(('', 'active=1|0 ', 'Whether this reminder is active or not.'))
ops.append(('T:', 'to=PEOPLE ', 'Recipients. Can be usernames, full names and/or email.'))
ops.append(('D:', 'remove=PEOPLE ', 'Recipients to remove.'))
# Specify some header and footer text for the help text
help_text = "Usage: %s [OPTIONS]\n"
help_text += __doc__
help_text += """\n\n%s
This program allows editing of the fields on a reminder.
Examples:
# Edit a particular reminder.
alloc reminder -r 1234 --title 'Name for the reminder.' --to alla
# Omit -r to create a new reminder
alloc reminder --title 'Name for the reminder.' --to alla"""
def run(self, command_list):
"""Execute subcommand."""
# Get the command line arguments into a dictionary
o, remainder_ = self.get_args(command_list, self.ops, self.help_text)
# Got this far, then authenticate
self.authenticate()
personID = self.get_my_personID()
args = {}
if not o['r']:
o['r'] = 'new'
args['entity'] = 'reminder'
args['id'] = o['r']
if o['date']:
o['date'] = self.parse_date(o['date'])
if o['project'] and not self.is_num(o['project']):
o['project'] = self.search_for_project(o['project'], personID)
if o['task'] and not self.is_num(o['task']):
o['task'] = self.search_for_task({'taskName': o['task']})
if o['client'] and not self.is_num(o['client']):
o['client'] = self.search_for_client({'clientName': o['client']})
if o['frequency'] and not re.match(r'\d+[hdwmy]', o['frequency'], re.IGNORECASE):
self.die("Invalid frequency specification")
if o['notice'] and not re.match(r'\d+[hdwmy]', o['notice'], re.IGNORECASE):
self.die("Invalid advance notice specification")
if o['to']:
o['recipients'] = [x['personID']
for x in self.get_people(o['to']).values()]
if o['remove']:
o['recipients_remove'] = [x['personID']
for x in self.get_people(o['remove']).values()]
package = {}
for key, val in o.items():
if val:
package[key] = val
if isinstance(val, str) and val.lower() == 'null':
package[key] = ''
package['command'] = 'edit_reminder'
args['options'] = package
args['method'] = 'edit_entity'
rtn = self.make_request(args)
self.handle_server_response(rtn, not o['quiet'])
| mattcen/alloc | bin/alloccli/reminder.py | Python | agpl-3.0 | 3,917 |
# -*- coding: utf-8 -*-
# (c) 2017 Daniel Campos - AvanzOSC
# (c) 2017 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, models, api, exceptions, _
import base64
import cStringIO
import tempfile
import csv
class ImportPriceFile(models.TransientModel):
_name = 'import.price.file'
    _description = 'Wizard to import a price list file'
data = fields.Binary(string='File', required=True)
name = fields.Char(string='Filename', required=False)
    delimeter = fields.Char(  # field name keeps the historical spelling
        string='Delimiter', default=',', help='The default delimiter is ","')
file_type = fields.Selection([('csv', 'CSV'),
('xls', 'XLS')], string='File type',
required=True, default='csv')
def _prepare_data_dict(self, data_dict):
return data_dict
def _import_csv(self, load_id, file_data, delimeter=';'):
""" Imports data from a CSV file in defined object.
@param load_id: Loading id
@param file_data: Input data to load
@param delimeter: CSV file data delimeter
@return: Imported file number
"""
file_line_obj = self.env['product.supplierinfo.load.line']
data = base64.b64decode(file_data)
file_input = cStringIO.StringIO(data)
file_input.seek(0)
reader_info = []
reader = csv.reader(file_input, delimiter=str(delimeter),
lineterminator='\r\n')
try:
reader_info.extend(reader)
except Exception:
raise exceptions.Warning(_("Not a valid file!"))
keys = reader_info[0]
counter = 0
if not isinstance(keys, list):
raise exceptions.Warning(_("Not a valid file!"))
del reader_info[0]
for i in range(len(reader_info)):
field = reader_info[i]
values = dict(zip(keys, field))
data_dict = self._prepare_data_dict(
{'supplier': values.get('Supplier', ''),
'code': values.get('ProductCode', ''),
'sequence': values.get('Sequence', 0),
'supplier_code': values.get('ProductSupplierCode', ''),
'info': values.get('ProductSupplierName', ''),
'delay': values.get('Delay', 0),
                 'price': values.get('Price', '0.00').replace(',', '.'),
'min_qty': values.get('MinQty', 0.00),
'fail': True,
'fail_reason': _('No processed'),
'file_load': load_id})
file_line_obj.create(data_dict)
counter += 1
return counter
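    # Illustrative input (assumed file layout, inferred from the keys read
    # above): the first CSV row must carry the column headers, e.g.
    #
    #   Supplier;ProductCode;Sequence;ProductSupplierCode;ProductSupplierName;Delay;Price;MinQty
    #   Acme;PROD-001;1;ACME-01;Acme widget;5;12,50;10
    #
    # A decimal comma in Price is normalized to a dot before storage.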
def _import_xls(self, load_id, file_data):
""" Imports data from a XLS file in defined object.
@param load_id: Loading id
@param file_data: Input data to load
@return: Imported file number
"""
try:
import xlrd
except ImportError:
            raise exceptions.Warning(_("xlrd python lib not installed"))
file_line_obj = self.env['product.supplierinfo.load.line']
file_1 = base64.decodestring(file_data)
(fileno, fp_name) = tempfile.mkstemp('.xls', 'openerp_')
openfile = open(fp_name, "w")
openfile.write(file_1)
openfile.seek(0)
book = xlrd.open_workbook(fp_name)
sheet = book.sheet_by_index(0)
values = {}
keys = sheet.row_values(0, 0, end_colx=sheet.ncols)
for counter in range(sheet.nrows - 1):
# grab the current row
rowValues = sheet.row_values(counter + 1, 0,
end_colx=sheet.ncols)
row_lst = []
            for val in rowValues:  # normalize cell encoding and numeric format
if isinstance(val, unicode):
valor = val.encode('utf8')
row_lst.append(valor)
elif isinstance(val, float):
if float(val) % 1 == 0.0:
row_lst.append(
'{0:.5f}'.format(float(val)).split('.')[0])
else:
row_lst.append('{0:g}'.format(float(val)))
else:
row_lst.append(val)
row = map(lambda x: str(x), row_lst)
values = dict(zip(keys, row))
data_dict = self._prepare_data_dict(
{'supplier': values.get('Supplier', ''),
'code': values.get('ProductCode', ''),
'sequence': values.get('Sequence', 0),
'supplier_code': values.get('ProductSupplierCode', ''),
'info': values.get('ProductSupplierName', ''),
'delay': values.get('Delay', 0),
                 'price': values.get('Price', '0.00').replace(',', '.'),
'min_qty': values.get('MinQty', 0.00),
'fail': True,
'fail_reason': _('No processed'),
'file_load': load_id
})
file_line_obj.create(data_dict)
counter += 1
return counter
@api.multi
def action_import(self):
file_load_obj = self.env['product.supplierinfo.load']
if self.env.context.get('active_id', False):
load_id = self.env.context.get('active_id')
file_load = file_load_obj.browse(load_id)
for line in file_load.file_lines:
line.unlink()
for wiz in self:
if not wiz.data:
raise exceptions.Warning(_("You need to select a file!"))
date_hour = fields.datetime.now()
actual_date = fields.date.today()
filename = wiz.name
if wiz.file_type == 'csv':
counter = self._import_csv(load_id, wiz.data, wiz.delimeter)
elif wiz.file_type == 'xls':
counter = self._import_xls(load_id, wiz.data)
else:
                raise exceptions.Warning(_("No valid .csv/.xls file found"))
file_load.write({'name': ('%s_%s') % (filename, actual_date),
'date': date_hour, 'fails': counter,
'file_name': filename, 'process': counter})
return counter
| esthermm/odoo-addons | product_supplierinfo_import/wizard/import_price_files.py | Python | agpl-3.0 | 6,282 |
""" IMAPClient wrapper for the Nilas Sync Engine. """
import contextlib
import re
import time
import imaplib
import imapclient
# Even though RFC 2060 says that the date component must have two characters
# (either two digits or space+digit), it seems that some IMAP servers only
# return one digit. Fun times.
imaplib.InternalDate = re.compile(
r'.*INTERNALDATE "'
r'(?P<day>[ 0123]?[0-9])-' # insert that `?` to make first digit optional
r'(?P<mon>[A-Z][a-z][a-z])-'
r'(?P<year>[0-9][0-9][0-9][0-9])'
r' (?P<hour>[0-9][0-9]):'
r'(?P<min>[0-9][0-9]):'
r'(?P<sec>[0-9][0-9])'
r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
r'"')
import functools
import threading
from email.parser import HeaderParser
from collections import namedtuple, defaultdict
import gevent
from gevent import socket
from gevent.lock import BoundedSemaphore
from gevent.queue import Queue
from inbox.util.concurrency import retry
from inbox.util.itert import chunk
from inbox.util.misc import or_none
from inbox.basicauth import GmailSettingError
from inbox.models.session import session_scope
from inbox.models.account import Account
from nylas.logging import get_logger
log = get_logger()
__all__ = ['CrispinClient', 'GmailCrispinClient']
# Unify flags API across IMAP and Gmail
Flags = namedtuple('Flags', 'flags')
# Flags includes labels on Gmail because Gmail doesn't use \Draft.
GmailFlags = namedtuple('GmailFlags', 'flags labels')
GMetadata = namedtuple('GMetadata', 'g_msgid g_thrid size')
RawMessage = namedtuple(
'RawImapMessage',
'uid internaldate flags body g_thrid g_msgid g_labels')
RawFolder = namedtuple('RawFolder', 'display_name role')
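# Illustrative construction (assumed values): a generic IMAP flag update is
# Flags(flags=('\\Seen',)), while Gmail also carries labels, e.g.
# GmailFlags(flags=('\\Seen',), labels=('\\Inbox',)).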
# Lazily-initialized map of account ids to lock objects.
# This prevents multiple greenlets from concurrently creating duplicate
# connection pools for a given account.
_lock_map = defaultdict(threading.Lock)
CONN_DISCARD_EXC_CLASSES = (socket.error, imaplib.IMAP4.error)
class FolderMissingError(Exception):
pass
def _get_connection_pool(account_id, pool_size, pool_map, readonly):
with _lock_map[account_id]:
if account_id not in pool_map:
pool_map[account_id] = CrispinConnectionPool(
account_id, num_connections=pool_size, readonly=readonly)
return pool_map[account_id]
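# Note: the mutable `pool_map=dict()` defaults below are intentional: each
# acts as a module-level cache of per-account pools shared across calls.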
def connection_pool(account_id, pool_size=3, pool_map=dict()):
""" Per-account crispin connection pool.
Use like this:
with crispin.connection_pool(account_id).get() as crispin_client:
# your code here
pass
Note that the returned CrispinClient could have ANY folder selected, or
none at all! It's up to the calling code to handle folder sessions
properly. We don't reset to a certain select state because it's slow.
"""
return _get_connection_pool(account_id, pool_size, pool_map, True)
def writable_connection_pool(account_id, pool_size=1, pool_map=dict()):
""" Per-account crispin connection pool, with *read-write* connections.
Use like this:
conn_pool = crispin.writable_connection_pool(account_id)
with conn_pool.get() as crispin_client:
# your code here
pass
"""
return _get_connection_pool(account_id, pool_size, pool_map, False)
class CrispinConnectionPool(object):
"""
Connection pool for Crispin clients.
Connections in a pool are specific to an IMAPAccount.
Parameters
----------
account_id : int
Which IMAPAccount to open up a connection to.
num_connections : int
How many connections in the pool.
readonly : bool
Is the connection to the IMAP server read-only?
"""
def __init__(self, account_id, num_connections, readonly):
log.info('Creating Crispin connection pool for account {} with {} '
'connections'.format(account_id, num_connections))
self.account_id = account_id
self.readonly = readonly
self._queue = Queue(num_connections, items=num_connections * [None])
self._sem = BoundedSemaphore(num_connections)
self._set_account_info()
@contextlib.contextmanager
def get(self):
""" Get a connection from the pool, or instantiate a new one if needed.
If `num_connections` connections are already in use, block until one is
available.
"""
# A gevent semaphore is granted in the order that greenlets tried to
# acquire it, so we use a semaphore here to prevent potential
# starvation of greenlets if there is high contention for the pool.
# The queue implementation does not have that property; having
# greenlets simply block on self._queue.get(block=True) could cause
# individual greenlets to block for arbitrarily long.
self._sem.acquire()
client = self._queue.get()
try:
if client is None:
client = self._new_connection()
yield client
except CONN_DISCARD_EXC_CLASSES as exc:
# Discard the connection on socket or IMAP errors. Technically this
# isn't always necessary, since if you got e.g. a FETCH failure you
# could reuse the same connection. But for now it's the simplest
# thing to do.
log.info('IMAP connection error; discarding connection',
exc_info=True)
if client is not None:
try:
client.logout()
except:
log.error('Error on IMAP logout', exc_info=True)
client = None
raise exc
except:
raise
finally:
self._queue.put(client)
self._sem.release()
def _set_account_info(self):
with session_scope() as db_session:
account = db_session.query(Account).get(self.account_id)
self.sync_state = account.sync_state
self.provider_info = account.provider_info
self.email_address = account.email_address
self.auth_handler = account.auth_handler
if account.provider == 'gmail':
self.client_cls = GmailCrispinClient
else:
self.client_cls = CrispinClient
def _new_raw_connection(self):
"""Returns a new, authenticated IMAPClient instance for the account."""
with session_scope() as db_session:
account = db_session.query(Account).get(self.account_id)
return self.auth_handler.connect_account(account)
def _new_connection(self):
conn = self._new_raw_connection()
return self.client_cls(self.account_id, self.provider_info,
self.email_address, conn,
readonly=self.readonly)
def _exc_callback():
log.info('Connection broken with error; retrying with new connection',
exc_info=True)
gevent.sleep(5)
retry_crispin = functools.partial(
retry, retry_classes=CONN_DISCARD_EXC_CLASSES, exc_callback=_exc_callback)
class CrispinClient(object):
"""
Generic IMAP client wrapper.
One thing to note about crispin clients is that *all* calls operate on
the currently selected folder.
Crispin will NEVER implicitly select a folder for you.
This is very important! IMAP only guarantees that folder message UIDs
are valid for a "session", which is defined as from the time you
SELECT a folder until the connection is closed or another folder is
selected.
Crispin clients *always* return long ints rather than strings for number
data types, such as message UIDs, Google message IDs, and Google thread
IDs.
All inputs are coerced to strings before being passed off to the IMAPClient
connection.
You should really be interfacing with this class via a connection pool,
see `connection_pool()`.
Parameters
----------
account_id : int
Database id of the associated IMAPAccount.
conn : IMAPClient
Open IMAP connection (should be already authed).
readonly : bool
Whether or not to open IMAP connections as readonly.
"""
def __init__(self, account_id, provider_info, email_address, conn,
readonly=True):
self.account_id = account_id
self.provider_info = provider_info
self.email_address = email_address
# IMAP isn't stateless :(
self.selected_folder = None
self._folder_names = None
self.conn = conn
self.readonly = readonly
def _fetch_folder_list(self):
""" NOTE: XLIST is deprecated, so we just use LIST.
An example response with some other flags:
* LIST (\HasNoChildren) "/" "INBOX"
* LIST (\Noselect \HasChildren) "/" "[Gmail]"
* LIST (\HasNoChildren \All) "/" "[Gmail]/All Mail"
* LIST (\HasNoChildren \Drafts) "/" "[Gmail]/Drafts"
* LIST (\HasNoChildren \Important) "/" "[Gmail]/Important"
* LIST (\HasNoChildren \Sent) "/" "[Gmail]/Sent Mail"
* LIST (\HasNoChildren \Junk) "/" "[Gmail]/Spam"
* LIST (\HasNoChildren \Flagged) "/" "[Gmail]/Starred"
* LIST (\HasNoChildren \Trash) "/" "[Gmail]/Trash"
IMAPClient parses this response into a list of
(flags, delimiter, name) tuples.
"""
return self.conn.list_folders()
def select_folder(self, folder, uidvalidity_cb):
""" Selects a given folder.
Makes sure to set the 'selected_folder' attribute to a
(folder_name, select_info) pair.
Selecting a folder indicates the start of an IMAP session. IMAP UIDs
are only guaranteed valid for sessions, so the caller must provide a
callback that checks UID validity.
Starts a new session even if `folder` is already selected, since
this does things like e.g. makes sure we're not getting
cached/out-of-date values for HIGHESTMODSEQ from the IMAP server.
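        A callback sketch (the body is illustrative only)::
            def uidvalidity_cb(account_id, folder, select_info):
                # e.g. compare select_info['UIDVALIDITY'] with the value
                # stored for this folder and bail out if it changed
                return select_info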
"""
try:
select_info = self.conn.select_folder(
folder, readonly=self.readonly)
except imapclient.IMAPClient.Error as e:
# Specifically point out folders that come back as missing by
# checking for Yahoo / Gmail / Outlook (Hotmail) specific errors:
if '[NONEXISTENT] Unknown Mailbox:' in e.message or \
'does not exist' in e.message or \
"doesn't exist" in e.message:
raise FolderMissingError(folder)
# We can't assume that all errors here are caused by the folder
# being deleted, as other connection errors could occur - but we
# want to make sure we keep track of different providers'
# "nonexistent" messages, so log this event.
log.error("IMAPClient error selecting folder. May be deleted",
error=str(e))
raise
select_info['UIDVALIDITY'] = long(select_info['UIDVALIDITY'])
self.selected_folder = (folder, select_info)
# Don't propagate cached information from previous session
self._folder_names = None
return uidvalidity_cb(self.account_id, folder, select_info)
@property
def selected_folder_name(self):
return or_none(self.selected_folder, lambda f: f[0])
@property
def selected_folder_info(self):
return or_none(self.selected_folder, lambda f: f[1])
@property
def selected_uidvalidity(self):
return or_none(self.selected_folder_info, lambda i: i['UIDVALIDITY'])
@property
def selected_uidnext(self):
return or_none(self.selected_folder_info, lambda i: i.get('UIDNEXT'))
def sync_folders(self):
"""
List of folders to sync.
In generic IMAP, the 'INBOX' folder is required.
Returns
-------
list
Folders to sync (as strings).
"""
to_sync = []
have_folders = self.folder_names()
assert 'inbox' in have_folders, \
"Missing required 'inbox' folder for account_id: {}".\
format(self.account_id)
for names in have_folders.itervalues():
to_sync.extend(names)
return to_sync
def folder_names(self, force_resync=False):
"""
Return the folder names for the account as a mapping from
recognized role: list of folder names,
for example: 'sent': ['Sent Items', 'Sent'].
The list of recognized folder roles is in:
inbox/models/constants.py
Folders that do not belong to a recognized role are mapped to
None, for example: None: ['MyFolder', 'OtherFolder'].
The mapping is also cached in self._folder_names
Parameters:
-----------
force_resync: boolean
Return the cached mapping or return a refreshed mapping
(after refetching from the remote).
"""
if force_resync or self._folder_names is None:
self._folder_names = defaultdict(list)
raw_folders = self.folders()
for f in raw_folders:
self._folder_names[f.role].append(f.display_name)
return self._folder_names
def folders(self):
"""
Fetch the list of folders for the account from the remote, return as a
list of RawFolder objects.
NOTE:
Always fetches the list of folders from the remote.
"""
raw_folders = []
folders = self._fetch_folder_list()
for flags, delimiter, name in folders:
if u'\\Noselect' in flags or u'\\NoSelect' in flags \
or u'\\NonExistent' in flags:
# Special folders that can't contain messages
continue
raw_folder = self._process_folder(name, flags)
raw_folders.append(raw_folder)
return raw_folders
def _process_folder(self, display_name, flags):
"""
Determine the role for the remote folder from its `name` and `flags`.
Returns
-------
RawFolder representing the folder
"""
        # TODO[k]: Important / Starred for generic IMAP?
# Different providers have different names for folders, here
# we have a default map for common name mapping, additional
# mappings can be provided via the provider configuration file
default_folder_map = {
'inbox': 'inbox',
'drafts': 'drafts',
'draft': 'drafts',
'junk': 'spam',
'spam': 'spam',
'archive': 'archive',
'sent': 'sent',
'trash': 'trash'}
# Additionally we provide a custom mapping for providers that
# don't fit into the defaults.
folder_map = self.provider_info.get('folder_map', {})
# Some providers also provide flags to determine common folders
# Here we read these flags and apply the mapping
flag_map = {'\\Trash': 'trash', '\\Sent': 'sent', '\\Drafts': 'drafts',
'\\Junk': 'spam', '\\Inbox': 'inbox', '\\Spam': 'spam'}
role = default_folder_map.get(display_name.lower())
if not role:
role = folder_map.get(display_name)
        if not role:
            for flag in flags:
                if flag in flag_map:
                    role = flag_map[flag]
return RawFolder(display_name=display_name, role=role)
def create_folder(self, name):
self.conn.create_folder(name)
def condstore_supported(self):
# Technically QRESYNC implies CONDSTORE, although this is unlikely to
# matter in practice.
capabilities = self.conn.capabilities()
return 'CONDSTORE' in capabilities or 'QRESYNC' in capabilities
def idle_supported(self):
return 'IDLE' in self.conn.capabilities()
def search_uids(self, criteria):
"""
Find UIDs in this folder matching the criteria. See
http://tools.ietf.org/html/rfc3501.html#section-6.4.4 for valid
criteria.
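        Example (criteria values are illustrative)::
            client.search_uids(['UNSEEN'])
            client.search_uids(['SINCE', '01-Jan-2014'])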
"""
return sorted([long(uid) for uid in self.conn.search(criteria)])
def all_uids(self):
""" Fetch all UIDs associated with the currently selected folder.
Returns
-------
list
UIDs as integers sorted in ascending order.
"""
# Note that this list may include items which have been marked for
# deletion with the \Deleted flag, but not yet actually removed via
# an EXPUNGE command. I choose to include them here since most clients
# will still display them (sometimes with a strikethrough). If showing
# these is a problem, we can either switch back to searching for
# 'UNDELETED' or doing a fetch for ['UID', 'FLAGS'] and filtering.
try:
t = time.time()
fetch_result = self.conn.search(['ALL'])
except imaplib.IMAP4.error as e:
if e.message.find('UID SEARCH wrong arguments passed') >= 0:
# Mail2World servers fail for the otherwise valid command
# 'UID SEARCH ALL' but strangely pass for 'UID SEARCH ALL UID'
log.debug("Getting UIDs failed when using 'UID SEARCH "
"ALL'. Switching to alternative 'UID SEARCH "
"ALL UID", exception=e)
t = time.time()
fetch_result = self.conn.search(['ALL', 'UID'])
else:
raise
elapsed = time.time() - t
log.debug('Requested all UIDs',
selected_folder=self.selected_folder_name,
search_time=elapsed,
total_uids=len(fetch_result))
return sorted([long(uid) for uid in fetch_result])
def uids(self, uids):
uid_set = set(uids)
messages = []
raw_messages = {}
for uid in uid_set:
try:
raw_messages.update(self.conn.fetch(
uid, ['BODY.PEEK[]', 'INTERNALDATE', 'FLAGS']))
except imapclient.IMAPClient.Error as e:
if ('[UNAVAILABLE] UID FETCH Server error '
'while fetching messages') in str(e):
                log.info('Got an exception while requesting a UID',
uid=uid, error=e,
logstash_tag='imap_download_exception')
continue
else:
                    log.info(('Got an unhandled exception while '
                              'requesting a UID'),
uid=uid, error=e,
logstash_tag='imap_download_exception')
raise
for uid in sorted(raw_messages.iterkeys(), key=long):
# Skip handling unsolicited FETCH responses
if uid not in uid_set:
continue
msg = raw_messages[uid]
if msg.keys() == ['SEQ']:
log.error('No data returned for UID, skipping', uid=uid)
continue
messages.append(RawMessage(uid=long(uid),
internaldate=msg['INTERNALDATE'],
flags=msg['FLAGS'],
body=msg['BODY[]'],
# TODO: use data structure that isn't
# Gmail-specific
g_thrid=None, g_msgid=None,
g_labels=None))
return messages
def flags(self, uids):
if len(uids) > 100:
# Some backends abort the connection if you give them a really
# long sequence set of individual UIDs, so instead fetch flags for
# all UIDs greater than or equal to min(uids).
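            # e.g. uids = [1007, 1012, 2000] yields seqset '1007:*'
            # (illustrative values; the server then returns flags for
            # every UID >= 1007)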
seqset = '{}:*'.format(min(uids))
else:
seqset = uids
data = self.conn.fetch(seqset, ['FLAGS'])
uid_set = set(uids)
return {uid: Flags(ret['FLAGS'])
for uid, ret in data.items() if uid in uid_set}
def delete_uids(self, uids):
uids = [str(u) for u in uids]
self.conn.delete_messages(uids)
self.conn.expunge()
def set_starred(self, uids, starred):
if starred:
self.conn.add_flags(uids, ['\\Flagged'])
else:
self.conn.remove_flags(uids, ['\\Flagged'])
def set_unread(self, uids, unread):
uids = [str(u) for u in uids]
if unread:
self.conn.remove_flags(uids, ['\\Seen'])
else:
self.conn.add_flags(uids, ['\\Seen'])
def save_draft(self, message, date=None):
assert self.selected_folder_name in self.folder_names()['drafts'], \
'Must select a drafts folder first ({0})'.\
format(self.selected_folder_name)
self.conn.append(self.selected_folder_name, message, ['\\Draft',
'\\Seen'], date)
def create_message(self, message, date=None):
"""
Create a message on the server. Only used to fix server-side bugs,
like iCloud not saving Sent messages.
"""
assert self.selected_folder_name in self.folder_names()['sent'], \
'Must select sent folder first ({0})'.\
format(self.selected_folder_name)
return self.conn.append(self.selected_folder_name, message, [], date)
def fetch_headers(self, uids):
"""
Fetch headers for the given uids. Chunked because certain providers
fail with 'Command line too large' if you feed them too many uids at
once.
"""
headers = {}
for uid_chunk in chunk(uids, 100):
headers.update(self.conn.fetch(
uid_chunk, ['BODY.PEEK[HEADER]']))
return headers
def find_by_header(self, header_name, header_value):
"""Find all uids in the selected folder with the given header value."""
all_uids = self.all_uids()
# It would be nice to just search by header too, but some backends
# don't support that, at least not if you want to search by X-INBOX-ID
# header. So fetch the header for each draft and see if we
# can find one that matches.
# TODO(emfree): are there other ways we can narrow the result set a
        # priori (by subject or date, etc.)?
matching_draft_headers = self.fetch_headers(all_uids)
results = []
for uid, response in matching_draft_headers.iteritems():
headers = response['BODY[HEADER]']
parser = HeaderParser()
header = parser.parsestr(headers).get(header_name)
if header == header_value:
results.append(uid)
return results
def delete_draft(self, inbox_uid, message_id_header):
"""
Delete a draft, as identified either by its X-Inbox-Id or by its
Message-Id header. We first delete the message from the Drafts folder,
and then also delete it from the Trash folder if necessary.
"""
drafts_folder_name = self.folder_names()['drafts'][0]
self.conn.select_folder(drafts_folder_name)
self._delete_message(inbox_uid, message_id_header)
trash_folder_name = self.folder_names()['trash'][0]
self.conn.select_folder(trash_folder_name)
self._delete_message(inbox_uid, message_id_header)
def _delete_message(self, inbox_uid, message_id_header):
"""
Delete a message from the selected folder, using either the X-Inbox-Id
header or the Message-Id header to locate it. Does nothing if no
matching messages are found, or if more than one matching message is
found.
"""
assert inbox_uid or message_id_header, 'Need at least one header'
if inbox_uid:
matching_uids = self.find_by_header('X-Inbox-Id', inbox_uid)
else:
matching_uids = self.find_by_header('Message-Id',
message_id_header)
if not matching_uids:
log.error('No remote messages found to delete',
inbox_uid=inbox_uid,
message_id_header=message_id_header)
return
if len(matching_uids) > 1:
log.error('Multiple remote messages found to delete',
inbox_uid=inbox_uid,
message_id_header=message_id_header,
uids=matching_uids)
return
self.conn.delete_messages(matching_uids)
self.conn.expunge()
def logout(self):
self.conn.logout()
def idle(self, timeout):
"""Idle for up to `timeout` seconds. Make sure we take the connection
back out of idle mode so that we can reuse this connection in another
context."""
log.info('Idling', timeout=timeout)
self.conn.idle()
try:
with self._restore_timeout():
r = self.conn.idle_check(timeout)
except:
self.conn.idle_done()
raise
self.conn.idle_done()
return r
@contextlib.contextmanager
def _restore_timeout(self):
# IMAPClient.idle_check() calls setblocking(1) on the underlying
# socket, erasing any previously set timeout. So make sure to restore
# the timeout.
sock = getattr(self.conn._imap, 'sslobj', self.conn._imap.sock)
timeout = sock.gettimeout()
try:
yield
finally:
sock.settimeout(timeout)
def condstore_changed_flags(self, modseq):
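        # CONDSTORE sketch (RFC 7162): this issues roughly
        # FETCH 1:* (FLAGS) (CHANGEDSINCE <modseq>), so only messages whose
        # flags changed after the given MODSEQ are returned.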
data = self.conn.fetch('1:*', ['FLAGS'],
modifiers=['CHANGEDSINCE {}'.format(modseq)])
return {uid: Flags(ret['FLAGS']) for uid, ret in data.items()}
class GmailCrispinClient(CrispinClient):
PROVIDER = 'gmail'
def sync_folders(self):
"""
Gmail-specific list of folders to sync.
In Gmail, every message is in `All Mail`, with the exception of
messages in the Trash and Spam folders. So we only sync the `All Mail`,
Trash and Spam folders.
Returns
-------
list
Folders to sync (as strings).
"""
present_folders = self.folder_names()
if 'all' not in present_folders:
raise GmailSettingError(
"Account {} ({}) is missing the 'All Mail' folder. This is "
"probably due to 'Show in IMAP' being disabled. "
"Please enable at "
"https://mail.google.com/mail/#settings/labels"
.format(self.account_id, self.email_address))
# If the account has Trash, Spam folders, sync those too.
to_sync = []
for folder in ['all', 'trash', 'spam']:
if folder in present_folders:
to_sync.append(present_folders[folder][0])
return to_sync
def flags(self, uids):
"""
Gmail-specific flags.
Returns
-------
dict
Mapping of `uid` : GmailFlags.
"""
data = self.conn.fetch(uids, ['FLAGS', 'X-GM-LABELS'])
uid_set = set(uids)
return {uid: GmailFlags(ret['FLAGS'], ret['X-GM-LABELS'])
for uid, ret in data.items() if uid in uid_set}
def condstore_changed_flags(self, modseq):
data = self.conn.fetch('1:*', ['FLAGS', 'X-GM-LABELS'],
modifiers=['CHANGEDSINCE {}'.format(modseq)])
results = {}
for uid, ret in data.items():
if 'FLAGS' not in ret or 'X-GM-LABELS' not in ret:
# We might have gotten an unsolicited fetch response that
# doesn't have all the data we asked for -- if so, explicitly
# fetch flags and labels for that UID.
log.info('Got incomplete response in flags fetch', uid=uid,
ret=str(ret))
data_for_uid = self.conn.fetch(uid, ['FLAGS', 'X-GM-LABELS'])
if not data_for_uid:
continue
ret = data_for_uid[uid]
results[uid] = GmailFlags(ret['FLAGS'], ret['X-GM-LABELS'])
return results
def g_msgids(self, uids):
"""
X-GM-MSGIDs for the given UIDs.
Returns
-------
dict
Mapping of `uid` (long) : `g_msgid` (long)
"""
data = self.conn.fetch(uids, ['X-GM-MSGID'])
uid_set = set(uids)
return {uid: ret['X-GM-MSGID']
for uid, ret in data.items() if uid in uid_set}
def folder_names(self, force_resync=False):
"""
Return the folder names ( == label names for Gmail) for the account
as a mapping from recognized role: list of folder names in the
role, for example: 'sent': ['Sent Items', 'Sent'].
The list of recognized categories is in:
inbox/models/constants.py
Folders that do not belong to a recognized role are mapped to None, for
example: None: ['MyFolder', 'OtherFolder'].
The mapping is also cached in self._folder_names
Parameters:
-----------
force_resync: boolean
Return the cached mapping or return a refreshed mapping
(after refetching from the remote).
"""
if force_resync or self._folder_names is None:
self._folder_names = defaultdict(list)
raw_folders = self.folders()
for f in raw_folders:
self._folder_names[f.role].append(f.display_name)
return self._folder_names
def folders(self):
"""
Fetch the list of folders for the account from the remote, return as a
list of RawFolder objects.
NOTE:
Always fetches the list of folders from the remote.
"""
raw_folders = []
folders = self._fetch_folder_list()
for flags, delimiter, name in folders:
if u'\\Noselect' in flags or u'\\NoSelect' in flags \
or u'\\NonExistent' in flags:
# Special folders that can't contain messages, usually
# just '[Gmail]'
continue
raw_folder = self._process_folder(name, flags)
raw_folders.append(raw_folder)
return raw_folders
def _process_folder(self, display_name, flags):
"""
Determine the canonical_name for the remote folder from its `name` and
`flags`.
Returns
-------
RawFolder representing the folder
"""
flag_map = {'\\Drafts': 'drafts', '\\Important': 'important',
'\\Sent': 'sent', '\\Junk': 'spam', '\\Flagged': 'starred',
'\\Trash': 'trash'}
role = None
if '\\All' in flags:
role = 'all'
elif display_name.lower() == 'inbox':
# Special-case the display name here. In Gmail, the inbox
# folder shows up in the folder list as 'INBOX', and in sync as
# the label '\\Inbox'. We're just always going to give it the
# display name 'Inbox'.
role = 'inbox'
display_name = 'Inbox'
else:
for flag in flags:
if flag in flag_map:
role = flag_map[flag]
return RawFolder(display_name=display_name, role=role)
def uids(self, uids):
raw_messages = self.conn.fetch(uids, ['BODY.PEEK[]', 'INTERNALDATE',
'FLAGS', 'X-GM-THRID',
'X-GM-MSGID', 'X-GM-LABELS'])
messages = []
uid_set = set(uids)
for uid in sorted(raw_messages.iterkeys(), key=long):
# Skip handling unsolicited FETCH responses
if uid not in uid_set:
continue
msg = raw_messages[uid]
messages.append(RawMessage(uid=long(uid),
internaldate=msg['INTERNALDATE'],
flags=msg['FLAGS'],
body=msg['BODY[]'],
g_thrid=long(msg['X-GM-THRID']),
g_msgid=long(msg['X-GM-MSGID']),
g_labels=msg['X-GM-LABELS']))
return messages
def g_metadata(self, uids):
"""
Download Gmail MSGIDs, THRIDs, and message sizes for the given uids.
Parameters
----------
uids : list
UIDs to fetch data for. Must be from the selected folder.
Returns
-------
dict
uid: GMetadata(msgid, thrid, size)
"""
# Super long sets of uids may fail with BAD ['Could not parse command']
# In that case, just fetch metadata for /all/ uids.
seqset = uids if len(uids) < 1e6 else '1:*'
data = self.conn.fetch(seqset, ['X-GM-MSGID', 'X-GM-THRID',
'RFC822.SIZE'])
uid_set = set(uids)
return {uid: GMetadata(ret['X-GM-MSGID'], ret['X-GM-THRID'],
ret['RFC822.SIZE'])
for uid, ret in data.items() if uid in uid_set}
def expand_thread(self, g_thrid):
"""
Find all message UIDs in the selected folder with X-GM-THRID equal to
g_thrid.
Returns
-------
list
"""
uids = [long(uid) for uid in
self.conn.search('X-GM-THRID {}'.format(g_thrid))]
# UIDs ascend over time; return in order most-recent first
return sorted(uids, reverse=True)
def find_by_header(self, header_name, header_value):
criteria = ['HEADER {} {}'.format(header_name, header_value)]
return self.conn.search(criteria)
| gale320/sync-engine | inbox/crispin.py | Python | agpl-3.0 | 34,132 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Tag(models.Model):
_inherit = 'myo.tag'
pharmacy_ids = fields.Many2many(
'myo.pharmacy',
'myo_pharmacy_tag_rel',
'tag_id',
'pharmacy_id',
'Pharmacies'
)
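# Inverse side of the same relation: both models reference the shared
# 'myo_pharmacy_tag_rel' table with the column roles swapped, which keeps
# the Many2many in sync.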
class Pharmacy(models.Model):
_inherit = 'myo.pharmacy'
tag_ids = fields.Many2many(
'myo.tag',
'myo_pharmacy_tag_rel',
'pharmacy_id',
'tag_id',
'Tags'
)
| MostlyOpen/odoo_addons | myo_pharmacy/models/tag.py | Python | agpl-3.0 | 1,365 |
# encoding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import os.path
import sys
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
from ckan.lib.plugins import DefaultTranslation
from . import get_config
log = logging.getLogger(__name__)
class DiscoveryPlugin(plugins.SingletonPlugin, DefaultTranslation):
plugins.implements(plugins.ITemplateHelpers)
plugins.implements(plugins.ITranslation)
#
# ITemplateHelpers
#
def get_helpers(self):
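        # Helpers registered here become callable from templates through
        # CKAN's `h` object, e.g. `h.discovery_get_config('key')` (usage
        # sketch; the key name is illustrative).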
return {
'discovery_get_config': get_config,
'discovery_as_bool': toolkit.asbool,
}
#
# ITranslation
#
def i18n_directory(self):
module = sys.modules['ckanext.discovery']
module_dir = os.path.abspath(os.path.dirname(module.__file__))
return os.path.join(module_dir, 'i18n')
def i18n_domain(self):
return 'ckanext-discovery'
| stadt-karlsruhe/ckanext-discovery | ckanext/discovery/plugins/discovery.py | Python | agpl-3.0 | 999 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| odoo-arg/odoo_l10n_ar | l10n_ar/wizard/__init__.py | Python | agpl-3.0 | 951 |
# encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from six.moves.urllib.parse import quote, quote_plus
from .check_utils import journey_basic_query
from .tests_mechanism import dataset, AbstractTestFixture
from .check_utils import *
from six.moves import range
@dataset({"main_ptref_test": {}})
class TestPtRef(AbstractTestFixture):
"""
Test the structure of the ptref response
"""
@staticmethod
def _test_links(response, pt_obj_name):
# Test the validity of links of 'previous', 'next', 'last', 'first'
wanted_links_type = ['previous', 'next', 'last', 'first']
for l in response['links']:
if l['type'] in wanted_links_type:
assert pt_obj_name in l['href']
# Test the consistency between links
wanted_links = [l['href'] for l in response['links'] if l['type'] in wanted_links_type]
if len(wanted_links) <= 1:
return
def _get_dict_to_compare(link):
url_dict = query_from_str(link)
url_dict.pop('start_page', None)
url_dict['url'] = link.split('?')[0]
return url_dict
url_dict = _get_dict_to_compare(wanted_links[0])
for l in wanted_links[1:]:
assert url_dict == _get_dict_to_compare(l)
def test_pagination_links_with_count(self):
response = self.query_region("stop_points?count=2&start_page=2", display=True)
for link in response['links']:
if link['type'] in ('previous', 'next', 'first', 'last'):
assert 'count=2' in link['href']
def test_vj_default_depth(self):
"""default depth is 1"""
response = self.query_region("vehicle_journeys")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
assert len(vjs) == 3
vj = vjs[0]
assert vj['id'] == 'vj1'
assert len(vj['stop_times']) == 2
assert vj['stop_times'][0]['arrival_time'] == '101500'
assert vj['stop_times'][0]['departure_time'] == '101500'
assert vj['stop_times'][1]['arrival_time'] == '111000'
assert vj['stop_times'][1]['departure_time'] == '111000'
#we added some comments on the vj, we should have them
com = get_not_null(vj, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == 'hello'
assert "feed_publishers" in response
feed_publishers = response["feed_publishers"]
for feed_publisher in feed_publishers:
is_valid_feed_publisher(feed_publisher)
feed_publisher = feed_publishers[1]
assert (feed_publisher["id"] == "c1")
assert (feed_publisher["name"] == "name-c1")
assert (feed_publisher["license"] == "ls-c1")
assert (feed_publisher["url"] == "ws-c1")
feed_publisher = feed_publishers[0]
assert (feed_publisher["id"] == "builder")
assert (feed_publisher["name"] == "canal tp")
assert (feed_publisher["license"] == "ODBL")
assert (feed_publisher["url"] == "www.canaltp.fr")
def test_vj_depth_0(self):
"""default depth is 1"""
response = self.query_region("vehicle_journeys?depth=0")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=0)
def test_vj_depth_2(self):
"""default depth is 1"""
response = self.query_region("vehicle_journeys?depth=2")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=2)
def test_vj_depth_3(self):
"""default depth is 1"""
response = self.query_region("vehicle_journeys?depth=3")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=3)
def test_vj_show_codes_propagation(self):
"""stop_area:stop1 has a code, we should be able to find it when accessing it by the vj"""
response = self.query_region("stop_areas/stop_area:stop1/vehicle_journeys")
vjs = get_not_null(response, 'vehicle_journeys')
assert vjs
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
stop_points = [get_not_null(st, 'stop_point') for vj in vjs for st in vj['stop_times']]
stops1 = [s for s in stop_points if s['id'] == 'stop_area:stop1']
assert stops1
for stop1 in stops1:
# all reference to stop1 must have it's codes
codes = get_not_null(stop1, 'codes')
code_uic = [c for c in codes if c['type'] == 'code_uic']
assert len(code_uic) == 1 and code_uic[0]['value'] == 'bobette'
def test_ptref_without_current_datetime(self):
"""
stop_area:stop1 without message because _current_datetime is NOW()
"""
response = self.query_region("stop_areas/stop_area:stop1")
assert len(response['disruptions']) == 0
def test_ptref_invalid_type(self):
response, code = self.query_region("AAAAAA/stop_areas", check=False)
assert code == 400
assert response['message'] == 'unknown type: AAAAAA'
coord = "{lon};{lat}".format(lon=1.2, lat=3.4)
response, code = self.query_region("{coord}/stop_areas".format(coord=coord), check=False)
assert code == 400
assert response['message'] == 'unknown type: {coord}'.format(coord=coord)
def test_ptref_with_current_datetime(self):
"""
stop_area:stop1 with _current_datetime
"""
response = self.query_region("stop_areas/stop_area:stop1?_current_datetime=20140115T235959")
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 1
messages = get_not_null(disruptions[0], 'messages')
        assert messages[0]['text'] == 'Disruption on StopArea stop_area:stop1'
def test_contributors(self):
"""test contributor formating"""
response = self.query_region("contributors")
contributors = get_not_null(response, 'contributors')
assert len(contributors) == 1
ctr = contributors[0]
assert(ctr["id"] == 'c1')
assert(ctr["website"] == 'ws-c1')
assert(ctr["license"] == 'ls-c1')
def test_datasets(self):
"""test dataset formating"""
response = self.query_region("datasets")
datasets = get_not_null(response, 'datasets')
assert len(datasets) == 1
ds = datasets[0]
assert(ds["id"] == 'd1')
assert(ds["description"] == 'desc-d1')
assert(ds["system"] == 'sys-d1')
def test_contributor_by_dataset(self):
"""test contributor by dataset formating"""
response = self.query_region("datasets/d1/contributors")
ctrs = get_not_null(response, 'contributors')
assert len(ctrs) == 1
ctr = ctrs[0]
assert(ctr["id"] == 'c1')
assert(ctr["website"] == 'ws-c1')
assert(ctr["license"] == 'ls-c1')
def test_dataset_by_contributor(self):
"""test dataset by contributor formating"""
response = self.query_region("contributors/c1/datasets")
frs = get_not_null(response, 'datasets')
assert len(frs) == 1
fr = frs[0]
assert(fr["id"] == 'd1')
def test_line(self):
"""test line formating"""
response = self.query_region("lines")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
l = lines[0]
is_valid_line(l, depth_check=1)
assert l["text_color"] == 'FFD700'
#we know we have a geojson for this test so we can check it
geo = get_not_null(l, 'geojson')
shape(geo)
com = get_not_null(l, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "I'm a happy comment"
physical_modes = get_not_null(l, 'physical_modes')
assert len(physical_modes) == 1
is_valid_physical_mode(physical_modes[0], depth_check=1)
assert physical_modes[0]['id'] == 'physical_mode:Car'
assert physical_modes[0]['name'] == 'name physical_mode:Car'
line_group = get_not_null(l, 'line_groups')
assert len(line_group) == 1
is_valid_line_group(line_group[0], depth_check=0)
assert line_group[0]['name'] == 'A group'
assert line_group[0]['id'] == 'group:A'
self._test_links(response, 'lines')
def test_line_without_shape(self):
"""test line formating with shape disabled"""
response = self.query_region("lines?disable_geojson=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
l = lines[0]
is_valid_line(l, depth_check=1)
        #we don't want a geojson since we have deactivated them
assert 'geojson' not in l
response = self.query_region("lines")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
l = lines[0]
is_valid_line(l, depth_check=1)
#we check our geojson, just to be safe :)
assert 'geojson' in l
geo = get_not_null(l, 'geojson')
shape(geo)
def test_line_with_shape(self):
"""test line formating with shape explicitly enabled"""
response = self.query_region("lines?disable_geojson=false")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
l = lines[0]
is_valid_line(l, depth_check=1)
# Test that the geojson is indeed there
geo = get_not_null(l, 'geojson')
shape(geo)
def test_line_groups(self):
"""test line group formating"""
        # Test each possible depth to ensure main_line is always at a depth of 0
        for depth in range(0, 3):
response = self.query_region("line_groups?depth={0}".format(depth))
line_groups = get_not_null(response, 'line_groups')
assert len(line_groups) == 1
lg = line_groups[0]
is_valid_line_group(lg, depth_check=depth)
if depth > 0:
com = get_not_null(lg, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "I'm a happy comment"
# test if line_groups are accessible through the ptref graph
response = self.query_region("routes/line:A:0/line_groups")
line_groups = get_not_null(response, 'line_groups')
assert len(line_groups) == 1
lg = line_groups[0]
is_valid_line_group(lg)
def test_line_with_active_disruption(self):
"""test disruption is active"""
response = self.query_region("lines/line:A?_current_datetime=20140115T235959")
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 1
d = disruptions[0]
# in pt_ref, the status is always active as the checked
# period is the validity period
assert d["status"] == "active"
messages = get_not_null(d, 'messages')
        assert messages[0]['text'] == 'Disruption on Line line:A'
def test_line_codes(self):
"""test line formating"""
response = self.query_region("lines/line:A?show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
l = lines[0]
codes = get_not_null(l, 'codes')
assert len(codes) == 4
is_valid_codes(codes)
def test_route(self):
"""test line formating"""
response = self.query_region("routes")
routes = get_not_null(response, 'routes')
assert len(routes) == 3
r = [r for r in routes if r['id'] == 'line:A:0']
assert len(r) == 1
r = r[0]
is_valid_route(r, depth_check=1)
#we know we have a geojson for this test so we can check it
geo = get_not_null(r, 'geojson')
shape(geo)
com = get_not_null(r, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "I'm a happy comment"
self._test_links(response, 'routes')
def test_stop_areas(self):
"""test stop_areas formating"""
response = self.query_region("stop_areas")
stops = get_not_null(response, 'stop_areas')
assert len(stops) == 3
s = next((s for s in stops if s['name'] == 'stop_area:stop1'))
is_valid_stop_area(s, depth_check=1)
com = get_not_null(s, 'comments')
assert len(com) == 2
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "comment on stop A"
assert com[1]['type'] == 'standard'
assert com[1]['value'] == "the stop is sad"
self._test_links(response, 'stop_areas')
def test_stop_area(self):
"""test stop_areas formating"""
response = self.query_region("stop_areas/stop_area:stop1?depth=2")
stops = get_not_null(response, 'stop_areas')
assert len(stops) == 1
is_valid_stop_area(stops[0], depth_check=2)
modes = get_not_null(stops[0], 'physical_modes')
assert len(modes) == 1
modes = get_not_null(stops[0], 'commercial_modes')
assert len(modes) == 1
def test_stop_points(self):
"""test stop_points formating"""
response = self.query_region("stop_points?depth=2")
stops = get_not_null(response, 'stop_points')
assert len(stops) == 3
        s = next((s for s in stops if s['name'] == 'stop_area:stop2'))  # yes, that's a stop_point
is_valid_stop_point(s, depth_check=2)
com = get_not_null(s, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "hello bob"
modes = get_not_null(s, 'physical_modes')
assert len(modes) == 1
is_valid_physical_mode(modes[0], depth_check=1)
modes = get_not_null(s, 'commercial_modes')
assert len(modes) == 1
is_valid_commercial_mode(modes[0], depth_check=1)
self._test_links(response, 'stop_points')
def test_company_default_depth(self):
"""default depth is 1"""
response = self.query_region("companies")
companies = get_not_null(response, 'companies')
for company in companies:
is_valid_company(company, depth_check=1)
#we check afterward that we have the right data
#we know there is only one vj in the dataset
assert len(companies) == 1
company = companies[0]
assert company['id'] == 'CMP1'
self._test_links(response, 'companies')
def test_simple_crow_fly(self):
journey_basic_query = "journeys?from=9;9.001&to=stop_area%3Astop2&datetime=20140105T000000"
response = self.query_region(journey_basic_query)
#the response must be still valid (this test the kraken data reloading)
self.is_valid_journey_response(response, journey_basic_query)
def test_forbidden_uris_on_line(self):
"""test forbidden uri for lines"""
response = self.query_region("lines")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
assert len(lines[0]['physical_modes']) == 1
assert lines[0]['physical_modes'][0]['id'] == 'physical_mode:Car'
#there is only one line, so when we forbid it's physical mode, we find nothing
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines"
"?forbidden_uris[]=physical_mode:Car")
assert code == 404
# for retrocompatibility purpose forbidden_id[] is the same
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines"
"?forbidden_id[]=physical_mode:Car")
assert code == 404
# when we forbid another physical_mode, we find again our line
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines"
"?forbidden_uris[]=physical_mode:Bus")
assert code == 200
def test_simple_pt_objects(self):
response = self.query_region('pt_objects?q=stop2')
is_valid_pt_objects_response(response)
pt_objs = get_not_null(response, 'pt_objects')
assert len(pt_objs) == 1
assert get_not_null(pt_objs[0], 'id') == 'stop_area:stop2'
def test_line_label_pt_objects(self):
response = self.query_region('pt_objects?q=line:A&type[]=line')
is_valid_pt_objects_response(response)
pt_objs = get_not_null(response, 'pt_objects')
assert len(pt_objs) == 1
assert get_not_null(pt_objs[0], 'name') == 'base_network Car line:A'
response = self.query_region('pt_objects?q=line:Ca roule&type[]=line')
pt_objs = get_not_null(response, 'pt_objects')
assert len(pt_objs) == 1
        # not valid as there is no commercial mode (which impacts the name)
assert get_not_null(pt_objs[0], 'name') == 'base_network line:Ça roule'
def test_query_with_strange_char(self):
q = b'stop_points/stop_point:stop_with name bob \" , é'
encoded_q = quote(q)
response = self.query_region(encoded_q)
stops = get_not_null(response, 'stop_points')
assert len(stops) == 1
is_valid_stop_point(stops[0], depth_check=1)
assert stops[0]["id"] == u'stop_point:stop_with name bob \" , é'
def test_filter_query_with_strange_char(self):
"""test that the ptref mechanism works an object with a weird id"""
response = self.query_region('stop_points/stop_point:stop_with name bob \" , é/lines')
lines = get_not_null(response, 'lines')
assert len(lines) == 1
for l in lines:
is_valid_line(l)
def test_filter_query_with_strange_char_in_filter(self):
"""test that the ptref mechanism works an object with a weird id passed in filter args"""
response = self.query_region('lines?filter=stop_point.uri="stop_point:stop_with name bob \\\" , é"')
lines = get_not_null(response, 'lines')
assert len(lines) == 1
for l in lines:
is_valid_line(l)
def test_journey_with_strange_char(self):
#we use an encoded url to be able to check the links
query = 'journeys?from={}&to={}&datetime=20140105T070000'.format(quote_plus(b'stop_with name bob \" , é'), quote_plus(b'stop_area:stop1'))
response = self.query_region(query, display=True)
self.is_valid_journey_response(response, query)
def test_vj_period_filter(self):
"""with just a since in the middle of the period, we find vj1"""
response = self.query_region("vehicle_journeys?since=20140105T070000")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
assert 'vj1' in (vj['id'] for vj in vjs)
# same with an until at the end of the day
response = self.query_region("vehicle_journeys?since=20140105T000000&until=20140106T0000")
vjs = get_not_null(response, 'vehicle_journeys')
assert 'vj1' in (vj['id'] for vj in vjs)
# there is no vj after the 8
response, code = self.query_no_assert("v1/coverage/main_ptref_test/vehicle_journeys?since=20140109T070000")
assert code == 404
assert get_not_null(response, 'error')['message'] == 'ptref : Filters: Unable to find object'
def test_line_by_code(self):
"""test the filter=type.has_code(key, value)"""
response = self.query_region("lines?filter=line.has_code(codeB, B)&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'B' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'codeB']
response = self.query_region("lines?filter=line.has_code(codeB, Bise)&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'B' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'codeB']
response = self.query_region("lines?filter=line.has_code(codeC, C)&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'B' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'codeB']
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines?filter=line.has_code(codeB, rien)&show_codes=true")
assert code == 400
assert get_not_null(response, 'error')['message'] == 'ptref : Filters: Unable to find object'
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines?filter=line.has_code(codeC, rien)&show_codes=true")
assert code == 400
assert get_not_null(response, 'error')['message'] == 'ptref : Filters: Unable to find object'
def test_pt_ref_internal_method(self):
from jormungandr import i_manager
from navitiacommon import type_pb2
i = i_manager.instances['main_ptref_test']
assert len([r for r in i.ptref.get_objs(type_pb2.ROUTE)]) == 3
@dataset({"main_ptref_test": {}, "main_routing_test": {}})
class TestPtRefRoutingAndPtrefCov(AbstractTestFixture):
def test_external_code(self):
"""test the strange and ugly external code api"""
response = self.query("v1/lines?external_code=A&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'A' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'external_code']
def test_external_code_no_code(self):
"""the external_code is a mandatory parameter for collection without coverage"""
r, status = self.query_no_assert("v1/lines")
assert status == 400
assert "parameter \"external_code\" invalid: " \
"Missing required parameter in the post body or the query string" \
"\nexternal_code description: An external code to query" == \
r.get('message')
def test_parameter_error_message(self):
"""test the parameter validation error message"""
r, status = self.query_no_assert("v1/coverage/lines?disable_geojson=12")
assert status == 400
assert "parameter \"disable_geojson\" invalid: Invalid literal for boolean(): 12\n" \
"disable_geojson description: hide the coverage geojson to reduce response size" == \
r.get('message')
def test_invalid_url(self):
"""the following bad url was causing internal errors, it should only be a 404"""
_, status = self.query_no_assert("v1/coverage/lines/bob")
assert status == 404
@dataset({"main_routing_test": {}})
class TestPtRefRoutingCov(AbstractTestFixture):
def test_with_coords(self):
"""test with a coord in the pt call, so a place nearby is actually called"""
response = self.query_region("coords/{coord}/stop_areas".format(coord=r_coord))
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
#the default is the search for all stops within 200m, so we should have A and C
assert len(stops) == 2
assert set(["stopA", "stopC"]) == set([s['name'] for s in stops])
def test_with_coord(self):
"""some but with coord and not coords"""
response = self.query_region("coord/{coord}/stop_areas".format(coord=r_coord))
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
#the default is the search for all stops within 200m, so we should have A and C
assert len(stops) == 2
assert set(["stopA", "stopC"]) == set([s['name'] for s in stops])
def test_with_coord_distance_different(self):
"""same as test_with_coord, but with 300m radius. so we find all stops"""
response = self.query_region("coords/{coord}/stop_areas?distance=300".format(coord=r_coord))
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
assert len(stops) == 3
assert set(["stopA", "stopB", "stopC"]) == set([s['name'] for s in stops])
def test_with_coord_and_filter(self):
"""
we now test with a more complex query, we want all stops with a metro within 300m of r
only A and C have a metro line
Note: the metro is physical_mode:0x1
"""
response = self.query_region("physical_modes/physical_mode:0x1/coords/{coord}/stop_areas"
"?distance=300".format(coord=r_coord), display=True)
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
        # within 300m we would find all 3 stops, but only stopA and stopC
        # have a metro line, so the mode filter leaves just those 2
assert len(stops) == 2
assert set(["stopA", "stopC"]) == set([s['name'] for s in stops])
def test_all_lines(self):
"""test with all lines in the pt call"""
response = self.query_region('lines')
assert 'error' not in response
lines = get_not_null(response, 'lines')
assert len(lines) == 4
assert {"1A", "1B", "1C", "1D"} == {l['code'] for l in lines}
def test_line_filter_line_code(self):
"""test filtering lines from line code 1A in the pt call"""
response = self.query_region('lines?filter=line.code=1A')
assert 'error' not in response
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert "1A" == lines[0]['code']
def test_line_filter_line_code_with_resource_uri(self):
"""test filtering lines from line code 1A in the pt call with a resource uri"""
response = self.query_region('physical_modes/physical_mode:0x1/lines?filter=line.code=1D')
assert 'error' not in response
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert "1D" == lines[0]['code']
def test_line_filter_line_code_empty_response(self):
"""test filtering lines from line code bob in the pt call
as no line has the code "bob" response returns no object"""
url = 'v1/coverage/main_routing_test/lines?filter=line.code=bob'
response, status = self.query_no_assert(url)
assert status == 400
assert 'error' in response
assert 'bad_filter' in response['error']['id']
def test_line_filter_route_code_ignored(self):
"""test filtering lines from route code bob in the pt call
as there is no attribute "code" for route, filter is invalid and ignored"""
response_all_lines = self.query_region('lines')
all_lines = get_not_null(response_all_lines, 'lines')
response = self.query_region('lines?filter=route.code=bob')
assert 'error' not in response
lines = get_not_null(response, 'lines')
assert len(lines) == 4
assert {l['code'] for l in all_lines} == {l['code'] for l in lines}
def test_route_filter_line_code(self):
"""test filtering routes from line code 1B in the pt call"""
response = self.query_region('routes?filter=line.code=1B')
assert 'error' not in response
routes = get_not_null(response, 'routes')
assert len(routes) == 1
assert "1B" == routes[0]['line']['code']
def test_headsign(self):
"""test basic usage of headsign"""
response = self.query_region('vehicle_journeys?headsign=vjA')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
assert len(vjs) == 1
def test_headsign_with_resource_uri(self):
"""test usage of headsign with resource uri"""
response = self.query_region('physical_modes/physical_mode:0x0/vehicle_journeys'
'?headsign=vjA')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
assert len(vjs) == 1
def test_headsign_with_code_filter_and_resource_uri(self):
"""test usage of headsign with code filter and resource uri"""
response = self.query_region('physical_modes/physical_mode:0x0/vehicle_journeys'
'?headsign=vjA&filter=line.code=1A')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
assert len(vjs) == 1
def test_multiple_resource_uri_no_final_collection_uri(self):
"""test usage of multiple resource uris with line and physical mode giving result,
then with multiple resource uris giving no result as nothing matches"""
response = self.query_region('physical_modes/physical_mode:0x0/lines/A')
assert 'error' not in response
lines = get_not_null(response, 'lines')
assert len(lines) == 1
response = self.query_region('lines/D')
assert 'error' not in response
lines = get_not_null(response, 'lines')
assert len(lines) == 1
response = self.query_region('physical_modes/physical_mode:0x1/lines/D')
assert 'error' not in response
lines = get_not_null(response, 'lines')
assert len(lines) == 1
response, status = self.query_region('physical_modes/physical_mode:0x0/lines/D', False)
assert status == 404
assert 'error' in response
assert 'unknown_object' in response['error']['id']
def test_multiple_resource_uri_with_final_collection_uri(self):
"""test usage of multiple resource uris with line and physical mode giving result,
as we match it with a final collection, so the intersection is what we want"""
response = self.query_region('physical_modes/physical_mode:0x1/lines/D/stop_areas')
assert 'error' not in response
stop_areas = get_not_null(response, 'stop_areas')
assert len(stop_areas) == 2
response = self.query_region('physical_modes/physical_mode:0x0/lines/D/stop_areas')
assert 'error' not in response
stop_areas = get_not_null(response, 'stop_areas')
assert len(stop_areas) == 1
def test_headsign_stop_time_vj(self):
"""test basic print of headsign in stop_times for vj"""
response = self.query_region('vehicle_journeys?filter=vehicle_journey.name="vjA"')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
assert len(vjs) == 1
assert len(vjs[0]['stop_times']) == 2
assert vjs[0]['stop_times'][0]['headsign'] == "A00"
assert vjs[0]['stop_times'][1]['headsign'] == "vjA"
def test_headsign_display_info_journeys(self):
"""test basic print of headsign in section for journeys"""
response = self.query_region('journeys?from=stop_point:stopB&to=stop_point:stopA&datetime=20120615T000000&max_duration_to_pt=0')
assert 'error' not in response
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 1
assert len(journeys[0]['sections']) == 1
assert journeys[0]['sections'][0]['display_informations']['headsign'] == "A00"
def test_headsign_display_info_departures(self):
"""test basic print of headsign in display informations for departures"""
response = self.query_region('stop_points/stop_point:stopB/departures?from_datetime=20120615T000000')
assert 'error' not in response
departures = get_not_null(response, 'departures')
assert len(departures) == 2
assert {"A00", "vjB"} == {d['display_informations']['headsign'] for d in departures}
def test_headsign_display_info_arrivals(self):
"""test basic print of headsign in display informations for arrivals"""
response = self.query_region('stop_points/stop_point:stopB/arrivals?from_datetime=20120615T000000')
assert 'error' not in response
arrivals = get_not_null(response, 'arrivals')
assert len(arrivals) == 2
assert arrivals[0]['display_informations']['headsign'] == "vehicle_journey 2"
def test_headsign_display_info_route_schedules(self):
"""test basic print of headsign in display informations for route schedules"""
response = self.query_region('routes/A:0/route_schedules?from_datetime=20120615T000000')
assert 'error' not in response
route_schedules = get_not_null(response, 'route_schedules')
assert len(route_schedules) == 1
assert len(route_schedules[0]['table']['headers']) == 1
display_info = route_schedules[0]['table']['headers'][0]['display_informations']
assert display_info['headsign'] == "vjA"
assert {"A00", "vjA"} == set(display_info['headsigns'])
def test_trip_id_vj(self):
"""test basic print of trip and its id in vehicle_journeys"""
response = self.query_region('vehicle_journeys')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
assert any(vj['name'] == "vjB" and vj['trip']['id'] == "vjB" for vj in vjs)
def test_disruptions(self):
"""test the /disruptions api"""
response = self.query_region('disruptions')
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 9
for d in disruptions:
is_valid_disruption(d)
# we test that we can access a specific disruption
response = self.query_region('disruptions/too_bad_line_C')
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 1
# we can also display all disruptions of an object
response = self.query_region('lines/C/disruptions')
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 2
disruptions_uris = set([d['uri'] for d in disruptions])
assert {"too_bad_line_C", "too_bad_all_lines"} == disruptions_uris
        # we can't access objects from the disruption though (we don't think it useful for the moment)
response, status = self.query_region('disruptions/too_bad_line_C/lines', check=False)
assert status == 404
e = get_not_null(response, 'error')
assert e['id'] == 'unknown_object'
assert e['message'] == 'ptref : Filters: Unable to find object'
def test_trips(self):
"""test the /trips api"""
response = self.query_region('trips')
trips = get_not_null(response, 'trips')
assert len(trips) == 5
for t in trips:
is_valid_trip(t)
# we test that we can access a specific trip
response = self.query_region('trips/vjA')
trips = get_not_null(response, 'trips')
assert len(trips) == 1
assert get_not_null(trips[0], 'id') == "vjA"
# we can also display trip of a vj
response = self.query_region('vehicle_journeys/vjB/trips')
trips = get_not_null(response, 'trips')
assert len(trips) == 1
assert get_not_null(trips[0], 'id') == "vjB"
def test_attributs_in_display_info_journeys(self):
"""test some attributs in display_information of a section for journeys"""
response = self.query_region('journeys?from=stop_point:stopB&to=stop_point:stopA&datetime=20120615T000000&max_duration_to_pt=0')
assert 'error' not in response
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 1
assert len(journeys[0]['sections']) == 1
assert journeys[0]['sections'][0]['display_informations']['headsign'] == "A00"
assert journeys[0]['sections'][0]['display_informations']['color'] == "289728"
assert journeys[0]['sections'][0]['display_informations']['text_color'] == "FFD700"
assert journeys[0]['sections'][0]['display_informations']['label'] == "1A"
assert journeys[0]['sections'][0]['display_informations']['code'] == "1A"
assert journeys[0]['sections'][0]['display_informations']['name'] == "A"
def test_stop_points_depth_3(self):
"""
test stop_points formating in depth 3
Note: done in main_routing_test because we need a routing graph to have all the attributes
"""
response = self.query_region("stop_points?depth=3")
for s in get_not_null(response, 'stop_points'):
is_valid_stop_point(s, depth_check=3)
def test_pois_uri_poi_types(self):
response = self.query_region("pois/poi:station_1/poi_types")
assert len(response["poi_types"]) == 1
assert response["poi_types"][0]["id"] == "poi_type:amenity:bicycle_rental"
| antoine-de/navitia | source/jormungandr/tests/ptref_tests.py | Python | agpl-3.0 | 38,313 |
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.core.urlresolvers import reverse
from mitxmako.shortcuts import render_to_response
from courseware.access import has_access
from courseware.courses import get_course_with_access
from notes.utils import notes_enabled_for_course
from static_replace import replace_static_urls
@login_required
def index(request, course_id, book_index, page=None):
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.textbooks[book_index]
table_of_contents = textbook.table_of_contents
if page is None:
page = textbook.start_page
return render_to_response('staticbook.html',
{'book_index': book_index, 'page': int(page),
'course': course,
'book_url': textbook.book_url,
'table_of_contents': table_of_contents,
'start_page': textbook.start_page,
'end_page': textbook.end_page,
'staff_access': staff_access})
def index_shifted(request, course_id, page):
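    # The "shifted" URL scheme numbers pages with a fixed offset; +24
    # re-aligns them with the book's internal page numbers (the reason for
    # this particular offset is assumed to be historical).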
return index(request, course_id=course_id, page=int(page) + 24)
@login_required
def pdf_index(request, course_id, book_index, chapter=None, page=None):
"""
Display a PDF textbook.
course_id: course for which to display text. The course should have
"pdf_textbooks" property defined.
book index: zero-based index of which PDF textbook to display.
chapter: (optional) one-based index into the chapter array of textbook PDFs to display.
Defaults to first chapter. Specifying this assumes that there are separate PDFs for
each chapter in a textbook.
page: (optional) one-based page number to display within the PDF. Defaults to first page.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.pdf_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.pdf_textbooks[book_index]
def remap_static_url(original_url, course):
input_url = "'" + original_url + "'"
output_url = replace_static_urls(
input_url,
getattr(course, 'data_dir', None),
course_namespace=course.location
)
# strip off the quotes again...
return output_url[1:-1]
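    # Illustrative only (hypothetical values): remap_static_url('/static/book.pdf', course)
    # wraps the path in quotes so replace_static_urls can rewrite it, e.g. to something
    # like '/c4x/Org/Course/asset/book.pdf', and then strips the quotes back off.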
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
return render_to_response('static_pdfbook.html',
{'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'page': page,
'staff_access': staff_access})
@login_required
def html_index(request, course_id, book_index, chapter=None):
"""
Display an HTML textbook.
course_id: course for which to display text. The course should have
"html_textbooks" property defined.
book index: zero-based index of which HTML textbook to display.
chapter: (optional) one-based index into the chapter array of textbook HTML files to display.
Defaults to first chapter. Specifying this assumes that there are separate HTML files for
each chapter in a textbook.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
notes_enabled = notes_enabled_for_course(course)
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.html_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.html_textbooks[book_index]
def remap_static_url(original_url, course):
input_url = "'" + original_url + "'"
output_url = replace_static_urls(
input_url,
getattr(course, 'data_dir', None),
course_namespace=course.location
)
# strip off the quotes again...
return output_url[1:-1]
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
return render_to_response('static_htmlbook.html',
{'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'staff_access': staff_access,
'notes_enabled': notes_enabled})
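# A minimal sketch of how these views might be mounted in a urls.py; the URL
# patterns below are assumptions for illustration, not the platform's actual routes:
#
#   from django.conf.urls import patterns, url
#
#   urlpatterns = patterns('staticbook.views',
#       url(r'^book/(?P<book_index>\d+)/$', 'index', name='book'),
#       url(r'^pdfbook/(?P<book_index>\d+)/$', 'pdf_index', name='pdf_book'),
#       url(r'^htmlbook/(?P<book_index>\d+)/$', 'html_index', name='html_book'),
#   )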
| elimence/edx-platform | lms/djangoapps/staticbook/views.py | Python | agpl-3.0 | 5,555 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import locale
from optparse import make_option
from django.contrib.auth.models import User, Group
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import override, ugettext as _
from wirecloud.catalogue.views import add_packaged_resource
from wirecloud.commons.utils.template import TemplateParser
from wirecloud.commons.utils.wgt import WgtFile
from wirecloud.platform.localcatalogue.utils import install_resource_to_user, install_resource_to_group, install_resource_to_all_users
class Command(BaseCommand):
args = '<file.wgt>...'
help = 'Adds one or more packaged mashable application components into the catalogue'
option_list = BaseCommand.option_list + (
make_option('--redeploy',
action='store_true',
dest='redeploy',
help='Replace mashable application components files with the new ones.',
default=False),
make_option('-u', '--users',
action='store',
type='string',
dest='users',
help='Comma separated list of users that will obtain access to the uploaded mashable application components',
default=''),
make_option('-g', '--groups',
action='store',
type='string',
dest='groups',
help='Comma separated list of groups that will obtain access rights to the uploaded mashable application components',
default=''),
make_option('-p', '--public',
action='store_true',
dest='public',
help='Allow any user to access the mashable application components.',
default=False),
)
def _handle(self, *args, **options):
if len(args) < 1:
raise CommandError(_('Wrong number of arguments'))
self.verbosity = int(options.get('verbosity', 1))
users = []
groups = []
redeploy = options['redeploy']
public = options['public']
users_string = options['users'].strip()
groups_string = options['groups'].strip()
if redeploy is False and public is False and users_string == '' and groups_string == '':
            raise CommandError(_('You must use at least one of the following flags: --redeploy, --users, --groups or --public'))
if not options['redeploy']:
if users_string != '':
for username in users_string.split(','):
users.append(User.objects.get(username=username))
if groups_string != '':
for groupname in groups_string.split(','):
groups.append(Group.objects.get(name=groupname))
        for file_name in args:
            try:
                f = open(file_name, 'rb')
                wgt_file = WgtFile(f)
            except Exception:
                # A bare "except:" here would also swallow KeyboardInterrupt and SystemExit.
                self.log(_('Failed to read from %(file_name)s') % {'file_name': file_name}, level=1)
                continue
            try:
                template_contents = wgt_file.get_template()
                template = TemplateParser(template_contents)
                if options['redeploy']:
                    add_packaged_resource(f, None, wgt_file=wgt_file, template=template, deploy_only=True)
                else:
                    for user in users:
                        install_resource_to_user(user, file_contents=wgt_file)
                    for group in groups:
                        install_resource_to_group(group, file_contents=wgt_file)
                    if public:
                        install_resource_to_all_users(file_contents=wgt_file)
                self.log(_('Successfully imported "%(name)s" from "%(file_name)s"') % {'name': template.get_resource_processed_info()['title'], 'file_name': file_name}, level=1)
            except Exception:
                self.log(_('Failed to import the mashable application component from %(file_name)s') % {'file_name': file_name}, level=1)
            finally:
                # Close the handles even when the import fails; the original code
                # leaked them on error.
                wgt_file.close()
                f.close()
def handle(self, *args, **options):
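        # Resolve the system default locale (which may be unset) and run the
        # command under it so translated log messages are rendered consistently.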
try:
default_locale = locale.getdefaultlocale()[0][:2]
except TypeError:
default_locale = None
with override(default_locale):
return self._handle(*args, **options)
def log(self, msg, level=2):
"""
Small log helper
"""
if self.verbosity >= level:
self.stdout.write(msg)
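# Example invocations (illustrative only; the .wgt file names are hypothetical):
#   python manage.py addtocatalogue --users=admin,alice CoNWeT_widget_1.0.wgt
#   python manage.py addtocatalogue --public mashup.wgt
#   python manage.py addtocatalogue --redeploy widget.wgt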
| rockneurotiko/wirecloud | src/wirecloud/catalogue/management/commands/addtocatalogue.py | Python | agpl-3.0 | 5,256 |