repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
csutherl/sos | sos/plugins/acpid.py | 12 | 1211 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Acpid(Plugin):
"""ACPI daemon information"""
plugin_name = "acpid"
profiles = ('hardware',)
class RedHatAcpid(Acpid, RedHatPlugin):
def setup(self):
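# add_copy_spec() registers the file paths and globs that sos should collect
# into the generated report for this plugin.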
self.add_copy_spec([
"/var/log/acpid*",
"/etc/acpi/events/power.conf"])
class DebianAcpid(Acpid, DebianPlugin, UbuntuPlugin):
def setup(self):
self.add_copy_spec([
"/etc/acpi/events/powerbtn*"])
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
codypiersall/platformer | lib/menu.py | 1 | 7244 | """
A simple menu system for pygame. Probably a little too simple, unfortunately."""
from __future__ import division
import pygame
# This silly Exception is used to return from a menu.
class ReturnError(Exception):
pass
class Exit(Exception):
pass
class Menu(object):
"""
Class for building a menu. Initialize it with a Pygame surface object,
list of menu items (it's just a list of strings), a Pygame font object,
and a dict of settings. The idea behind this class is that menus are
for changing settings, which will be given to other Pygame objects.
"""
SPACE = 10
UP = pygame.K_UP
DOWN = pygame.K_DOWN
RETURN = pygame.K_RETURN
BG_COLOR = (0,0,0)
FONT_COLOR = (255,0,0)
SELECTOR_COLOR = (0,255,0)
def __init__(self, screen, items, font, settings=None):
self.settings = settings if settings else {}
self.screen = screen
self.items = items
self.selected = 0
self.surfaces = []
self.font = font
self.actions = {}
self.initial_repeat = pygame.key.get_repeat()
self.repeat = (200, 70)
def add_item(self, item):
"""Add another item to the menu. `item` should just be a string."""
self.items.append(item)
def add_submenu(self, index, items):
"""
Create a new Menu instance, initialized with items, that can be
accessed by clicking on the index of the current menu.
This makes the font and the settings refer to the same object,
so a submenu can change settings too.
example:
```
main_menu = Menu(screen, ['Start', 'Options', 'Back'], some_font)
options_menu = main_menu.add_submenu(1, ['Levels', 'Character Select'])
```
this will create a menu with "Start", "Options", and "Back" items first;
then clicking "Options" will start the `options_menu` main loop.
"""
submenu = Menu(self.screen, items, self.font, self.settings)
self.__add_action(index, submenu)
return submenu
def change_settings(self, index, setting, value):
"""
When a menu item associated with the given index is clicked,
change the setting indicated to value.
"""
self.__add_action(index, ('settings', setting, value))
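# Hedged usage sketch (the item labels and the 'difficulty' key are made-up
# examples, not part of this project): selecting item 1 stores
# settings['difficulty'] = 'hard' and then returns from the menu loop.
#
#   options = Menu(screen, ['Easy', 'Hard', 'Back'], font, settings)
#   options.change_settings(1, 'difficulty', 'hard')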
def draw(self):
"""Menu layout and whatnot."""
self.surfaces = [self.font.render(str(i), 1, self.FONT_COLOR) for i in self.items]
num_items = len(self.items)
ind_height = self.surfaces[0].get_height()
height = self.surfaces[0].get_height() * num_items + self.SPACE * (num_items - 1)
width = max(s.get_width() for s in self.surfaces)
draw_surf = pygame.Surface((width, height))
draw_surf.fill(self.BG_COLOR)
for i, item in enumerate(self.surfaces):
draw_surf.blit(item, (0, ind_height*i + self.SPACE*i))
menu_x = (self.screen.get_width() - width) / 2
menu_y = (self.screen.get_height() - height) / 2
sy = menu_y + ind_height*self.selected + self.SPACE * self.selected
sx = menu_x - 20
self.screen.fill(self.BG_COLOR)
self.screen.blit(draw_surf, (menu_x, menu_y))
pygame.draw.polygon(self.screen, self.SELECTOR_COLOR, ([sx,sy], [sx, sy + ind_height], [sx + 10, (2 *sy + ind_height) / 2]))
def change_select(self, direction):
"""Change the current menu selection."""
if direction == self.UP:
if self.selected == 0:
self.selected = len(self.items) - 1
else:
self.selected -= 1
elif direction == self.DOWN:
if self.selected == len(self.items) - 1:
self.selected = 0
else:
self.selected += 1
def _reset_repeat(self):
"""Change key repeat back to what it was before the menu was called."""
if self.initial_repeat == (0, 0):
pygame.key.set_repeat()
else:
pygame.key.set_repeat(*self.initial_repeat)
def seeya(self):
"""Clean up code when the menu is destroyed."""
self._reset_repeat()
def on_enter(self):
"""Determine what to do when the enter key is pressed."""
try:
action = self.actions[self.selected]
except KeyError:
print("You should add an action for item #{}.".format(self.selected))
return
if isinstance(action, Menu):
action.mainloop()
elif action == 'return':
# hokey way of getting back to the main loop. I'm not proud
# of this.
raise ReturnError
elif isinstance(action, (tuple, list)):
if action[0] == 'settings':
self.settings[action[1]] = action[2]
print(self.settings)
raise ReturnError
if action[0] == 'start':
game = action[1]()
self._reset_repeat()
game.main(self.screen, self.settings)
pygame.key.set_repeat(*self.repeat)
def add_start_action(self, index, Game):
"""Resets key repeat and calls `Game.main(self.screen, self.settings)`"""
self.__add_action(index, ('start', Game))
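# Hedged usage sketch (PlatformerGame is a hypothetical class, not defined in
# this file): any callable returning an object with a main(screen, settings)
# method can be wired to a menu entry this way.
#
#   class PlatformerGame(object):
#       def main(self, screen, settings):
#           pass  # run the game with the chosen settings
#
#   menu = Menu(screen, ['Start', 'Quit'], font)
#   menu.add_start_action(0, PlatformerGame)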
def add_back_action(self, index):
"""
Whenever `index` is selected, return to the previous menu's mainloop.
"""
self.__add_action(index, 'return')
def __add_action(self, index, action):
"""
Internal method used for adding an action to a menu item.
This should not be called directly.
"""
if index < 0:
index = len(self.items) + index
self.actions.update({index: action})
def mainloop(self):
pygame.key.set_repeat(*self.repeat)
pygame.display.update()
clock = pygame.time.Clock()
while True:
clock.tick(30)
for e in pygame.event.get():
if e.type == pygame.QUIT:
raise Exit
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
self.seeya()
return
elif e.key == self.UP or e.key == self.DOWN:
self.change_select(e.key)
elif e.key == self.RETURN:
try:
self.on_enter()
except ReturnError:
return
self.draw()
pygame.display.update()
if __name__ == '__main__':
"""Minimalist example for creating a menu."""
pygame.init()
if not pygame.display.get_init():
pygame.display.init()
if not pygame.font.get_init():
pygame.font.init()
screen = pygame.display.set_mode((640, 480))
font = pygame.font.Font('../coders_crux.ttf', 48)
menu = Menu(screen, 'Some Good Items Exit'.split(), font)
menu.add_back_action(-1)
menu.mainloop()
| bsd-3-clause |
ryanjmccall/nupic | examples/opf/experiments/classification/scalar_encoder_0/description.py | 17 | 2421 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/scalar_SP_0.csv'),
'modelParams': { 'clParams': { 'clVerbosity': 0},
'inferenceType': 'NontemporalClassification',
'sensorParams': { 'encoders': { 'field1': { 'clipInput': True,
'fieldname': u'field1',
'maxval': 0.10000000000000001,
'minval': 0.0,
'n': 11,
'name': u'field1',
'type': 'AdaptiveScalarEncoder',
'w': 7}},
'verbosity': 0},
'spEnable': False,
'spParams': { 'spVerbosity': 0},
'tpEnable': False,
'tpParams': { }}}
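# importBaseDescription() loads the shared base experiment description and
# overlays the sub-experiment `config` defined above; copying the resulting
# module's namespace into locals() exposes the merged experiment parameters
# when the OPF framework imports this file.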
mod = importBaseDescription('../base_scalar/description.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Zendesk/MonitoredTwitterHandles/GetMonitoredTwitterHandle.py | 5 | 3934 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetMonitoredTwitterHandle
# Retrieves detailed information on a specified monitored Twitter account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetMonitoredTwitterHandle(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetMonitoredTwitterHandle Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetMonitoredTwitterHandle, self).__init__(temboo_session, '/Library/Zendesk/MonitoredTwitterHandles/GetMonitoredTwitterHandle')
def new_input_set(self):
return GetMonitoredTwitterHandleInputSet()
def _make_result_set(self, result, path):
return GetMonitoredTwitterHandleResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetMonitoredTwitterHandleChoreographyExecution(session, exec_id, path)
class GetMonitoredTwitterHandleInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetMonitoredTwitterHandle
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
"""
super(GetMonitoredTwitterHandleInputSet, self)._set_input('Email', value)
def set_ID(self, value):
"""
Set the value of the ID input for this Choreo. ((required, string) ID of the monitored Twitter handle.)
"""
super(GetMonitoredTwitterHandleInputSet, self)._set_input('ID', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
"""
super(GetMonitoredTwitterHandleInputSet, self)._set_input('Password', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
"""
super(GetMonitoredTwitterHandleInputSet, self)._set_input('Server', value)
class GetMonitoredTwitterHandleResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetMonitoredTwitterHandle Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.)
"""
return self._output.get('Response', None)
class GetMonitoredTwitterHandleChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetMonitoredTwitterHandleResultSet(response, path)
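# Hedged usage sketch (the credentials and IDs below are placeholders, and the
# session/execute entry points are assumed to be the standard Temboo Python
# SDK ones rather than anything defined in this file):
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = GetMonitoredTwitterHandle(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Email('agent@example.com')
#   inputs.set_Password('zendesk-password')
#   inputs.set_Server('example.zendesk.com')
#   inputs.set_ID('12345')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())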
| gpl-2.0 |
batxes/4Cin | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres13173.py | 4 | 88213 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
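# Each block below follows the same generated pattern: create (or reuse) a
# VolumePath marker set named "particle_N geometry", then place one spherical
# marker at an (x, y, z) coordinate with an (r, g, b) colour and a radius,
# one marker per modelled particle.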
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((3090.89, 5216.81, 3306.16), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((3442.4, 5488.79, 3362.45), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((3243.28, 5186.02, 3208.37), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((3266.09, 5486.11, 2938.21), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((3308.41, 5741.81, 2530.91), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((3314.34, 5112.88, 2522.65), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((3226.75, 4558.87, 2388.07), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2820.11, 4640.86, 1957.35), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2449.6, 4795.53, 1424.79), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2690.15, 4499.98, 1601.81), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((3072.82, 4363.07, 2003), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((3454.34, 4147.67, 2500.02), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((3845.48, 4233.73, 2737.26), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((4017.91, 4541.21, 2723.57), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((4401.24, 4603.34, 2818.23), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((4794.35, 4726.7, 3173.3), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((5186.86, 4777.04, 3678.1), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5387.31, 4579.19, 4174.33), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((5505.5, 4442.72, 4747.76), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((5715.78, 4619.53, 5401.87), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((5354.49, 4154.73, 5120.84), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((5444.65, 3767.47, 4897.28), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((5637.46, 3350.81, 4869.79), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((5430.83, 3055.01, 4907.7), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5215.06, 2952.45, 5124.7), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((4993.18, 3158.14, 5200.35), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4942.44, 3245.51, 4791.83), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4508.02, 3364.32, 4409.99), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((4102.11, 3385.19, 4041.84), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((3999.66, 3459.17, 3639.49), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((3990.46, 3321.12, 3375.95), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((4184.65, 3265.84, 3644.95), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((4026.47, 3512.91, 3767.62), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((3897.67, 3697.53, 3916.15), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((3807.73, 3656.25, 4086.53), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((3783.04, 3832.54, 4398.97), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((3228.84, 2967.93, 4776.67), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2616.08, 2353.63, 4600.86), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((1952.2, 2273.33, 4248.27), (0.7, 0.7, 0.7), 221.694)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1721.16, 2626.59, 3618.89), (0.7, 0.7, 0.7), 259.341)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((2382.49, 2754.68, 3233.37), (0.7, 0.7, 0.7), 117.89)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((3187.11, 2940.63, 3218.99), (0.7, 0.7, 0.7), 116.071)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((3423.18, 3186.91, 3539), (0.7, 0.7, 0.7), 268.224)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((3119.75, 3280.74, 3537.84), (0.7, 0.7, 0.7), 386.918)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2659.51, 3383, 3130.67), (0.7, 0.7, 0.7), 121.316)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2262.82, 3557.09, 3144.59), (0.7, 0.7, 0.7), 138.363)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((2681.82, 3645.19, 3719.46), (1, 0.7, 0), 175.207)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((2011.97, 3812.97, 3529.26), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((1293.89, 3958.68, 3548.36), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((1523.24, 3465.94, 3594.87), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((2067.26, 3224.06, 3548.5), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2194.35, 3102.8, 3444.89), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((1577.34, 3094.31, 3356.89), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((1071.16, 3085.41, 3225.32), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((699.839, 3134.94, 2967.88), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((1194.99, 2993.02, 2595.42), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((1650.27, 2814.7, 2723.25), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2150.52, 2680.43, 2960.7), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2599.89, 2684.91, 3234.81), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((2498.8, 2743.03, 3522.98), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2417.74, 2809.45, 3528.8), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2064.84, 2726.46, 3228.19), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((1385.03, 2507.08, 2857.65), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((502.988, 2188.25, 2486.64), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((1046.74, 2535.16, 2334.68), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((1398.05, 2813.55, 2377.44), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((1507.41, 2672.68, 2618.01), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((1713.24, 2763.55, 2851.64), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((1783.94, 2947.17, 3190.69), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((1751.61, 3222.24, 3587.05), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((1585.48, 3430.21, 4063.25), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((1251.91, 3368.59, 4215.12), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((920.013, 2947.14, 3682.27), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((625.25, 2568.83, 3003.93), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((524.525, 2394.79, 2352.85), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((649.765, 2398.17, 1953.81), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((517.955, 1972.34, 2220.43), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((346.79, 1995.38, 2656.93), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((161.404, 2071.41, 2591.3), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((-25.3969, 1838.69, 2400.86), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((503.845, 1356.45, 2339.49), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((1222.84, 1485.94, 2421.17), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((1660.59, 1766.61, 2731.09), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((1800.31, 1723.55, 2988.95), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((1102.38, 1202.31, 2685.64), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((1022.98, 1677.39, 2505.54), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((1223.57, 2185.68, 2445.99), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((1357.33, 2514.76, 2270.95), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((977.298, 2261.52, 1914.44), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((825.453, 2187.1, 2286.49), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((837.571, 2430.38, 2631.33), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((1161.16, 2521.92, 2635.77), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((1306.03, 2646.15, 2301.15), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((1434.13, 2952.53, 2005.87), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((1157.57, 3458.33, 1790.63), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((696.696, 3738.86, 1899.14), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((677.478, 3416.37, 2459.03), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((949.309, 2991.87, 2582.89), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((1009.11, 2892.43, 2257.02), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((494.817, 2872.42, 2113.28), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((1149.85, 2656.06, 2847.1), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((2010.02, 2417.66, 3425.01), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((2488.19, 2268.67, 3360.05), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((2204.33, 2105.35, 3157.25), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((1863.04, 2112.83, 2995.47), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((1530.34, 2244.74, 2839.69), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((1270.14, 2562.14, 2745.95), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((1249.87, 2773.06, 2408.46), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((1260.97, 2915.05, 2103.49), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((1468.35, 2738.57, 2045.95), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((1647.87, 2700.23, 1960.63), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((1627.21, 2618.06, 1641.27), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((1998.47, 2558.16, 1766.02), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((2394.12, 2502.16, 1901.18), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2406.83, 2462.22, 2281.67), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((2181.3, 2142.24, 2574.6), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((2072.96, 1812.79, 2949.94), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((1629.21, 1205.21, 2899.78), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((1161.71, 866.438, 2889.49), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((1549.97, 1155.21, 2666.42), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((1731.87, 1478.97, 2617.24), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((1740.72, 1903.23, 2560.12), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((1301.34, 2136.75, 2419.2), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((1277.16, 2514.89, 2253.05), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((1251.41, 2822.63, 1973.4), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((1430, 3201.21, 1695.25), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((1263.04, 3659.72, 1326.51), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((937.274, 4090.42, 1124.53), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((719.611, 3815.82, 1491.28), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((725.563, 3427.72, 2034.38), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((758.012, 2993.92, 2075.96), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((999.731, 2602.15, 2158.14), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((1281.07, 2260.76, 2108.01), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((1279.23, 1865.68, 1913.12), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((1586.8, 1653.2, 1899.48), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((1763.41, 1530.1, 2153), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((1876.15, 1810.8, 2535.69), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2205.73, 2093.04, 2837.17), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2596.33, 2386.1, 2875.4), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2475.55, 2586.76, 2848.82), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2198.82, 2290.26, 2990.97), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((1955.8, 2033.96, 3039.56), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((1869.79, 1790.68, 2810.05), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((1761.16, 1555.08, 2643.29), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((1551.4, 1323.2, 2515.72), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((1780.48, 1547.37, 2732.15), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((1929.9, 1833.09, 2726.9), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((1812.18, 2089.44, 2616.9), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((2073.52, 2425.01, 2749.8), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2276.64, 2719.64, 2734.72), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2392.19, 2559.14, 2424.7), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((2332.26, 2393.17, 1953.59), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((2289.64, 2338.93, 1574.29), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((2313.95, 2177.08, 1864.22), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2517.97, 2359.26, 2368.67), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2600.01, 2485.32, 2816.83), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((2330.41, 2642.93, 2968.55), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((2013.04, 2492.05, 2781.23), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((1654.71, 2163.4, 2679.06), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((1746.09, 1925.79, 3022.4), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((2131.07, 1862.7, 3338.64), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((2396.61, 1700.13, 3076.17), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((2006.97, 1316.19, 2760.43), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((1396.3, 811.098, 2370.96), (0.7, 0.7, 0.7), 79.0164)
if "particle_164 geometry" not in marker_sets:
s=new_marker_set('particle_164 geometry')
marker_sets["particle_164 geometry"]=s
s= marker_sets["particle_164 geometry"]
mark=s.place_marker((1481.94, 793.405, 2045.79), (0.7, 0.7, 0.7), 77.2821)
if "particle_165 geometry" not in marker_sets:
s=new_marker_set('particle_165 geometry')
marker_sets["particle_165 geometry"]=s
s= marker_sets["particle_165 geometry"]
mark=s.place_marker((1628.33, 1100.64, 1974.76), (0.7, 0.7, 0.7), 188.658)
if "particle_166 geometry" not in marker_sets:
s=new_marker_set('particle_166 geometry')
marker_sets["particle_166 geometry"]=s
s= marker_sets["particle_166 geometry"]
mark=s.place_marker((1898.14, 1041.02, 1827.19), (0.7, 0.7, 0.7), 115.437)
if "particle_167 geometry" not in marker_sets:
s=new_marker_set('particle_167 geometry')
marker_sets["particle_167 geometry"]=s
s= marker_sets["particle_167 geometry"]
mark=s.place_marker((2103.96, 1465.73, 2206.61), (0.7, 0.7, 0.7), 88.4916)
if "particle_168 geometry" not in marker_sets:
s=new_marker_set('particle_168 geometry')
marker_sets["particle_168 geometry"]=s
s= marker_sets["particle_168 geometry"]
mark=s.place_marker((2310.07, 1904.89, 2594.65), (0.7, 0.7, 0.7), 108.88)
if "particle_169 geometry" not in marker_sets:
s=new_marker_set('particle_169 geometry')
marker_sets["particle_169 geometry"]=s
s= marker_sets["particle_169 geometry"]
mark=s.place_marker((2242.38, 2088.21, 2888.06), (0.7, 0.7, 0.7), 172.119)
if "particle_170 geometry" not in marker_sets:
s=new_marker_set('particle_170 geometry')
marker_sets["particle_170 geometry"]=s
s= marker_sets["particle_170 geometry"]
mark=s.place_marker((1940.99, 1739.53, 2722.31), (0.7, 0.7, 0.7), 139.505)
if "particle_171 geometry" not in marker_sets:
s=new_marker_set('particle_171 geometry')
marker_sets["particle_171 geometry"]=s
s= marker_sets["particle_171 geometry"]
mark=s.place_marker((1654.36, 1394.61, 2522.18), (0.7, 0.7, 0.7), 92.7639)
if "particle_172 geometry" not in marker_sets:
s=new_marker_set('particle_172 geometry')
marker_sets["particle_172 geometry"]=s
s= marker_sets["particle_172 geometry"]
mark=s.place_marker((1571.58, 1423.73, 2718.66), (0.7, 0.7, 0.7), 89.8452)
if "particle_173 geometry" not in marker_sets:
s=new_marker_set('particle_173 geometry')
marker_sets["particle_173 geometry"]=s
s= marker_sets["particle_173 geometry"]
mark=s.place_marker((1857.29, 1311.46, 2745.25), (0.7, 0.7, 0.7), 149.446)
if "particle_174 geometry" not in marker_sets:
s=new_marker_set('particle_174 geometry')
marker_sets["particle_174 geometry"]=s
s= marker_sets["particle_174 geometry"]
mark=s.place_marker((1933.14, 1154.16, 2459.34), (0.7, 0.7, 0.7), 126.858)
if "particle_175 geometry" not in marker_sets:
s=new_marker_set('particle_175 geometry')
marker_sets["particle_175 geometry"]=s
s= marker_sets["particle_175 geometry"]
mark=s.place_marker((1611.59, 1131.64, 2325.12), (0.7, 0.7, 0.7), 106.046)
if "particle_176 geometry" not in marker_sets:
s=new_marker_set('particle_176 geometry')
marker_sets["particle_176 geometry"]=s
s= marker_sets["particle_176 geometry"]
mark=s.place_marker((1192.94, 1390.41, 2494.63), (0.7, 0.7, 0.7), 156.298)
if "particle_177 geometry" not in marker_sets:
s=new_marker_set('particle_177 geometry')
marker_sets["particle_177 geometry"]=s
s= marker_sets["particle_177 geometry"]
mark=s.place_marker((728.078, 1782.92, 2604.07), (0.7, 0.7, 0.7), 231.212)
if "particle_178 geometry" not in marker_sets:
s=new_marker_set('particle_178 geometry')
marker_sets["particle_178 geometry"]=s
s= marker_sets["particle_178 geometry"]
mark=s.place_marker((742.94, 2168.54, 3016.39), (0.7, 0.7, 0.7), 88.4916)
if "particle_179 geometry" not in marker_sets:
s=new_marker_set('particle_179 geometry')
marker_sets["particle_179 geometry"]=s
s= marker_sets["particle_179 geometry"]
mark=s.place_marker((1128.29, 2309.08, 3261.29), (0.7, 0.7, 0.7), 111.334)
if "particle_180 geometry" not in marker_sets:
s=new_marker_set('particle_180 geometry')
marker_sets["particle_180 geometry"]=s
s= marker_sets["particle_180 geometry"]
mark=s.place_marker((1747.91, 2281.92, 3273.57), (0.7, 0.7, 0.7), 127.619)
if "particle_181 geometry" not in marker_sets:
s=new_marker_set('particle_181 geometry')
marker_sets["particle_181 geometry"]=s
s= marker_sets["particle_181 geometry"]
mark=s.place_marker((2205.7, 2242.19, 3277.45), (0.7, 0.7, 0.7), 230.746)
if "particle_182 geometry" not in marker_sets:
s=new_marker_set('particle_182 geometry')
marker_sets["particle_182 geometry"]=s
s= marker_sets["particle_182 geometry"]
mark=s.place_marker((1939.96, 2025.64, 3076.75), (0.7, 0.7, 0.7), 124.573)
if "particle_183 geometry" not in marker_sets:
s=new_marker_set('particle_183 geometry')
marker_sets["particle_183 geometry"]=s
s= marker_sets["particle_183 geometry"]
mark=s.place_marker((1482.01, 1656.91, 2883.13), (0.7, 0.7, 0.7), 124.489)
if "particle_184 geometry" not in marker_sets:
s=new_marker_set('particle_184 geometry')
marker_sets["particle_184 geometry"]=s
s= marker_sets["particle_184 geometry"]
mark=s.place_marker((1504.57, 1478.11, 2544.99), (0.7, 0.7, 0.7), 196.61)
if "particle_185 geometry" not in marker_sets:
s=new_marker_set('particle_185 geometry')
marker_sets["particle_185 geometry"]=s
s= marker_sets["particle_185 geometry"]
mark=s.place_marker((1531.55, 1770.69, 2557.96), (0.7, 0.7, 0.7), 134.049)
if "particle_186 geometry" not in marker_sets:
s=new_marker_set('particle_186 geometry')
marker_sets["particle_186 geometry"]=s
s= marker_sets["particle_186 geometry"]
mark=s.place_marker((1407.73, 1610.39, 2785.39), (0.7, 0.7, 0.7), 141.493)
if "particle_187 geometry" not in marker_sets:
s=new_marker_set('particle_187 geometry')
marker_sets["particle_187 geometry"]=s
s= marker_sets["particle_187 geometry"]
mark=s.place_marker((1078.32, 1373.51, 2866.52), (0.7, 0.7, 0.7), 172.203)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((1540.71, 1508.68, 2433.47), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((1728.21, 1816.18, 2082.57), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((1629.49, 2143.27, 1857.23), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((1806.4, 2496.19, 1454.7), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((2253.13, 2808.98, 1557.96), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((2331.88, 3194.75, 1445.21), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((2126.38, 3458.65, 1110.05), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((1657.55, 3744.45, 373.939), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((1196.42, 3573.54, 664.478), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((1132.34, 3166.14, 1351.41), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((1537.38, 2985.53, 2233.12), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((1930.26, 2764.05, 2718.16), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((1758.92, 2388.91, 2687.17), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((1683.29, 2102.45, 2322.3), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((1768.67, 1943.35, 1895.07), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((2057.64, 1911.34, 1997.76), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((1774.9, 1830.72, 2216.04), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((1842.69, 1593.05, 2230.1), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((2246.62, 1999.09, 2632.53), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((2299.53, 2434.33, 2910.3), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((1535.73, 2481.03, 2500.23), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((884.472, 2222.55, 1700.26), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((1121.34, 1639.07, 1805.06), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((1214.33, 1147.05, 2280.78), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((1195.48, 1154.97, 3065.55), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((1456.07, 790.8, 3514.03), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((1861.61, 279.25, 3598.95), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((2117.27, 89.8132, 3318.65), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((2338.17, 630.252, 3498.27), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((2384.47, 889.44, 3849.54), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((2499.25, 1193.79, 4020.39), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((2190.7, 1321.92, 4072.23), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((1678.66, 1155.81, 3843.79), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((1414.68, 1114.86, 3331.28), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((1414.86, 1568.91, 3204.72), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((1961.71, 2045.8, 3427.37), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((2149.39, 2141.91, 3574.62), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((2062.82, 1894.26, 3429.15), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((1748.05, 1982.65, 3191.98), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((2025.69, 1880.39, 2844.09), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2479.84, 1953.46, 2720.15), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((2898.81, 1997.4, 2452.37), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((2899.01, 1796.56, 2040.65), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((2768.53, 1698.39, 1758.58), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((3011.25, 1626.51, 1998.57), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((3249.52, 1629.44, 2217.93), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((2870.18, 1836.74, 2375.88), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((2416.45, 1752.93, 2433.89), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((1954.13, 1323.18, 2340.36), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((1660.32, 887.866, 2494.78), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((1752.55, 474.123, 2384.95), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((1738.45, 876.879, 2179.64), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((2414.78, 1064.76, 2185.36), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((2753.7, 1451.8, 2241.63), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((2622.41, 1439.97, 1966.53), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((2551.8, 1776.82, 2191.6), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((2783.2, 1774.3, 2449.27), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((2881.6, 1599.58, 2696.98), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((2967.4, 1814.45, 2794.71), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((3124.1, 2201.65, 2562.16), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((2904.93, 2033.48, 2324.32), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((2734.54, 1651.52, 2244.45), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((2773.7, 1457.69, 2539.39), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((2755.21, 1547.53, 2902.74), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((2593.21, 1495.32, 3292.93), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((2350.74, 1320.27, 3541.74), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((2516.51, 1299.83, 3250.33), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((2599.6, 1750.68, 3125.44), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((2831.54, 2044.5, 2921.39), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((2831.92, 2032.63, 3227.93), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((2773.74, 2116.1, 3189.7), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((2409.47, 1852.89, 3066.79), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((2181.51, 1841.48, 2916.64), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2526.58, 2100.86, 2803.45), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((3015.8, 2425.02, 2881.97), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((3381.31, 2749.07, 2929.37), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((3228.27, 2914.16, 3207.32), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((2964.92, 2791.07, 3278.44), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((3217.37, 2840.36, 3272.54), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((3259.57, 2817.95, 3343.47), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((3331.15, 2551.42, 3292.1), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((3460.07, 2048.74, 3059.78), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((3474.54, 1983.71, 3395.24), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((3571.13, 2299.17, 3586.25), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((3466.08, 1544.08, 3226.11), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((3301.97, 633.701, 2995.76), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((2987.77, 690.958, 2925.82), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((2850.87, 1030.99, 2737.12), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((2783.71, 1301.4, 2579.77), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((2882.65, 1874.31, 2788.84), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((3064.55, 2387.15, 3075.98), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((3120.17, 2418.67, 3038.07), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((3070.11, 2233.3, 2610.53), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((3501.14, 2630.84, 2480.91), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((4011.46, 2802.37, 2663.01), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((4194.71, 2816.88, 2652.96), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((4225.48, 2790.29, 2474.77), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((4336.5, 2363.18, 2188.66), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((4752.36, 2179.67, 1851.79), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((4562.94, 2488.17, 1779.18), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((4278.31, 3012.7, 1826.79), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((4311.86, 3390.32, 1318.4), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((4509.41, 3633.6, 552.179), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((4615.95, 3242.42, 412.954), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((4684.83, 2820.11, 1774), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((4306.57, 3630.66, 1091), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((4234.8, 4078.98, 950.138), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((4500.42, 3932.57, 1058.17), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((4891.01, 3833.47, 855.543), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((4804.93, 3438.47, 1322.52), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((4482.09, 3198.21, 2042.73), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((4431.4, 2872.71, 2324.48), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((4614.6, 2980.88, 2438.55), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((4921.42, 2712.29, 2403.03), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((4988.74, 2388.5, 2272.44), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((4991.85, 2101.53, 2050.3), (0.7, 0.7, 0.7), 209.047)
if "particle_304 geometry" not in marker_sets:
s=new_marker_set('particle_304 geometry')
marker_sets["particle_304 geometry"]=s
s= marker_sets["particle_304 geometry"]
mark=s.place_marker((5327.33, 2083.91, 2296.79), (0.7, 0.7, 0.7), 126.985)
if "particle_305 geometry" not in marker_sets:
s=new_marker_set('particle_305 geometry')
marker_sets["particle_305 geometry"]=s
s= marker_sets["particle_305 geometry"]
mark=s.place_marker((5707.25, 2025.51, 2227.07), (0.7, 0.7, 0.7), 122.205)
if "particle_306 geometry" not in marker_sets:
s=new_marker_set('particle_306 geometry')
marker_sets["particle_306 geometry"]=s
s= marker_sets["particle_306 geometry"]
mark=s.place_marker((5755.37, 2064.68, 2020.59), (0.7, 0.7, 0.7), 107.95)
if "particle_307 geometry" not in marker_sets:
s=new_marker_set('particle_307 geometry')
marker_sets["particle_307 geometry"]=s
s= marker_sets["particle_307 geometry"]
mark=s.place_marker((5274.05, 2285.16, 2318.03), (0.7, 0.7, 0.7), 182.567)
if "particle_308 geometry" not in marker_sets:
s=new_marker_set('particle_308 geometry')
marker_sets["particle_308 geometry"]=s
s= marker_sets["particle_308 geometry"]
mark=s.place_marker((4715.81, 2559.83, 2436.75), (0.7, 0.7, 0.7), 185.274)
if "particle_309 geometry" not in marker_sets:
s=new_marker_set('particle_309 geometry')
marker_sets["particle_309 geometry"]=s
s= marker_sets["particle_309 geometry"]
mark=s.place_marker((4254.85, 2574.56, 2306.89), (0.7, 0.7, 0.7), 413.567)
if "particle_310 geometry" not in marker_sets:
s=new_marker_set('particle_310 geometry')
marker_sets["particle_310 geometry"]=s
s= marker_sets["particle_310 geometry"]
mark=s.place_marker((4170.11, 2735.92, 2486.12), (0.7, 0.7, 0.7), 240.01)
if "particle_311 geometry" not in marker_sets:
s=new_marker_set('particle_311 geometry')
marker_sets["particle_311 geometry"]=s
s= marker_sets["particle_311 geometry"]
mark=s.place_marker((4173.87, 2688.1, 2458.17), (0.7, 0.7, 0.7), 238.995)
if "particle_312 geometry" not in marker_sets:
s=new_marker_set('particle_312 geometry')
marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((4424.16, 2694.95, 2540.83), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
s=new_marker_set('particle_313 geometry')
marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((4789.23, 2340.14, 2864.99), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
s=new_marker_set('particle_314 geometry')
marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((5028.08, 2731.96, 2906.55), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
s=new_marker_set('particle_315 geometry')
marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((4786.7, 2799.58, 2816.75), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
s=new_marker_set('particle_316 geometry')
marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((4526.81, 2476.19, 2750.35), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
s=new_marker_set('particle_317 geometry')
marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((4449.67, 1925.79, 2317.33), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
s=new_marker_set('particle_318 geometry')
marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((4378.39, 1747.54, 1579.96), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
s=new_marker_set('particle_319 geometry')
marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((4069.91, 2089.97, 1279.18), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
s=new_marker_set('particle_320 geometry')
marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((3730.76, 2467.57, 1266.91), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
s=new_marker_set('particle_321 geometry')
marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((3489.17, 2844.16, 1117.23), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
s=new_marker_set('particle_322 geometry')
marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((3403.27, 3196.55, 896.098), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
s=new_marker_set('particle_323 geometry')
marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((3435.2, 3524.05, 483.942), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
s=new_marker_set('particle_324 geometry')
marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((3738.71, 3078.8, 824.448), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
s=new_marker_set('particle_325 geometry')
marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((3834.35, 2767.45, 1214.9), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
s=new_marker_set('particle_326 geometry')
marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((3532.1, 2447.98, 1012.46), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
s=new_marker_set('particle_327 geometry')
marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((3403.8, 2097.61, 744.902), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
s=new_marker_set('particle_328 geometry')
marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((3162.02, 2310.14, 1067.68), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
s=new_marker_set('particle_329 geometry')
marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((3015.75, 2593.73, 1238.66), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
s=new_marker_set('particle_330 geometry')
marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((3067.43, 2847.18, 1025.19), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
s=new_marker_set('particle_331 geometry')
marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((3198.96, 3059.58, 665.749), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
s=new_marker_set('particle_332 geometry')
marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((3361.74, 3261.97, 266.597), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
s=new_marker_set('particle_333 geometry')
marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((3474.49, 2997.29, 578.486), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
s=new_marker_set('particle_334 geometry')
marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((3704.33, 2722.38, 1088.12), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
s=new_marker_set('particle_335 geometry')
marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((3709.04, 2497.45, 1712.79), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
s=new_marker_set('particle_336 geometry')
marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((3893.06, 2194.75, 2060.4), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
s=new_marker_set('particle_337 geometry')
marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((4274.66, 1801.39, 2060.41), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
s=new_marker_set('particle_338 geometry')
marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((4743.68, 1673.55, 2041.41), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
s=new_marker_set('particle_339 geometry')
marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((4991.08, 1824.33, 2450.91), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
s=new_marker_set('particle_340 geometry')
marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((5306.74, 2105.19, 2419.33), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
s=new_marker_set('particle_341 geometry')
marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((5440.48, 1763.78, 2253.7), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
s=new_marker_set('particle_342 geometry')
marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((5132.5, 1602.26, 2447.13), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
s=new_marker_set('particle_343 geometry')
marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((4744.16, 1605.75, 2318.8), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
s=new_marker_set('particle_344 geometry')
marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((4541.95, 1727.83, 1843.62), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
s=new_marker_set('particle_345 geometry')
marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((4623.82, 1487.7, 1268.41), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
s=new_marker_set('particle_346 geometry')
marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((4322.88, 1713.6, 883.234), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
s=new_marker_set('particle_347 geometry')
marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((3811.99, 2113.1, 888.839), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
s=new_marker_set('particle_348 geometry')
marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((3797.13, 2613.17, 881.257), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
s=new_marker_set('particle_349 geometry')
marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((3710.98, 2919.37, 705.159), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
s=new_marker_set('particle_350 geometry')
marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((3387.63, 2773.49, 709.053), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
s=new_marker_set('particle_351 geometry')
marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((3458.57, 2381.73, 960.765), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
s=new_marker_set('particle_352 geometry')
marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((3801.8, 2110.26, 1301.45), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
s=new_marker_set('particle_353 geometry')
marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((4358.21, 1805.29, 1541.84), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
s=new_marker_set('particle_354 geometry')
marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((4835.22, 1652.18, 1695.08), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
s=new_marker_set('particle_355 geometry')
marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((4967.96, 1558.3, 2005.6), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
s=new_marker_set('particle_356 geometry')
marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((4721.65, 1905.02, 2449.97), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
s=new_marker_set('particle_357 geometry')
marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((4477.53, 2124.41, 3134.73), (0.7, 0.7, 0.7), 154.945)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
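# The blocks above appear to be auto-generated marker commands: each one creates
# (or reuses) a named marker set and places a single marker at an (x, y, z)
# centre with an (r, g, b) colour and a radius.  A compact sketch of that same
# pattern -- here `particles` is an assumed list of (centre, colour, radius)
# tuples, not something defined in this session file -- would be:
#
#   for i, (centre, colour, radius) in enumerate(particles):
#       name = 'particle_%d geometry' % i
#       if name not in marker_sets:
#           marker_sets[name] = new_marker_set(name)
#       s = marker_sets[name]
#       mark = s.place_marker(centre, colour, radius)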
| gpl-3.0 |
crosswalk-project/chromium-crosswalk-efl | third_party/closure_linter/closure_linter/error_fixer_test.py | 121 | 1719 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the error_fixer module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
import unittest as googletest
from closure_linter import error_fixer
from closure_linter import testutil
class ErrorFixerTest(googletest.TestCase):
"""Unit tests for error_fixer."""
def setUp(self):
self.error_fixer = error_fixer.ErrorFixer()
def testDeleteToken(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
second_token = start_token.next
self.error_fixer.HandleFile('test_file', start_token)
self.error_fixer._DeleteToken(start_token)
self.assertEqual(second_token, self.error_fixer._file_token)
def testDeleteTokens(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
fourth_token = start_token.next.next.next
self.error_fixer.HandleFile('test_file', start_token)
self.error_fixer._DeleteTokens(start_token, 3)
self.assertEqual(fourth_token, self.error_fixer._file_token)
_TEST_SCRIPT = """\
var x = 3;
"""
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
SHMEDIALIMITED/nodec | node_modules/grunt-css/node_modules/gzip-js/test/unzipTest.py | 182 | 1840 | import os
from colorama import Fore
from helpers import run_cmd
outDirDefault = 'test-outs'
testDirDefault = 'test-files'
"""
Run a single test
@param tFile- required; the file to check against (uncompressed data)
@param level- optional (default: all); the compression level [1-9]
@return True if all tests passed; False if at least one test failed
"""
def runTest(tFile, level=None, outDir=outDirDefault):
passed = True
if level == None:
for x in range(1, 10):
if runTest(tFile, x) == False:
passed = False
return passed
out1 = os.path.join(outDir, '%(file)s.%(level)d.gz' % {'file': os.path.basename(tFile), 'level' : level})
out2 = os.path.join(outDir, '%(file)s.%(level)d' % {'file' : os.path.basename(tFile), 'level' : level})
run_cmd('gzip -%(level)d -c %(file)s >> %(output)s' % {'level' : level, 'file' : tFile, 'output' : out1})
run_cmd('../bin/gunzip.js --file %(file)s --output %(output)s' % {'level' : level, 'file' : out1, 'output' : out2})
result = run_cmd('diff %(file1)s %(file2)s' % {'file1' : tFile, 'file2' : out2})
if result['returncode'] == 0:
status = Fore.GREEN + 'PASSED' + Fore.RESET
else:
passed = False
status = Fore.RED + 'FAILED' + Fore.RESET
print 'Level %(level)d: %(status)s' % {'level' : level, 'status' : status}
return passed
"""
Runs all tests on the given level. This iterates throuth the testDir directory defined above.
@param level- The level to run on [1-9] (default: None, runs on all levels all)
@return True if all levels passed, False if at least one failed
"""
def runAll(level=None, testDir=testDirDefault, outDir=outDirDefault):
passed = True
for tFile in os.listdir(testDir):
fullPath = os.path.join(testDir, tFile)
print Fore.YELLOW + tFile + Fore.RESET
if runTest(fullPath, level) == False:
passed = False
print ''
return passed
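# Hypothetical driver (not part of the original module) showing how the helpers
# above are intended to be combined; the exit-code convention is an assumption
# for illustration only:
#
#   if __name__ == '__main__':
#       import sys
#       ok = runAll()                # every file in test-files/, gzip levels 1-9
#       sys.exit(0 if ok else 1)     # non-zero exit flags at least one failed diff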
| mit |
hobarrera/django | tests/test_client_regress/tests.py | 15 | 63582 | # -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from __future__ import unicode_literals
import itertools
import os
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.http import HttpResponse
from django.template import (
Context, RequestContext, TemplateSyntaxError, engines,
)
from django.template.response import SimpleTemplateResponse
from django.test import (
Client, SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.client import RedirectCycleError, RequestFactory, encode_file
from django.test.utils import ContextList, str_prefix
from django.urls import NoReverseMatch, reverse
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.translation import ugettext_lazy
from .models import CustomUser
from .views import CustomTestException
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password')
cls.staff = User.objects.create_user(username='staff', password='password', is_staff=True)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertContainsTests(SimpleTestCase):
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'once')
except AssertionError as e:
self.assertIn("Response should not contain 'once'", str(e))
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response should not contain 'once'", str(e))
try:
self.assertContains(response, 'never', 1)
except AssertionError as e:
self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'once', 0)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 2)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'twice', 1)
except AssertionError as e:
self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'thrice')
except AssertionError as e:
self.assertIn("Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', 3)
except AssertionError as e:
self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))
def test_unicode_contains(self):
"Unicode characters can be found in template context"
# Regression test for #10183
r = self.client.get('/check_unicode/')
self.assertContains(r, 'さかき')
self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
# Regression test for #10183
r = self.client.get('/check_unicode/')
self.assertNotContains(r, 'はたけ')
self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
def test_binary_contains(self):
r = self.client.get('/check_binary/')
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e', count=2)
def test_binary_not_contains(self):
r = self.client.get('/check_binary/')
self.assertNotContains(r, b'%ODF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertNotContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
def test_nontext_contains(self):
r = self.client.get('/no_template_view/')
self.assertContains(r, ugettext_lazy('once'))
def test_nontext_not_contains(self):
r = self.client.get('/no_template_view/')
self.assertNotContains(r, ugettext_lazy('never'))
def test_assert_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateResponse
without throwing an error.
Refs #15826.
"""
template = engines['django'].from_string('Hello')
response = SimpleTemplateResponse(template)
self.assertContains(response, 'Hello')
def test_assert_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertContains(response, 'Hello')
def test_assert_not_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateResponse
without throwing an error.
Refs #15826.
"""
template = engines['django'].from_string('Hello')
response = SimpleTemplateResponse(template)
self.assertNotContains(response, 'Bye')
def test_assert_not_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertNotContains(response, 'Bye')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertTemplateUsedTests(TestDataMixin, TestCase):
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError as e:
self.assertIn("No templates used to render the response", str(e))
try:
self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: No templates used to render the response", str(e))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'GET Template', count=2)
self.assertIn(
"No templates used to render the response",
str(context.exception))
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/post_view/', {})
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError as e:
self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError as e:
self.assertIn(
"Template 'Empty POST Template' was not a template used to "
"render the response. Actual template(s) used: Empty GET Template",
str(e)
)
try:
self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn(
"abc: Template 'Empty POST Template' was not a template used "
"to render the response. Actual template(s) used: Empty GET Template",
str(e)
)
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'Empty GET Template', count=2)
self.assertIn(
"Template 'Empty GET Template' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(
response, 'Empty GET Template', msg_prefix='abc', count=2)
self.assertIn(
"abc: Template 'Empty GET Template' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError as e:
self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError as e:
self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError as e:
self.assertIn(
"Template 'Valid POST Template' was not a template used to "
"render the response. Actual template(s) used: form_view.html, base.html",
str(e)
)
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'base.html', count=2)
self.assertIn(
"Template 'base.html' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
def test_template_rendered_multiple_times(self):
"""Template assertions work when a template is rendered multiple times."""
response = self.client.get('/render_template_multiple_times/')
self.assertTemplateUsed(response, 'base.html', count=2)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertRedirectsTests(SimpleTestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/permanent_redirect_view/')
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e))
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/some_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/')
except AssertionError as e:
self.assertIn(
"Couldn't retrieve redirection page '/permanent_redirect_view/': "
"response code was 301 (expected 200)",
str(e)
)
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn(
"abc: Couldn't retrieve redirection page '/permanent_redirect_view/': "
"response code was 301 (expected 200)",
str(e)
)
def test_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/redirects/further/more/', {}, follow=True)
self.assertRedirects(response, '/no_template_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0], ('/no_template_view/', 302))
def test_multiple_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/redirects/', {}, follow=True)
self.assertRedirects(response, '/no_template_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 3)
self.assertEqual(response.redirect_chain[0], ('/redirects/further/', 302))
self.assertEqual(response.redirect_chain[1], ('/redirects/further/more/', 302))
self.assertEqual(response.redirect_chain[2], ('/no_template_view/', 302))
def test_redirect_chain_to_non_existent(self):
"You can follow a chain to a non-existent view"
response = self.client.get('/redirect_to_non_existent_view2/', {}, follow=True)
self.assertRedirects(response, '/non_existent_view/', status_code=302, target_status_code=404)
def test_redirect_chain_to_self(self):
"Redirections to self are caught and escaped"
with self.assertRaises(RedirectCycleError) as context:
self.client.get('/redirect_to_self/', {}, follow=True)
response = context.exception.last_response
# The chain of redirects stops once the cycle is detected.
self.assertRedirects(response, '/redirect_to_self/', status_code=302, target_status_code=302)
self.assertEqual(len(response.redirect_chain), 2)
def test_redirect_to_self_with_changing_query(self):
"Redirections don't loop forever even if query is changing"
with self.assertRaises(RedirectCycleError):
self.client.get('/redirect_to_self_with_changing_query_view/', {'counter': '0'}, follow=True)
def test_circular_redirect(self):
"Circular redirect chains are caught and escaped"
with self.assertRaises(RedirectCycleError) as context:
self.client.get('/circular_redirect_1/', {}, follow=True)
response = context.exception.last_response
# The chain of redirects will get back to the starting point, but stop there.
self.assertRedirects(response, '/circular_redirect_2/', status_code=302, target_status_code=302)
self.assertEqual(len(response.redirect_chain), 4)
def test_redirect_chain_post(self):
"A redirect chain will be followed from an initial POST post"
response = self.client.post('/redirects/', {'nothing': 'to_send'}, follow=True)
self.assertRedirects(response, '/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_head(self):
"A redirect chain will be followed from an initial HEAD request"
response = self.client.head('/redirects/', {'nothing': 'to_send'}, follow=True)
self.assertRedirects(response, '/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_options(self):
"A redirect chain will be followed from an initial OPTIONS request"
response = self.client.options('/redirects/', follow=True)
self.assertRedirects(response, '/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_put(self):
"A redirect chain will be followed from an initial PUT request"
response = self.client.put('/redirects/', follow=True)
self.assertRedirects(response, '/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_delete(self):
"A redirect chain will be followed from an initial DELETE request"
response = self.client.delete('/redirects/', follow=True)
self.assertRedirects(response, '/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_to_different_host(self):
"The test client will preserve scheme, host and port changes"
response = self.client.get('/redirect_other_host/', follow=True)
self.assertRedirects(
response, 'https://otherserver:8443/no_template_view/',
status_code=302, target_status_code=200
)
# We can't use is_secure() or get_host()
# because response.request is a dictionary, not an HttpRequest
self.assertEqual(response.request.get('wsgi.url_scheme'), 'https')
self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
self.assertEqual(response.request.get('SERVER_PORT'), '8443')
def test_redirect_chain_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/get_view/', follow=True)
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/get_view/')
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_scheme(self):
"An assertion is raised if the response doesn't have the scheme specified in expected_url"
# For all possible True/False combinations of follow and secure
for follow, secure in itertools.product([True, False], repeat=2):
# always redirects to https
response = self.client.get('/https_redirect_view/', follow=follow, secure=secure)
# the goal scheme is https
self.assertRedirects(response, 'https://testserver/secure_view/', status_code=302)
with self.assertRaises(AssertionError):
self.assertRedirects(response, 'http://testserver/secure_view/', status_code=302)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_full_path_in_expected_urls(self):
"""
Test that specifying a full URL as assertRedirects expected_url still
        works as backwards compatible behavior until Django 2.0.
"""
response = self.client.get('/redirect_view/')
self.assertRedirects(response, 'http://testserver/get_view/')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertFormErrorTests(SimpleTestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'wrong_form' was not used to render the response", str(e))
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e))
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e))
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e))
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError as e:
self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e))
try:
self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e))
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError as e:
self.assertIn(
str_prefix(
"The field 'email' on form 'form' in context 0 does not "
"contain the error 'Some error.' (actual errors: "
"[%(_)s'Enter a valid email address.'])"
), str(e)
)
try:
self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn(
str_prefix(
"abc: The field 'email' on form 'form' in context 0 does "
"not contain the error 'Some error.' (actual errors: "
"[%(_)s'Enter a valid email address.'])",
), str(e)
)
def test_unknown_nonfield_error(self):
"""
        Checks that an assertion is raised if the form's non-field errors
        don't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError as e:
self.assertIn(
"The form 'form' in context 0 does not contain the non-field "
"error 'Some error.' (actual errors: )",
str(e)
)
try:
self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn(
"abc: The form 'form' in context 0 does not contain the "
"non-field error 'Some error.' (actual errors: )",
str(e)
)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertFormsetErrorTests(SimpleTestCase):
msg_prefixes = [("", {}), ("abc: ", {"msg_prefix": "abc"})]
def setUp(self):
"""Makes response object for testing field and non-field errors"""
# For testing field and non-field errors
self.response_form_errors = self.getResponse({
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-text': 'Raise non-field error',
'form-0-email': 'not an email address',
'form-0-value': 37,
'form-0-single': 'b',
'form-0-multi': ('b', 'c', 'e'),
'form-1-text': 'Hello World',
'form-1-email': '[email protected]',
'form-1-value': 37,
'form-1-single': 'b',
'form-1-multi': ('b', 'c', 'e'),
})
# For testing non-form errors
self.response_nonform_errors = self.getResponse({
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-text': 'Hello World',
'form-0-email': '[email protected]',
'form-0-value': 37,
'form-0-single': 'b',
'form-0-multi': ('b', 'c', 'e'),
'form-1-text': 'Hello World',
'form-1-email': '[email protected]',
'form-1-value': 37,
'form-1-single': 'b',
'form-1-multi': ('b', 'c', 'e'),
})
def getResponse(self, post_data):
response = self.client.post('/formset_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
return response
def test_unknown_formset(self):
"An assertion is raised if the formset name is unknown"
for prefix, kwargs in self.msg_prefixes:
msg = prefix + "The formset 'wrong_formset' was not used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(
self.response_form_errors,
'wrong_formset', 0, 'Some_field', 'Some error.', **kwargs
)
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
for prefix, kwargs in self.msg_prefixes:
msg = prefix + "The formset 'my_formset', form 0 in context 0 does not contain the field 'Some_field'"
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(
self.response_form_errors,
'my_formset', 0, 'Some_field', 'Some error.', **kwargs
)
def test_no_error_field(self):
"An assertion is raised if the field doesn't have any errors"
for prefix, kwargs in self.msg_prefixes:
msg = prefix + "The field 'value' on formset 'my_formset', form 1 in context 0 contains no errors"
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(self.response_form_errors, 'my_formset', 1, 'value', 'Some error.', **kwargs)
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the specified error"
for prefix, kwargs in self.msg_prefixes:
msg = str_prefix(
prefix + "The field 'email' on formset 'my_formset', form 0 "
"in context 0 does not contain the error 'Some error.' "
"(actual errors: [%(_)s'Enter a valid email address.'])"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', 'Some error.', **kwargs)
def test_field_error(self):
"No assertion is raised if the field contains the provided error"
error_msg = ['Enter a valid email address.']
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', error_msg, **kwargs)
def test_no_nonfield_error(self):
"An assertion is raised if the formsets non-field errors doesn't contain any errors."
for prefix, kwargs in self.msg_prefixes:
msg = prefix + "The formset 'my_formset', form 1 in context 0 does not contain any non-field errors."
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(self.response_form_errors, 'my_formset', 1, None, 'Some error.', **kwargs)
def test_unknown_nonfield_error(self):
"An assertion is raised if the formsets non-field errors doesn't contain the provided error."
for prefix, kwargs in self.msg_prefixes:
msg = str_prefix(
prefix + "The formset 'my_formset', form 0 in context 0 does not "
"contain the non-field error 'Some error.' (actual errors: "
"[%(_)s'Non-field error.'])"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Some error.', **kwargs)
def test_nonfield_error(self):
"No assertion is raised if the formsets non-field errors contains the provided error."
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Non-field error.', **kwargs)
def test_no_nonform_error(self):
"An assertion is raised if the formsets non-form errors doesn't contain any errors."
for prefix, kwargs in self.msg_prefixes:
msg = prefix + "The formset 'my_formset' in context 0 does not contain any non-form errors."
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(self.response_form_errors, 'my_formset', None, None, 'Some error.', **kwargs)
def test_unknown_nonform_error(self):
"An assertion is raised if the formsets non-form errors doesn't contain the provided error."
for prefix, kwargs in self.msg_prefixes:
msg = str_prefix(
prefix +
"The formset 'my_formset' in context 0 does not contain the "
"non-form error 'Some error.' (actual errors: [%(_)s'Forms "
"in a set must have distinct email addresses.'])"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(
self.response_nonform_errors,
'my_formset', None, None, 'Some error.', **kwargs
)
def test_nonform_error(self):
"No assertion is raised if the formsets non-form errors contains the provided error."
msg = 'Forms in a set must have distinct email addresses.'
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_nonform_errors, 'my_formset', None, None, msg, **kwargs)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class LoginTests(TestDataMixin, TestCase):
def test_login_different_client(self):
"Check that using a different test client doesn't violate authentication"
# Create a second client, and log in.
c = Client()
login = c.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Get a redirection page with the second client.
response = c.get("/login_protected_redirect_view/")
        # At this point, self.client isn't logged in.
# Check that assertRedirects uses the original client, not the
# default client.
self.assertRedirects(response, "/get_view/")
@override_settings(
SESSION_ENGINE='test_client_regress.session',
ROOT_URLCONF='test_client_regress.urls',
)
class SessionEngineTests(TestDataMixin, TestCase):
def test_login(self):
"A session engine that modifies the session key can be used to log in"
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Try to access a login protected page.
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class URLEscapingTests(SimpleTestCase):
def test_simple_argument_get(self):
"Get a view that has a simple string argument"
response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Howdy, Slartibartfast')
def test_argument_with_space_get(self):
"Get a view that has a string argument that requires escaping"
response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Hi, Arthur')
def test_simple_argument_post(self):
"Post for a view that has a simple string argument"
response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Howdy, Slartibartfast')
def test_argument_with_space_post(self):
"Post for a view that has a string argument that requires escaping"
response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Hi, Arthur')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class ExceptionTests(TestDataMixin, TestCase):
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/staff_only/")
self.fail("General users should not be able to visit this page")
except CustomTestException:
pass
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a problem.
login = self.client.login(username='staff', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/staff_only/")
except CustomTestException:
self.fail("Staff should be able to visit this page")
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class TemplateExceptionTests(SimpleTestCase):
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'bad_templates')],
}])
def test_bad_404_template(self):
"Errors found when rendering 404 error templates are re-raised"
try:
self.client.get("/no_such_view/")
except TemplateSyntaxError:
pass
else:
self.fail("Should get error about syntax error in template")
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class UrlconfSubstitutionTests(SimpleTestCase):
def test_urlconf_was_changed(self):
"TestCase can enforce a custom URLconf on a per-test basis"
url = reverse('arg_view', args=['somename'])
self.assertEqual(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(SimpleTestCase):
def test_urlconf_was_reverted(self):
"""URLconf is reverted to original value after modification in a TestCase
This will not find a match as the default ROOT_URLCONF is empty.
"""
with self.assertRaises(NoReverseMatch):
reverse('arg_view', args=['somename'])
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class ContextTests(TestDataMixin, TestCase):
def test_single_context(self):
"Context variables can be retrieved from a single context"
response = self.client.get("/request_data/", data={'foo': 'whiz'})
self.assertIsInstance(response.context, RequestContext)
self.assertIn('get-foo', response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['data'], 'sausage')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError as e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_inherited_context(self):
"Context variables can be retrieved from a list of contexts"
response = self.client.get("/request_data_extended/", data={'foo': 'whiz'})
self.assertEqual(response.context.__class__, ContextList)
self.assertEqual(len(response.context), 2)
self.assertIn('get-foo', response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['data'], 'bacon')
with self.assertRaises(KeyError) as cm:
response.context['does-not-exist']
self.assertEqual(cm.exception.args[0], 'does-not-exist')
def test_contextlist_keys(self):
c1 = Context()
c1.update({'hello': 'world', 'goodbye': 'john'})
c1.update({'hello': 'dolly', 'dolly': 'parton'})
c2 = Context()
c2.update({'goodbye': 'world', 'python': 'rocks'})
c2.update({'goodbye': 'dolly'})
l = ContextList([c1, c2])
# None, True and False are builtins of BaseContext, and present
# in every Context without needing to be added.
self.assertEqual({'None', 'True', 'False', 'hello', 'goodbye', 'python', 'dolly'}, l.keys())
def test_15368(self):
# Need to insert a context processor that assumes certain things about
# the request instance. This triggers a bug caused by some ways of
# copying RequestContext.
with self.settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'test_client_regress.context_processors.special',
],
},
}]):
response = self.client.get("/request_context_view/")
self.assertContains(response, 'Path: /request_context_view/')
def test_nested_requests(self):
"""
        response.context is not lost when a view calls another view.
"""
response = self.client.get("/nested_view/")
self.assertIsInstance(response.context, RequestContext)
self.assertEqual(response.context['nested'], 'yes')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class SessionTests(TestDataMixin, TestCase):
def test_session(self):
"The session isn't lost if a user logs in"
# The session doesn't exist to start.
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'NO')
# This request sets a session variable.
response = self.client.get('/set_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'set_session')
# Check that the session has been modified
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Session should still contain the modified value
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
def test_session_initiated(self):
session = self.client.session
session['session_var'] = 'foo'
session.save()
response = self.client.get('/check_session/')
self.assertEqual(response.content, b'foo')
def test_logout(self):
"""Logout should work whether the user is logged in or not (#9978)."""
self.client.logout()
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
self.client.logout()
self.client.logout()
def test_logout_with_user(self):
"""Logout should send user_logged_out signal if user was logged in."""
def listener(*args, **kwargs):
listener.executed = True
self.assertEqual(kwargs['sender'], User)
listener.executed = False
user_logged_out.connect(listener)
self.client.login(username='testclient', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
@override_settings(AUTH_USER_MODEL='test_client_regress.CustomUser')
def test_logout_with_custom_user(self):
"""Logout should send user_logged_out signal if custom user was logged in."""
def listener(*args, **kwargs):
self.assertEqual(kwargs['sender'], CustomUser)
listener.executed = True
listener.executed = False
u = CustomUser.custom_objects.create(email='[email protected]')
u.set_password('password')
u.save()
user_logged_out.connect(listener)
self.client.login(username='[email protected]', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
@override_settings(AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',
'test_client_regress.auth_backends.CustomUserBackend'))
def test_logout_with_custom_auth_backend(self):
"Request a logout after logging in with custom authentication backend"
def listener(*args, **kwargs):
self.assertEqual(kwargs['sender'], CustomUser)
listener.executed = True
listener.executed = False
u = CustomUser.custom_objects.create(email='[email protected]')
u.set_password('password')
u.save()
user_logged_out.connect(listener)
self.client.login(username='[email protected]', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
def test_logout_without_user(self):
"""Logout should send signal even if user not authenticated."""
def listener(user, *args, **kwargs):
listener.user = user
listener.executed = True
listener.executed = False
user_logged_out.connect(listener)
self.client.login(username='incorrect', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
self.assertIsNone(listener.user)
def test_login_with_user(self):
"""Login should send user_logged_in signal on successful login."""
def listener(*args, **kwargs):
listener.executed = True
listener.executed = False
user_logged_in.connect(listener)
self.client.login(username='testclient', password='password')
        user_logged_in.disconnect(listener)
self.assertTrue(listener.executed)
def test_login_without_signal(self):
"""Login shouldn't send signal if user wasn't logged in"""
def listener(*args, **kwargs):
listener.executed = True
listener.executed = False
user_logged_in.connect(listener)
self.client.login(username='incorrect', password='password')
user_logged_in.disconnect(listener)
self.assertFalse(listener.executed)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestMethodTests(SimpleTestCase):
def test_get(self):
"Request a view via request method GET"
response = self.client.get('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: GET')
def test_post(self):
"Request a view via request method POST"
response = self.client.post('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_head(self):
"Request a view via request method HEAD"
response = self.client.head('/request_methods/')
self.assertEqual(response.status_code, 200)
# A HEAD request doesn't return any content.
self.assertNotEqual(response.content, b'request method: HEAD')
self.assertEqual(response.content, b'')
def test_options(self):
"Request a view via request method OPTIONS"
response = self.client.options('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: OPTIONS')
def test_put(self):
"Request a view via request method PUT"
response = self.client.put('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
def test_delete(self):
"Request a view via request method DELETE"
response = self.client.delete('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: DELETE')
def test_patch(self):
"Request a view via request method PATCH"
response = self.client.patch('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PATCH')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestMethodStringDataTests(SimpleTestCase):
def test_post(self):
"Request a view with string data via request method POST"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.post('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_put(self):
"Request a view with string data via request method PUT"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.put('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
def test_patch(self):
"Request a view with string data via request method PATCH"
# Regression test for #17797
data = '{"test": "json"}'
response = self.client.patch('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PATCH')
def test_empty_string_data(self):
"Request a view with empty string data via request method GET/POST/HEAD"
# Regression test for #21740
response = self.client.get('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
response = self.client.post('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
response = self.client.head('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
def test_json(self):
response = self.client.get('/json_response/')
self.assertEqual(response.json(), {'key': 'value'})
def test_json_wrong_header(self):
response = self.client.get('/body/')
msg = 'Content-Type header is "text/html; charset=utf-8", not "application/json"'
with self.assertRaisesMessage(ValueError, msg):
self.assertEqual(response.json(), {'key': 'value'})
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class QueryStringTests(SimpleTestCase):
def test_get_like_requests(self):
# See: https://code.djangoproject.com/ticket/10571.
for method_name in ('get', 'head'):
# A GET-like request can pass a query string as data
method = getattr(self.client, method_name)
response = method("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context['get-foo'], 'whiz')
# A GET-like request can pass a query string as part of the URL
response = method("/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
# Data provided in the URL to a GET-like request is overridden by actual form data
response = method("/request_data/?foo=whiz", data={'foo': 'bang'})
self.assertEqual(response.context['get-foo'], 'bang')
response = method("/request_data/?foo=whiz", data={'bar': 'bang'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['get-bar'], 'bang')
def test_post_like_requests(self):
# A POST-like request can pass a query string as data
response = self.client.post("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['post-foo'], 'whiz')
# A POST-like request can pass a query string as part of the URL
response = self.client.post("/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], None)
# POST data provided in the URL augments actual form data
response = self.client.post("/request_data/?foo=whiz", data={'foo': 'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], 'bang')
response = self.client.post("/request_data/?foo=whiz", data={'bar': 'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['get-bar'], None)
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['post-bar'], 'bang')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class UnicodePayloadTests(SimpleTestCase):
def test_simple_unicode_payload(self):
"A simple ASCII-only unicode JSON document can be POSTed"
# Regression test for #10571
json = '{"english": "mountain pass"}'
response = self.client.post("/parse_unicode_json/", json, content_type="application/json")
self.assertEqual(response.content, json.encode())
def test_unicode_payload_utf8(self):
"A non-ASCII unicode data encoded as UTF-8 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json, content_type="application/json; charset=utf-8")
self.assertEqual(response.content, json.encode('utf-8'))
def test_unicode_payload_utf16(self):
"A non-ASCII unicode data encoded as UTF-16 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json, content_type="application/json; charset=utf-16")
self.assertEqual(response.content, json.encode('utf-16'))
def test_unicode_payload_non_utf(self):
"A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json, content_type="application/json; charset=koi8-r")
self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
def __init__(self, filename):
self.name = filename
def read(self):
return b'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(SimpleTestCase):
def test_file_encoding(self):
encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0])
self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1])
def test_guesses_content_type_on_file_encoding(self):
self.assertEqual(b'Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
self.assertEqual(b'Content-Type: text/plain',
encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], (
b'Content-Type: application/x-compress',
b'Content-Type: application/x-zip',
b'Content-Type: application/x-zip-compressed',
b'Content-Type: application/zip',))
self.assertEqual(b'Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class RequestHeadersTest(SimpleTestCase):
def test_client_headers(self):
"A test client can receive custom headers"
response = self.client.get("/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertEqual(response.status_code, 200)
def test_client_headers_redirect(self):
"Test client headers are preserved through redirects"
response = self.client.get("/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertRedirects(response, '/check_headers/', status_code=302, target_status_code=200)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class ReadLimitedStreamTest(SimpleTestCase):
"""
Tests that ensure that HttpRequest.body, HttpRequest.read() and
HttpRequest.read(BUFFER) have proper LimitedStream behavior.
Refs #14753, #15785
"""
def test_body_from_empty_request(self):
"""HttpRequest.body on a test client GET request should return
the empty string."""
self.assertEqual(self.client.get("/body/").content, b'')
def test_read_from_empty_request(self):
"""HttpRequest.read() on a test client GET request should return the
empty string."""
self.assertEqual(self.client.get("/read_all/").content, b'')
def test_read_numbytes_from_empty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client GET request should
return the empty string."""
self.assertEqual(self.client.get("/read_buffer/").content, b'')
def test_read_from_nonempty_request(self):
"""HttpRequest.read() on a test client PUT request with some payload
should return that payload."""
payload = b'foobar'
self.assertEqual(self.client.put("/read_all/", data=payload, content_type='text/plain').content, payload)
def test_read_numbytes_from_nonempty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client PUT request with
some payload should return that payload."""
payload = b'foobar'
self.assertEqual(self.client.put("/read_buffer/", data=payload, content_type='text/plain').content, payload)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestFactoryStateTest(SimpleTestCase):
"""Regression tests for #15929."""
# These tests are checking that certain middleware don't change certain
# global state. Alternatively, from the point of view of a test, they are
# ensuring test isolation behavior. So, unusually, it doesn't make sense to
# run the tests individually, and if any are failing it is confusing to run
# them with any other set of tests.
def common_test_that_should_always_pass(self):
request = RequestFactory().get('/')
request.session = {}
self.assertFalse(hasattr(request, 'user'))
def test_request(self):
self.common_test_that_should_always_pass()
def test_request_after_client(self):
# apart from the next line the three tests are identical
self.client.get('/')
self.common_test_that_should_always_pass()
def test_request_after_client_2(self):
# This test is executed after the previous one
self.common_test_that_should_always_pass()
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestFactoryEnvironmentTests(SimpleTestCase):
"""
Regression tests for #8551 and #17067: ensure that environment variables
are set correctly in RequestFactory.
"""
def test_should_set_correct_env_variables(self):
request = RequestFactory().get('/path/')
self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1')
self.assertEqual(request.META.get('SERVER_NAME'), 'testserver')
self.assertEqual(request.META.get('SERVER_PORT'), '80')
self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1')
self.assertEqual(request.META.get('SCRIPT_NAME') + request.META.get('PATH_INFO'), '/path/')
| bsd-3-clause |
kiall/designate-py3 | designate/storage/impl_sqlalchemy/migrate_repo/versions/058_placeholder.py | 140 | 1035 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass
| apache-2.0 |
chirilo/mozillians | vendor-local/lib/python/djcelery/tests/test_backends/test_cache.py | 30 | 4992 | from __future__ import absolute_import
import sys
from datetime import timedelta
from django.core.cache.backends.base import InvalidCacheBackendError
from celery import result
from celery import states
from celery.utils import gen_unique_id
from celery.datastructures import ExceptionInfo
from djcelery.backends.cache import CacheBackend
from djcelery.tests.utils import unittest
class SomeClass(object):
def __init__(self, data):
self.data = data
class test_CacheBackend(unittest.TestCase):
def test_mark_as_done(self):
cb = CacheBackend()
tid = gen_unique_id()
self.assertEqual(cb.get_status(tid), states.PENDING)
self.assertIsNone(cb.get_result(tid))
cb.mark_as_done(tid, 42)
self.assertEqual(cb.get_status(tid), states.SUCCESS)
self.assertEqual(cb.get_result(tid), 42)
self.assertTrue(cb.get_result(tid), 42)
def test_forget(self):
b = CacheBackend()
tid = gen_unique_id()
b.mark_as_done(tid, {"foo": "bar"})
self.assertEqual(b.get_result(tid).get("foo"), "bar")
b.forget(tid)
self.assertNotIn(tid, b._cache)
self.assertIsNone(b.get_result(tid))
def test_save_restore_delete_taskset(self):
backend = CacheBackend()
taskset_id = gen_unique_id()
subtask_ids = [gen_unique_id() for i in range(10)]
subtasks = map(result.AsyncResult, subtask_ids)
res = result.TaskSetResult(taskset_id, subtasks)
res.save(backend=backend)
saved = result.TaskSetResult.restore(taskset_id, backend=backend)
self.assertListEqual(saved.subtasks, subtasks)
self.assertEqual(saved.taskset_id, taskset_id)
saved.delete(backend=backend)
self.assertIsNone(result.TaskSetResult.restore(taskset_id,
backend=backend))
def test_is_pickled(self):
cb = CacheBackend()
tid2 = gen_unique_id()
result = {"foo": "baz", "bar": SomeClass(12345)}
cb.mark_as_done(tid2, result)
# is serialized properly.
rindb = cb.get_result(tid2)
self.assertEqual(rindb.get("foo"), "baz")
self.assertEqual(rindb.get("bar").data, 12345)
def test_mark_as_failure(self):
cb = CacheBackend()
einfo = None
tid3 = gen_unique_id()
try:
raise KeyError("foo")
except KeyError, exception:
einfo = ExceptionInfo(sys.exc_info())
pass
cb.mark_as_failure(tid3, exception, traceback=einfo.traceback)
self.assertEqual(cb.get_status(tid3), states.FAILURE)
self.assertIsInstance(cb.get_result(tid3), KeyError)
self.assertEqual(cb.get_traceback(tid3), einfo.traceback)
def test_process_cleanup(self):
cb = CacheBackend()
cb.process_cleanup()
def test_set_expires(self):
cb1 = CacheBackend(expires=timedelta(seconds=16))
self.assertEqual(cb1.expires, 16)
cb2 = CacheBackend(expires=32)
self.assertEqual(cb2.expires, 32)
class test_custom_CacheBackend(unittest.TestCase):
def test_custom_cache_backend(self):
from celery import current_app
prev_backend = current_app.conf.CELERY_CACHE_BACKEND
prev_module = sys.modules["djcelery.backends.cache"]
current_app.conf.CELERY_CACHE_BACKEND = "dummy://"
sys.modules.pop("djcelery.backends.cache")
try:
from djcelery.backends.cache import cache
from django.core.cache import cache as django_cache
self.assertEqual(cache.__class__.__module__,
"django.core.cache.backends.dummy")
self.assertIsNot(cache, django_cache)
finally:
current_app.conf.CELERY_CACHE_BACKEND = prev_backend
sys.modules["djcelery.backends.cache"] = prev_module
class test_MemcacheWrapper(unittest.TestCase):
def test_memcache_wrapper(self):
try:
from django.core.cache.backends import memcached
from django.core.cache.backends import locmem
except InvalidCacheBackendError:
sys.stderr.write(
"\n* Memcache library is not installed. Skipping test.\n")
return
prev_cache_cls = memcached.CacheClass
memcached.CacheClass = locmem.CacheClass
prev_backend_module = sys.modules.pop("djcelery.backends.cache")
try:
from djcelery.backends.cache import cache
key = "cu.test_memcache_wrapper"
val = "The quick brown fox."
default = "The lazy dog."
self.assertEqual(cache.get(key, default=default), default)
cache.set(key, val)
self.assertEqual(cache.get(key, default=default),
val)
finally:
memcached.CacheClass = prev_cache_cls
sys.modules["djcelery.backends.cache"] = prev_backend_module
| bsd-3-clause |
duolinwang/MusiteDeep | MusiteDeep/train_general.py | 1 | 4521 | import sys
import os
import pandas as pd
import numpy as np
import argparse
def main():
parser=argparse.ArgumentParser(description='MusiteDeep custom training tool for general PTM prediction.')
parser.add_argument('-input', dest='inputfile', type=str, help='training data in fasta format. Sites followed by "#" are positive sites for a specific PTM prediction.', required=True)
parser.add_argument('-output-prefix', dest='outputprefix', type=str, help='prefix of the output files (model and parameter files).', required=True)
    parser.add_argument('-residue-types', dest='residues', type=str, help='Residue types that this model focuses on. For multiple residues, separate each with \',\'. \n\
Note: all the residues specified by this parameter will be trained in one model.', required=True)
parser.add_argument('-valinput', dest='valfile', type=str, help='validation data in fasta format if any. It will randomly select 10 percent of samples from the training data as a validation data set, if no validation file is provided.', required=False,default=None)
    parser.add_argument('-nclass', dest='nclass', type=int, help='number of classifiers to be trained in one run. [Default:5]', required=False, default=5)
    parser.add_argument('-window', dest='window', type=int, help='window size: the number of amino acids on the left or right side adjacent to a potential PTM site. 2*\'window size\'+1 amino acids will be extracted for one potential fragment. [Default:16]', required=False, default=16)
parser.add_argument('-maxneg', dest='maxneg', type=int, help='maximum iterations for each classifier which controls the maximum copy number of the negative data which has the same size with the positive data. [Default: 50]', required=False, default=50)
    parser.add_argument('-nb_epoch', dest='nb_epoch', type=int, help='number of epochs for one bootstrap step. It is ignored if earlystop is set.', required=False, default=None)
parser.add_argument('-earlystop', dest='earlystop', type=int, help='after the \'earlystop\' number of epochs with no improvement the training will be stopped for one bootstrap step. [Default: 20]', required=False, default=20)
    parser.add_argument('-inputweights', dest='inputweights', type=str, help='Initial weights saved in a HDF5 file.', required=False, default=None)
    parser.add_argument('-backupweights', dest='backupweights', type=str, help='Set the intermediate weights for backup in a HDF5 file.', required=False, default=None)
parser.add_argument('-transferlayer', dest='transferlayer', type=int, help='Set the last \'transferlayer\' number of layers to be randomly initialized.', required=False, default=1)
args = parser.parse_args()
inputfile=args.inputfile;
valfile=args.valfile;
outputprefix=args.outputprefix;
nclass=args.nclass;
window=args.window;
maxneg=args.maxneg;
np_epoch2=args.nb_epoch;
earlystop=args.earlystop;
inputweights=args.inputweights;
backupweights=args.backupweights;
transferlayer=args.transferlayer
residues=args.residues.split(",")
outputmodel=outputprefix+str("_HDF5model");
outputparameter=outputprefix+str("_parameters");
try:
output = open(outputparameter, 'w')
except IOError:
print 'cannot write to ' + outputparameter+ "!\n";
exit()
else:
print >> output, "%d\t%d\t%s\tgeneral" % (nclass,window,args.residues)
from methods.Bootstrapping_allneg_continue_val import bootStrapping_allneg_continue_val
from methods.EXtractfragment_sort import extractFragforTraining
trainfrag=extractFragforTraining(inputfile,window,'-',focus=residues)
if(valfile is not None):
valfrag=extractFragforTraining(valfile,window,'-',focus= residues)
else:
valfrag=None;
for bt in range(nclass):
models=bootStrapping_allneg_continue_val(trainfrag.as_matrix(),valfile=valfrag,
srate=1,nb_epoch1=1,nb_epoch2=np_epoch2,earlystop=earlystop,maxneg=maxneg,
outputweights=backupweights,
inputweights=inputweights,
transferlayer=transferlayer)
models.save_weights(outputmodel+'_class'+str(bt),overwrite=True)
if __name__ == "__main__":
main()
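# A hypothetical invocation sketch (the fasta file names and the S,T,Y residue
# list below are only illustrative, not files shipped with this repository):
#
#   python train_general.py -input train.fasta -valinput val.fasta \
#       -output-prefix my_model -residue-types S,T,Y \
#       -nclass 5 -window 16 -maxneg 50 -earlystop 20
#
# This writes my_model_parameters plus one my_model_HDF5model_class<N> weight
# file for each of the -nclass classifiers trained above.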
| gpl-2.0 |
keyurpatel076/MissionPlannerGit | packages/IronPython.StdLib.2.7.4/content/Lib/io.py | 191 | 3624 | """The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
__author__ = ("Guido van Rossum <[email protected]>, "
"Mike Verdone <[email protected]>, "
"Mark Russell <[email protected]>, "
"Antoine Pitrou <[email protected]>, "
"Amaury Forgeot d'Arc <[email protected]>, "
"Benjamin Peterson <[email protected]>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper",
"UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"]
import _io
import abc
from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
open, FileIO, BytesIO, StringIO, BufferedReader,
BufferedWriter, BufferedRWPair, BufferedRandom,
IncrementalNewlineDecoder, TextIOWrapper)
OpenWrapper = _io.open # for compatibility with _pyio
# for seek()
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Declaring ABCs in C is tricky so we do it here.
# Method descriptions and default implementations are inherited from the C
# version however.
class IOBase(_io._IOBase):
__metaclass__ = abc.ABCMeta
class RawIOBase(_io._RawIOBase, IOBase):
pass
class BufferedIOBase(_io._BufferedIOBase, IOBase):
pass
class TextIOBase(_io._TextIOBase, IOBase):
pass
RawIOBase.register(FileIO)
for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom,
BufferedRWPair):
BufferedIOBase.register(klass)
for klass in (StringIO, TextIOWrapper):
TextIOBase.register(klass)
del klass
| gpl-3.0 |
Unrepentant-Atheist/mame | 3rdparty/benchmark/.ycm_extra_conf.py | 84 | 3641 | import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Werror',
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
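# For example, to point YCM at a CMake-generated database (hypothetical path,
# adjust to wherever your build system writes compile_commands.json):
#   compilation_database_folder = '/path/to/your/project/build'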
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cc' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
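# --- Illustrative check (not used by YouCompleteMe itself) ---
# Running this file directly prints the flags that would be produced for a
# hypothetical translation unit; 'example.cc' is only a placeholder name.
if __name__ == '__main__':
  import pprint
  pprint.pprint( FlagsForFile( os.path.join( DirectoryOfThisScript(), 'example.cc' ) ) )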
| gpl-2.0 |
tencentyun/cos-python-sdk | qcloud_cos/cos_config.py | 1 | 1384 | #!/usr/bin/env python
# coding=utf-8
################################################################################
# CosConfig: COS-related configuration
################################################################################
class CosConfig(object):
def __init__(self):
self._end_point = 'http://web.file.myqcloud.com/files/v1'
self._user_agent = 'cos-python-sdk-v3.3'
self._timeout = 30
self._sign_expired = 300
    # Set the COS endpoint (domain) address
def set_end_point(self, end_point):
self._end_point = end_point
    # Get the endpoint address
def get_end_point(self):
return self._end_point
    # Get the user_agent sent in the HTTP headers
def get_user_agent(self):
return self._user_agent
    # Set the connection timeout, in seconds
def set_timeout(self, time_out):
assert isinstance(time_out, int)
self._timeout = time_out
    # Get the connection timeout, in seconds
def get_timeout(self):
return self._timeout
    # Set the signature expiration time, in seconds
def set_sign_expired(self, expired):
assert isinstance(expired, int)
self._sign_expired = expired
    # Get the signature expiration time, in seconds
def get_sign_expired(self):
return self._sign_expired
    # Enable HTTPS
def enable_https(self):
self._end_point = 'https://web.file.myqcloud.com/files/v1'
| mit |
riveridea/gnuradio | gr-trellis/examples/python/test_cpm.py | 11 | 5508 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: CPM test
# Author: Achilleas Anastasopoulos
# Description: gnuradio flow graph
# Generated: Thu Feb 19 23:16:23 2009
##################################################
from gnuradio import gr
from gnuradio import trellis, digital, filter, blocks
from grc_gnuradio import blks2 as grc_blks2
import math
import numpy
import sys
import fsm_utils
from gnuradio import trellis
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
    import scipy.stats
    import scipy.special
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
def run_test(seed,blocksize):
tb = gr.top_block()
##################################################
# Variables
##################################################
M = 2
K = 1
P = 2
h = (1.0*K)/P
L = 3
Q = 4
frac = 0.99
f = trellis.fsm(P,M,L)
# CPFSK signals
#p = numpy.ones(Q)/(2.0)
#q = numpy.cumsum(p)/(1.0*Q)
# GMSK signals
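    # The frequency pulse p below is the standard GMSK closed form, proportional to
    #   erfc(2*pi*BT*(t - 1/2)/sqrt(2*ln(2))) - erfc(2*pi*BT*(t + 1/2)/sqrt(2*ln(2))),
    # sampled Q times per symbol over L symbol periods and normalized so that
    # sum(p) == Q/2; q is its cumulative sum (the phase pulse), rescaled to end at 1/2.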
BT=0.3;
tt=numpy.arange(0,L*Q)/(1.0*Q)-L/2.0;
#print tt
p=(0.5*scipy.special.erfc(2*math.pi*BT*(tt-0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0))-0.5*scipy.special.erfc(2*math.pi*BT*(tt+0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0)))/2.0;
p=p/sum(p)*Q/2.0;
#print p
q=numpy.cumsum(p)/Q;
q=q/q[-1]/2.0;
#print q
(f0T,SS,S,F,Sf,Ff,N) = fsm_utils.make_cpm_signals(K,P,M,L,q,frac)
#print N
#print Ff
Ffa = numpy.insert(Ff,Q,numpy.zeros(N),axis=0)
#print Ffa
MF = numpy.fliplr(numpy.transpose(Ffa))
#print MF
E = numpy.sum(numpy.abs(Sf)**2,axis=0)
Es = numpy.sum(E)/f.O()
#print Es
constellation = numpy.reshape(numpy.transpose(Sf),N*f.O())
#print Ff
#print Sf
#print constellation
#print numpy.max(numpy.abs(SS - numpy.dot(Ff , Sf)))
EsN0_db = 10.0
N0 = Es * 10.0**(-(1.0*EsN0_db)/10.0)
#N0 = 0.0
#print N0
head = 4
tail = 4
numpy.random.seed(seed*666)
data = numpy.random.randint(0, M, head+blocksize+tail+1)
#data = numpy.zeros(blocksize+1+head+tail,'int')
for i in range(head):
data[i]=0
for i in range(tail+1):
data[-i]=0
##################################################
# Blocks
##################################################
random_source_x_0 = blocks.vector_source_b(data.tolist(), False)
digital_chunks_to_symbols_xx_0 = digital.chunks_to_symbols_bf((-1, 1), 1)
filter_interp_fir_filter_xxx_0 = filter.interp_fir_filter_fff(Q, p)
analog_frequency_modulator_fc_0 = analog.frequency_modulator_fc(2*math.pi*h*(1.0/Q))
blocks_add_vxx_0 = blocks.add_vcc(1)
analog_noise_source_x_0 = analog.noise_source_c(analog.GR_GAUSSIAN, (N0/2.0)**0.5, -long(seed))
    blocks_multiply_vxx_0 = blocks.multiply_vcc(1)
analog_sig_source_x_0 = analog.sig_source_c(Q, analog.GR_COS_WAVE, -f0T, 1, 0)
# only works for N=2, do it manually for N>2...
filter_fir_filter_xxx_0_0 = filter.fir_filter_ccc(Q, MF[0].conjugate())
filter_fir_filter_xxx_0_0_0 = filter.fir_filter_ccc(Q, MF[1].conjugate())
blocks_streams_to_stream_0 = blocks.streams_to_stream(gr.sizeof_gr_complex*1, int(N))
blocks_skiphead_0 = blocks.skiphead(gr.sizeof_gr_complex*1, int(N*(1+0)))
viterbi = trellis.viterbi_combined_cb(f, head+blocksize+tail, 0, -1, int(N),
constellation, digital.TRELLIS_EUCLIDEAN)
blocks_vector_sink_x_0 = blocks.vector_sink_b()
##################################################
# Connections
##################################################
tb.connect((random_source_x_0, 0), (digital_chunks_to_symbols_xx_0, 0))
tb.connect((digital_chunks_to_symbols_xx_0, 0), (filter_interp_fir_filter_xxx_0, 0))
tb.connect((filter_interp_fir_filter_xxx_0, 0), (analog_frequency_modulator_fc_0, 0))
tb.connect((analog_frequency_modulator_fc_0, 0), (blocks_add_vxx_0, 0))
tb.connect((analog_noise_source_x_0, 0), (blocks_add_vxx_0, 1))
tb.connect((blocks_add_vxx_0, 0), (blocks_multiply_vxx_0, 0))
tb.connect((analog_sig_source_x_0, 0), (blocks_multiply_vxx_0, 1))
tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0, 0))
tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0_0, 0))
tb.connect((filter_fir_filter_xxx_0_0, 0), (blocks_streams_to_stream_0, 0))
tb.connect((filter_fir_filter_xxx_0_0_0, 0), (blocks_streams_to_stream_0, 1))
tb.connect((blocks_streams_to_stream_0, 0), (blocks_skiphead_0, 0))
tb.connect((blocks_skiphead_0, 0), (viterbi, 0))
tb.connect((viterbi, 0), (blocks_vector_sink_x_0, 0))
tb.run()
dataest = blocks_vector_sink_x_0.data()
#print data
#print numpy.array(dataest)
perr = 0
err = 0
for i in range(blocksize):
if data[head+i] != dataest[head+i]:
#print i
err += 1
if err != 0 :
perr = 1
return (err,perr)
if __name__ == '__main__':
blocksize = 1000
ss=0
ee=0
for i in range(10000):
(s,e) = run_test(i,blocksize)
ss += s
ee += e
if (i+1) % 100 == 0:
print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1)
print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1)
| gpl-3.0 |
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_graphics_appearance1152.py | 1 | 5506 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTGraphicsAppearance1152(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"color": ([str],), # noqa: E501
"non_trivial": (bool,), # noqa: E501
"opacity": (int,), # noqa: E501
"reset": (bool,), # noqa: E501
"rgba_color": ([str],), # noqa: E501
"usable_appearance": (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"color": "color", # noqa: E501
"non_trivial": "nonTrivial", # noqa: E501
"opacity": "opacity", # noqa: E501
"reset": "reset", # noqa: E501
"rgba_color": "rgbaColor", # noqa: E501
"usable_appearance": "usableAppearance", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_graphics_appearance1152.BTGraphicsAppearance1152 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
color ([str]): [optional] # noqa: E501
non_trivial (bool): [optional] # noqa: E501
opacity (int): [optional] # noqa: E501
reset (bool): [optional] # noqa: E501
rgba_color ([str]): [optional] # noqa: E501
usable_appearance (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
| mit |
lyft/incubator-airflow | tests/providers/google/cloud/sensors/test_pubsub.py | 4 | 4436 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from google.cloud.pubsub_v1.types import ReceivedMessage
from google.protobuf.json_format import MessageToDict, ParseDict
from airflow.exceptions import AirflowSensorTimeout
from airflow.providers.google.cloud.sensors.pubsub import PubSubPullSensor
TASK_ID = 'test-task-id'
TEST_PROJECT = 'test-project'
TEST_SUBSCRIPTION = 'test-subscription'
class TestPubSubPullSensor(unittest.TestCase):
def _generate_messages(self, count):
return [
ParseDict(
{
"ack_id": "%s" % i,
"message": {
"data": 'Message {}'.format(i).encode('utf8'),
"attributes": {"type": "generated message"},
},
},
ReceivedMessage(),
)
for i in range(1, count + 1)
]
def _generate_dicts(self, count):
return [MessageToDict(m) for m in self._generate_messages(count)]
@mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
def test_poke_no_messages(self, mock_hook):
operator = PubSubPullSensor(task_id=TASK_ID, project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION)
mock_hook.return_value.pull.return_value = []
self.assertEqual([], operator.poke(None))
@mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
def test_poke_with_ack_messages(self, mock_hook):
operator = PubSubPullSensor(task_id=TASK_ID, project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
ack_messages=True)
generated_messages = self._generate_messages(5)
generated_dicts = self._generate_dicts(5)
mock_hook.return_value.pull.return_value = generated_messages
self.assertEqual(generated_dicts, operator.poke(None))
mock_hook.return_value.acknowledge.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
ack_ids=['1', '2', '3', '4', '5']
)
@mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubPullSensor(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
poke_interval=0
)
generated_messages = self._generate_messages(5)
generated_dicts = self._generate_dicts(5)
mock_hook.return_value.pull.return_value = generated_messages
response = operator.execute(None)
mock_hook.return_value.pull.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
max_messages=5,
return_immediately=False
)
self.assertEqual(generated_dicts, response)
@mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
def test_execute_timeout(self, mock_hook):
operator = PubSubPullSensor(task_id=TASK_ID, project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
poke_interval=0, timeout=1)
mock_hook.return_value.pull.return_value = []
with self.assertRaises(AirflowSensorTimeout):
operator.execute(None)
mock_hook.return_value.pull.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
max_messages=5,
return_immediately=False
)
| apache-2.0 |
allenai/allennlp | allennlp/modules/seq2seq_encoders/pytorch_transformer_wrapper.py | 1 | 4656 | from typing import Optional
from overrides import overrides
import torch
from torch import nn
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.nn.util import add_positional_features
@Seq2SeqEncoder.register("pytorch_transformer")
class PytorchTransformer(Seq2SeqEncoder):
"""
Implements a stacked self-attention encoder similar to the Transformer
    architecture in [Attention Is All You Need]
(https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077).
This class adapts the Transformer from torch.nn for use in AllenNLP. Optionally, it adds positional encodings.
Registered as a `Seq2SeqEncoder` with name "pytorch_transformer".
# Parameters
input_dim : `int`, required.
The input dimension of the encoder.
feedforward_hidden_dim : `int`, required.
The middle dimension of the FeedForward network. The input and output
dimensions are fixed to ensure sizes match up for the self attention layers.
num_layers : `int`, required.
The number of stacked self attention -> feedforward -> layer normalisation blocks.
num_attention_heads : `int`, required.
The number of attention heads to use per layer.
use_positional_encoding : `bool`, optional, (default = `True`)
Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,
as without this feature, the self attention layers have no idea of absolute or relative
position (as they are just computing pairwise similarity between vectors of elements),
which can be important features for many tasks.
dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the feedforward network.
""" # noqa
def __init__(
self,
input_dim: int,
num_layers: int,
feedforward_hidden_dim: int = 2048,
num_attention_heads: int = 8,
positional_encoding: Optional[str] = None,
positional_embedding_size: int = 512,
dropout_prob: float = 0.1,
activation: str = "relu",
) -> None:
super().__init__()
layer = nn.TransformerEncoderLayer(
d_model=input_dim,
nhead=num_attention_heads,
dim_feedforward=feedforward_hidden_dim,
dropout=dropout_prob,
activation=activation,
)
self._transformer = nn.TransformerEncoder(layer, num_layers)
self._input_dim = input_dim
# initialize parameters
# We do this before the embeddings are initialized so we get the default initialization for the embeddings.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
if positional_encoding is None:
self._sinusoidal_positional_encoding = False
self._positional_embedding = None
elif positional_encoding == "sinusoidal":
self._sinusoidal_positional_encoding = True
self._positional_embedding = None
elif positional_encoding == "embedding":
self._sinusoidal_positional_encoding = False
self._positional_embedding = nn.Embedding(positional_embedding_size, input_dim)
else:
raise ValueError(
"positional_encoding must be one of None, 'sinusoidal', or 'embedding'"
)
@overrides
def get_input_dim(self) -> int:
return self._input_dim
@overrides
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def is_bidirectional(self):
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):
output = inputs
if self._sinusoidal_positional_encoding:
output = add_positional_features(output)
if self._positional_embedding is not None:
position_ids = torch.arange(inputs.size(1), dtype=torch.long, device=output.device)
position_ids = position_ids.unsqueeze(0).expand(inputs.shape[:-1])
output = output + self._positional_embedding(position_ids)
# For some reason the torch transformer expects the shape (sequence, batch, features), not the more
# familiar (batch, sequence, features), so we have to fix it.
output = output.permute(1, 0, 2)
# For some other reason, the torch transformer takes the mask backwards.
mask = ~mask
output = self._transformer(output, src_key_padding_mask=mask)
output = output.permute(1, 0, 2)
return output
| apache-2.0 |
davidcusatis/horizon | openstack_dashboard/test/api_tests/cinder_rest_tests.py | 8 | 10320 | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from django.conf import settings
from openstack_dashboard import api
from openstack_dashboard.api.rest import cinder
from openstack_dashboard.test import helpers as test
class CinderRestTestCase(test.TestCase):
#
# Volumes
#
def test_volumes_get(self):
self._test_volumes_get(False, {})
def test_volumes_get_all(self):
self._test_volumes_get(True, {})
def test_volumes_get_with_filters(self):
filters = {'status': 'available'}
self._test_volumes_get(False, filters)
@mock.patch.object(cinder.api, 'cinder')
def _test_volumes_get(self, all, filters, cc):
if all:
request = self.mock_rest_request(GET={'all_projects': 'true'})
else:
request = self.mock_rest_request(**{'GET': filters})
cc.volume_list_paged.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
], False, False
response = cinder.Volumes().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"id": "one"}, {"id": "two"}],
"has_more_data": False,
"has_prev_data": False})
if all:
cc.volume_list_paged.assert_called_once_with(request,
{'all_tenants': 1})
else:
cc.volume_list_paged.assert_called_once_with(request,
search_opts=filters)
@mock.patch.object(cinder.api, 'cinder')
def test_volume_get(self, cc):
request = self.mock_rest_request(**{'GET': {}})
cc.volume_get.return_value = mock.Mock(
**{'to_dict.return_value': {'id': 'one'}})
response = cinder.Volume().get(request, '1')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"id": "one"})
cc.volume_get.assert_called_once_with(request, '1')
@mock.patch.object(cinder.api, 'cinder')
def test_volume_create(self, cc):
mock_body = '''{
"size": "",
"name": "",
"description": "",
"volume_type": "",
"snapshot_id": "",
"metadata": "",
"image_id": "",
"availability_zone": "",
"source_volid": ""
}'''
mock_volume_create_response = {
"size": ""
}
mock_post_response = '{"size": ""}'
request = self.mock_rest_request(POST={}, body=mock_body)
cc.volume_create.return_value = \
mock.Mock(**{'to_dict.return_value': mock_volume_create_response})
response = cinder.Volumes().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content.decode("utf-8"), mock_post_response)
#
# Volume Types
#
@mock.patch.object(cinder.api, 'cinder')
def test_volume_types_get(self, cc):
request = self.mock_rest_request(**{'GET': {}})
cc.VolumeType.return_value = mock.Mock(
**{'to_dict.return_value': {'id': 'one'}})
cc.volume_type_list.return_value = [{'id': 'one'}]
response = cinder.VolumeTypes().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"items": [{"id": "one"}]})
cc.volume_type_list.assert_called_once_with(request)
cc.VolumeType.assert_called_once_with({'id': 'one'})
@mock.patch.object(cinder.api, 'cinder')
def test_volume_type_get(self, cc):
request = self.mock_rest_request(**{'GET': {}})
cc.volume_type_get.return_value = {'name': 'one'}
cc.VolumeType.return_value = mock.Mock(
**{'to_dict.return_value': {'id': 'one'}})
response = cinder.VolumeType().get(request, '1')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"id": "one"})
cc.volume_type_get.assert_called_once_with(request, '1')
cc.VolumeType.assert_called_once_with({'name': 'one'})
@mock.patch.object(cinder.api, 'cinder')
def test_volume_type_get_default(self, cc):
request = self.mock_rest_request(**{'GET': {}})
cc.volume_type_default.return_value = {'name': 'one'}
cc.VolumeType.return_value = mock.Mock(
**{'to_dict.return_value': {'id': 'one'}})
response = cinder.VolumeType().get(request, 'default')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"id": "one"})
cc.volume_type_default.assert_called_once_with(request)
cc.VolumeType.assert_called_once_with({'name': 'one'})
#
# Volume Snapshots
#
@mock.patch.object(cinder.api, 'cinder')
def test_volume_snaps_get(self, cc):
request = self.mock_rest_request(**{'GET': {}})
cc.volume_snapshot_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = cinder.VolumeSnapshots().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"id": "one"}, {"id": "two"}]})
cc.volume_snapshot_list.assert_called_once_with(request,
search_opts={})
@mock.patch.object(cinder.api, 'cinder')
def test_volume_snaps_get_with_filters(self, cc):
filters = {'status': 'available'}
request = self.mock_rest_request(**{'GET': dict(filters)})
cc.volume_snapshot_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = cinder.VolumeSnapshots().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"id": "one"}, {"id": "two"}]})
cc.volume_snapshot_list.assert_called_once_with(request,
search_opts=filters)
#
# Extensions
#
@mock.patch.object(cinder.api, 'cinder')
@mock.patch.object(settings,
'OPENSTACK_CINDER_EXTENSIONS_BLACKLIST', ['baz'])
def _test_extension_list(self, cc):
request = self.mock_rest_request()
cc.list_extensions.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'baz'}}),
]
response = cinder.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"name": "foo"}, {"name": "bar"}]}')
cc.list_extensions.assert_called_once_with(request)
@mock.patch.object(cinder.api, 'cinder')
def test_qos_specs_get(self, cc):
request = self.mock_rest_request(GET={})
cc.qos_specs_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = cinder.QoSSpecs().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content.decode("utf-8"),
'{"items": [{"id": "one"}, {"id": "two"}]}')
cc.qos_specs_list.assert_called_once_with(request)
@mock.patch.object(cinder.api, 'cinder')
def test_tenant_absolute_limits_get(self, cc):
request = self.mock_rest_request(GET={})
cc.tenant_absolute_limits.return_value = \
{'id': 'one'}
response = cinder.TenantAbsoluteLimits().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content.decode("utf-8"), '{"id": "one"}')
cc.tenant_absolute_limits.assert_called_once_with(request)
#
# Services
#
@test.create_stubs({api.base: ('is_service_enabled',)})
@mock.patch.object(cinder.api, 'cinder')
def test_services_get(self, cc):
request = self.mock_rest_request(GET={})
cc.service_list.return_value = [mock.Mock(
binary='binary_1',
host='host_1',
zone='zone_1',
updated_at='updated_at_1',
status='status_1',
state='state_1'
), mock.Mock(
binary='binary_2',
host='host_2',
zone='zone_2',
updated_at='updated_at_2',
status='status_2',
state='state_2'
)]
api.base.is_service_enabled(request, 'volume').AndReturn(True)
self.mox.ReplayAll()
response = cinder.Services().get(request)
self.assertStatusCode(response, 200)
response_as_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(response_as_json['items'][0]['id'], 1)
self.assertEqual(response_as_json['items'][0]['binary'], 'binary_1')
self.assertEqual(response_as_json['items'][1]['id'], 2)
self.assertEqual(response_as_json['items'][1]['binary'], 'binary_2')
cc.service_list.assert_called_once_with(request)
@test.create_stubs({api.base: ('is_service_enabled',)})
def test_services_get_disabled(self):
request = self.mock_rest_request(GET={})
api.base.is_service_enabled(request, 'volume').AndReturn(False)
self.mox.ReplayAll()
response = cinder.Services().get(request)
self.assertStatusCode(response, 501)
| apache-2.0 |
chenleicpp/p970_kernel_3.0.8 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
jbremer/androguard | androguard/misc.py | 12 | 6313 | from androguard.core import *
from androguard.core.androgen import *
from androguard.core.bytecode import *
from androguard.core.bytecodes.dvm import *
from androguard.core.bytecodes.apk import *
from androguard.core.analysis.analysis import *
from androguard.core.analysis.ganalysis import *
from androguard.decompiler.decompiler import *
from cPickle import dumps, loads
from androguard.core import androconf
def save_session(l, filename):
"""
save your session !
:param l: a list of objects
:type: a list of object
:param filename: output filename to save the session
:type filename: string
:Example:
save_session([a, vm, vmx], "msession.json")
"""
with open(filename, "w") as fd:
fd.write(dumps(l, -1))
def load_session(filename):
"""
load your session !
:param filename: the filename where the session has been saved
:type filename: string
:rtype: the elements of your session :)
:Example:
a, vm, vmx = load_session("mysession.json")
"""
return loads(read(filename, binary=False))
def AnalyzeAPK(filename, raw=False, decompiler="dad"):
"""
Analyze an android application and setup all stuff for a more quickly analysis !
:param filename: the filename of the android application or a buffer which represents the application
:type filename: string
:param raw: True is you would like to use a buffer (optional)
:type raw: boolean
:param decompiler: ded, dex2jad, dad (optional)
:type decompiler: string
:rtype: return the :class:`APK`, :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
"""
androconf.debug("APK ...")
a = APK(filename, raw)
d, dx = AnalyzeDex(a.get_dex(), raw=True, decompiler=decompiler)
return a, d, dx
def AnalyzeDex(filename, raw=False, decompiler="dad"):
"""
Analyze an android dex file and setup all stuff for a more quickly analysis !
:param filename: the filename of the android dex file or a buffer which represents the dex file
:type filename: string
:param raw: True is you would like to use a buffer (optional)
:type raw: boolean
:rtype: return the :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
"""
androconf.debug("DalvikVMFormat ...")
d = None
if raw == False:
d = DalvikVMFormat(read(filename))
else:
d = DalvikVMFormat(filename)
androconf.debug("Export VM to python namespace")
d.create_python_export()
androconf.debug("VMAnalysis ...")
dx = uVMAnalysis(d)
androconf.debug("GVMAnalysis ...")
gx = GVMAnalysis(dx, None)
d.set_vmanalysis(dx)
d.set_gvmanalysis(gx)
RunDecompiler(d, dx, decompiler)
androconf.debug("XREF ...")
d.create_xref()
androconf.debug("DREF ...")
d.create_dref()
return d, dx
def AnalyzeODex(filename, raw=False, decompiler="dad"):
"""
Analyze an android odex file and setup all stuff for a more quickly analysis !
:param filename: the filename of the android dex file or a buffer which represents the dex file
:type filename: string
:param raw: True is you would like to use a buffer (optional)
:type raw: boolean
:rtype: return the :class:`DalvikOdexVMFormat`, and :class:`VMAnalysis` objects
"""
androconf.debug("DalvikOdexVMFormat ...")
d = None
if raw == False:
d = DalvikOdexVMFormat(read(filename))
else:
d = DalvikOdexVMFormat(filename)
androconf.debug("Export VM to python namespace")
d.create_python_export()
androconf.debug("VMAnalysis ...")
dx = uVMAnalysis(d)
androconf.debug("GVMAnalysis ...")
gx = GVMAnalysis(dx, None)
d.set_vmanalysis(dx)
d.set_gvmanalysis(gx)
RunDecompiler(d, dx, decompiler)
androconf.debug("XREF ...")
d.create_xref()
androconf.debug("DREF ...")
d.create_dref()
return d, dx
def RunDecompiler(d, dx, decompiler):
"""
Run the decompiler on a specific analysis
:param d: the DalvikVMFormat object
:type d: :class:`DalvikVMFormat` object
:param dx: the analysis of the format
:type dx: :class:`VMAnalysis` object
:param decompiler: the type of decompiler to use ("dad", "dex2jad", "ded")
:type decompiler: string
"""
if decompiler != None:
androconf.debug("Decompiler ...")
decompiler = decompiler.lower()
if decompiler == "dex2jad":
d.set_decompiler(DecompilerDex2Jad(d,
androconf.CONF["PATH_DEX2JAR"],
androconf.CONF["BIN_DEX2JAR"],
androconf.CONF["PATH_JAD"],
androconf.CONF["BIN_JAD"],
androconf.CONF["TMP_DIRECTORY"]))
elif decompiler == "dex2fernflower":
d.set_decompiler(DecompilerDex2Fernflower(d,
androconf.CONF["PATH_DEX2JAR"],
androconf.CONF["BIN_DEX2JAR"],
androconf.CONF["PATH_FERNFLOWER"],
androconf.CONF["BIN_FERNFLOWER"],
androconf.CONF["OPTIONS_FERNFLOWER"],
androconf.CONF["TMP_DIRECTORY"]))
elif decompiler == "ded":
d.set_decompiler(DecompilerDed(d,
androconf.CONF["PATH_DED"],
androconf.CONF["BIN_DED"],
androconf.CONF["TMP_DIRECTORY"]))
else:
d.set_decompiler(DecompilerDAD(d, dx))
def AnalyzeElf(filename, raw=False):
# avoid to install smiasm for everybody
from androguard.core.binaries.elf import ELF
e = None
if raw == False:
e = ELF(read(filename))
else:
e = ELF(filename)
ExportElfToPython(e)
return e
def ExportElfToPython(e):
for function in e.get_functions():
name = "FUNCTION_" + function.name
setattr(e, name, function)
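# --- Usage sketch (illustrative; "app.apk" and the session filename are placeholders) ---
def _example_workflow(filename="app.apk"):
    a, d, dx = AnalyzeAPK(filename, decompiler="dad")
    save_session([a, d, dx], "mysession.p")
    return load_session("mysession.p")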
| apache-2.0 |
mogoweb/chromium-crosswalk | native_client_sdk/src/tools/genhttpfs.py | 79 | 2524 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This scripts generates a manifest file for the MountHttp file system.
# Files and directory paths are specified on the command-line. The names
# with glob and directories are recursed to form a list of files.
#
# For each file, the mode bits, size and path relative to the CWD are written
# to the output file which is stdout by default.
#
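#
# Example invocation (paths and sizes are illustrative):
#   genhttpfs.py -C ./web -r images index.html > httpfs_manifest.txt
# producing one line per file of the form:
#   -r-- 5130 /index.html
#   -r-- 1024 /images/logo.png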
import glob
import optparse
import os
import sys
def main(argv):
parser = optparse.OptionParser(
usage='Usage: %prog [options] filename ...')
parser.add_option('-C', '--srcdir',
help='Change directory.', dest='srcdir', default=None)
parser.add_option('-o', '--output',
help='Output file name.', dest='output', default=None)
parser.add_option('-v', '--verbose',
help='Verbose output.', dest='verbose',
action='store_true')
parser.add_option('-r', '--recursive',
help='Recursive search.', action='store_true')
options, args = parser.parse_args(argv)
if options.output:
outfile = open(options.output, 'w')
else:
outfile = sys.stdout
if options.srcdir:
os.chdir(options.srcdir)
# Generate a set of unique file names bases on the input globs
fileset = set()
for fileglob in args:
filelist = glob.glob(fileglob)
if not filelist:
raise RuntimeError('Could not find match for "%s".\n' % fileglob)
for filename in filelist:
if os.path.isfile(filename):
fileset |= set([filename])
continue
if os.path.isdir(filename) and options.recursive:
for root, _, files in os.walk(filename):
fileset |= set([os.path.join(root, name) for name in files])
continue
raise RuntimeError('Can not handle path "%s".\n' % filename)
cwd = os.path.abspath(os.getcwd())
cwdlen = len(cwd)
for filename in sorted(fileset):
relname = os.path.abspath(filename)
if cwd not in relname:
      raise RuntimeError('%s is not relative to CWD %s.\n' % (filename, cwd))
relname = relname[cwdlen:]
stat = os.stat(filename)
mode = '-r--'
outfile.write('%s %d %s\n' % (mode, stat.st_size, relname))
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except OSError, e:
sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
sys.exit(1)
| bsd-3-clause |
jammerful/buildbot | master/buildbot/steps/slave.py | 11 | 1812 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# This module is left for backward compatibility of old-named worker API.
# It should never be imported by Buildbot.
from __future__ import absolute_import
from __future__ import print_function
from buildbot.steps.worker import CompositeStepMixin
from buildbot.steps.worker import CopyDirectory
from buildbot.steps.worker import FileExists
from buildbot.steps.worker import MakeDirectory
from buildbot.steps.worker import RemoveDirectory
from buildbot.steps.worker import SetPropertiesFromEnv
from buildbot.steps.worker import WorkerBuildStep
# pylint: disable=unused-import
from buildbot.worker_transition import deprecatedWorkerModuleAttribute
from buildbot.worker_transition import reportDeprecatedWorkerModuleUsage
__all__ = [
'CompositeStepMixin',
'CopyDirectory',
'FileExists',
'MakeDirectory',
'RemoveDirectory',
'SetPropertiesFromEnv',
]
reportDeprecatedWorkerModuleUsage(
"'{old}' module is deprecated, use "
"'buildbot.steps.worker' module instead".format(old=__name__))
deprecatedWorkerModuleAttribute(locals(), WorkerBuildStep)
del WorkerBuildStep # noqa
| gpl-2.0 |
brianr/uuss | scripts/process_userstates_buckets.py | 1 | 10860 | from __future__ import with_statement
import base64
import optparse
import os
import pymongo
import simplejson
import sys
import time
import zlib
from paste.deploy import converters
from lolapps.util import processor
from lolapps.util import db
from lolapps.util.adapters import mongo, chunking
from lolapps.common import userstate_multi
from uuss.server import model
from lolapps import helpers
# logging
log = processor.configure_logging()
game = None
max_batch_size = None
mongo_db = None
tmp_dir = None
bucket = None
shard = None
cycle_time = None
rebalancing = False
userstate = None
def _app_id_from_user_id(user_id):
return helpers.app_id_from_user_id(user_id)
def do_processing(onerun=False):
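    # Snapshots each pending "user_state-modified-<bucket>" collection by renaming it,
    # dumps the referenced userstates in batches of max_batch_size to tab-separated
    # .user/.player files under tmp_dir (written under *tmp names and renamed only once
    # complete, so the downstream reader never sees partial files), then drops the
    # renamed collection.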
for collection_name in mongo_db.collection_names():
proc_time = 0
if collection_name.startswith('user_state-modified-%s' % bucket):
try:
load_name = 'user_state-modified-%s-%s' % (bucket, time.time())
try:
mongo_db[collection_name].rename(load_name)
except pymongo.errors.OperationFailure:
# effectively this makes sure the renamed collection exists
if mongo_db[load_name].count() <= 0:
raise
log.error("Error encountered renaming collection %s to %s, but able to continue" %
(collection_name, load_name))
modified = mongo_db[load_name]
start_time = time.time()
log.info("%s userstates to save for %s ..." % (modified.count(), load_name))
# prepare for userstate size monitoring
userstates_processed = 0
max_userstate_size = -1
total_userstate_size = 0
mod_count = 0
mod_rows = list(modified.find())
batch_count = 1
while mod_rows:
proc_rows = mod_rows[:max_batch_size]
mod_rows = mod_rows[max_batch_size:]
base_file_name = "%s.%s" % (load_name, batch_count)
with open(os.path.join(tmp_dir, base_file_name+".usrtmp"), 'w') as user_file:
with open(os.path.join(tmp_dir, base_file_name+".plrtmp"), 'w') as player_file:
for row in proc_rows:
mod_count += row['count']
try:
if row['_id'] == 'null':
log.error("null user_id encountered")
else:
if rebalancing:
(state, chunked) = userstate.get(row['_id'], raw=True)
else:
(state, chunked) = userstate.backup(row['_id'], raw=True)
if state is not None:
if not chunked:
# NOTE(jpatrin): Shouldn't happen, but just in case...
if isinstance(state, str):
state = simplejson.loads(zlib.decompress(state))
# NOTE(jpatrin): This will be just a single chunk for now,
# but will serve until the state gets saved by the game
raw_state = chunking.blow_chunks(state)
else:
raw_state = state
state = chunking.reconstitute_chunks(raw_state, True)
#state_zlib_json = zlib.compress(simplejson.dumps(state))
#user_line = gen_userstate_line(row['_id'], state_zlib_json)
user_line = gen_userstate_line(row['_id'], raw_state)
player_line = gen_player_line(row['_id'], state)
user_file.write(user_line+'\n')
if player_line:
player_file.write(player_line+'\n')
# keep userstate size for average and max size tracking
userstates_processed += 1
userstate_size = len(raw_state) / 1024
total_userstate_size += userstate_size
max_userstate_size = max(userstate_size, max_userstate_size)
except userstate_multi.UserstateException, e:
log.exception(e)
except Exception, e:
# (jay) errors are bad here, but we don't want to keep the
# rest of the userstates from being saved, so log it and go on
log.exception(e)
# don't want the file reader to get to these before we're done, so keep
# as temporary name until finished writing
os.rename(os.path.join(tmp_dir, base_file_name+".usrtmp"),
os.path.join(tmp_dir, base_file_name+".user"))
os.rename(os.path.join(tmp_dir, base_file_name+".plrtmp"),
os.path.join(tmp_dir, base_file_name+".player"))
log.info("processed batch, %s remaining" % len(mod_rows))
batch_count += 1
mongo_db.drop_collection(load_name)
log.info("Saved. Total updates since last backup: %s" % mod_count)
proc_time = time.time() - start_time
log.info("%s seconds to process userstates" % proc_time)
# if we processed userstates, log their size characteristics
if userstates_processed:
avg_userstate_size = total_userstate_size / userstates_processed
log.info("processed userstate sizes(k): avg %r max %r",
avg_userstate_size, max_userstate_size)
except Exception, e:
# problems here are bad, but shouldn't stop processing of
# other collections
log.exception(e)
if onerun and proc_time > 0:
log.debug("one run finished, returning")
return
def gen_userstate_line(user_id, state):
state = base64.b64encode(state)
return "%s\t%s\t%s\t%s\t%s" % (
db.str_nullsafe_quote(user_id),
db.str_nullsafe_quote(state),
db.str_nullsafe_quote(int(time.time())), # date_created
db.str_nullsafe_quote(int(time.time())), # date_modified
db.str_nullsafe_quote(1) # revision_id
)
def gen_player_line(user_id, state):
player = state.get('player')
if not player:
return ''
#player = {}
try:
now = int(time.time())
last_login = player.get('lastLogin', 0)
line = "('%s', %s, %s, %s, %s, %s, %s)" % (user_id,
_app_id_from_user_id(user_id),
player['tutorialStep'],
player['level'],
player['xp'],
int(last_login / 86400.0), # last_visit
now # timestamp
)
except KeyError, e:
line = None # No player data
return line
def configure(options):
"""
Find the first app we got command-line options for and setup for it.
We are only processing one app with this script, so if more than one
is specified on the command-line, only one of them will get processed.
No guarantees on which one that is.
"""
global mongo_db, tmp_dir, max_batch_size, bucket, shard, cycle_time, rebalancing, game, userstate
game = options.game
if game is None:
print "game option is required"
sys.exit(1)
ini_file = options.ini
if ini_file is None:
print "ini option is required"
sys.exit(1)
log.info('Configuring...')
bucket = int(options.bucket)
shard = int(options.shard)
max_batch_size = int(options.batch)
cycle_time = int(options.time)
from uuss.server import configuration
(full_config, config) = configuration.init_from_ini(ini_file, [game], {'uuss.use_gevent': False})
helpers.config = config
userstate = getattr(model, game).userstate
tmp_dir = config["%s_user_state.tmp_dir%s" % (game, shard)]
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
rebalancing = converters.asbool(config['%s_user_state.rebalancing' % game]) and options.rebal
if rebalancing:
hosts = config['%s_user_state.mongo_hosts_rebal' % game].split(";")
else:
hosts = config['%s_user_state.mongo_hosts' % game].split(";")
mongo_db = mongo.connect(hosts[shard])[config['%s_user_state.mongo_dbname' % game]]
def main():
import gc
log.info("GC collected %r objects", gc.collect())
processor.register_signal_handlers()
processor.start_log_processor(do_processing, cycle_time)
if __name__ == '__main__':
parser = optparse.OptionParser()
# default to staging.ini so never accidentaly run against production
parser.add_option("-i", "--ini", dest="ini", help="uuss ini file")
parser.add_option("-b", "--batch", dest="batch", default="1000", help="maximum batch size")
parser.add_option("-u", "--bucket", dest="bucket", default="0", help="bucket to process")
parser.add_option("-s", "--shard", dest="shard", default="0", help="shard to process")
parser.add_option("-t", "--time", dest="time", default="120", help="minimum cycle time in seconds")
parser.add_option("-r", "--rebal", dest="rebal", action="store_true", default=False,
help="rebalancing node")
parser.add_option("-g", "--game", dest="game", help="game to process (dane, india)")
options, args = parser.parse_args()
configure(options)
main() # run forever
#do_processing() # run once
| mit |
mohammed-alfatih/servo | tests/wpt/css-tests/tools/html5lib/doc/conf.py | 436 | 9028 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# html5lib documentation build configuration file, created by
# sphinx-quickstart on Wed May 8 00:04:49 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'html5lib'
copyright = '2006 - 2013, James Graham, Geoffrey Sneddon, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
sys.path.append(os.path.abspath('..'))
from html5lib import __version__
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'theme']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'html5libdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'html5lib.tex', 'html5lib Documentation',
'James Graham, Geoffrey Sneddon, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'html5lib', 'html5lib Documentation',
['James Graham, Geoffrey Sneddon, and contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'html5lib', 'html5lib Documentation',
'James Graham, Geoffrey Sneddon, and contributors', 'html5lib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
class CExtMock(object):
"""Required for autodoc on readthedocs.org where you cannot build C extensions."""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return CExtMock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
else:
return CExtMock()
try:
import lxml # flake8: noqa
except ImportError:
sys.modules['lxml'] = CExtMock()
sys.modules['lxml.etree'] = CExtMock()
print("warning: lxml modules mocked.")
try:
import genshi # flake8: noqa
except ImportError:
sys.modules['genshi'] = CExtMock()
sys.modules['genshi.core'] = CExtMock()
print("warning: genshi modules mocked.")
| mpl-2.0 |
elimence/edx-platform | cms/djangoapps/contentstore/management/commands/check_course.py | 5 | 2830 | from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import check_module_metadata_editability
from xmodule.course_module import CourseDescriptor
from request_cache.middleware import RequestCache
class Command(BaseCommand):
help = '''Enumerates through the course and find common errors'''
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("check_course requires one argument: <location>")
loc_str = args[0]
loc = CourseDescriptor.id_to_location(loc_str)
store = modulestore()
# setup a request cache so we don't throttle the DB with all the metadata inheritance requests
store.request_cache = RequestCache.get_request_cache()
course = store.get_item(loc, depth=3)
err_cnt = 0
def _xlint_metadata(module):
err_cnt = check_module_metadata_editability(module)
for child in module.get_children():
err_cnt = err_cnt + _xlint_metadata(child)
return err_cnt
err_cnt = err_cnt + _xlint_metadata(course)
        # we've had a bug where the xml_attributes field can be rewritten as a string rather than a dict
def _check_xml_attributes_field(module):
err_cnt = 0
if hasattr(module, 'xml_attributes') and isinstance(module.xml_attributes, basestring):
print 'module = {0} has xml_attributes as a string. It should be a dict'.format(module.location.url())
err_cnt = err_cnt + 1
for child in module.get_children():
err_cnt = err_cnt + _check_xml_attributes_field(child)
return err_cnt
err_cnt = err_cnt + _check_xml_attributes_field(course)
# check for dangling discussion items, this can cause errors in the forums
def _get_discussion_items(module):
discussion_items = []
if module.location.category == 'discussion':
discussion_items = discussion_items + [module.location.url()]
for child in module.get_children():
discussion_items = discussion_items + _get_discussion_items(child)
return discussion_items
discussion_items = _get_discussion_items(course)
# now query all discussion items via get_items() and compare with the tree-traversal
queried_discussion_items = store.get_items(['i4x', course.location.org, course.location.course,
'discussion', None, None])
for item in queried_discussion_items:
if item.location.url() not in discussion_items:
print 'Found dangling discussion module = {0}'.format(item.location.url())
| agpl-3.0 |
suninsky/ReceiptOCR | Python/server/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
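# (Worked out from the figures above: 0.25 * 12.58 ~= 3.15, which appears to
# have been rounded to the 3.0 used here.)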
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| mit |
jeasoft/odoo | marcos_addons/marcos_ncf/account_voucher/number_to_letter.py | 3 | 4850 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from itertools import ifilter
UNIDADES = (
'',
'UN ',
'DOS ',
'TRES ',
'CUATRO ',
'CINCO ',
'SEIS ',
'SIETE ',
'OCHO ',
'NUEVE ',
'DIEZ ',
'ONCE ',
'DOCE ',
'TRECE ',
'CATORCE ',
'QUINCE ',
'DIECISEIS ',
'DIECISIETE ',
'DIECIOCHO ',
'DIECINUEVE ',
'VEINTE '
)
DECENAS = (
'VENTI',
'TREINTA ',
'CUARENTA ',
'CINCUENTA ',
'SESENTA ',
'SETENTA ',
'OCHENTA ',
'NOVENTA ',
'CIEN '
)
CENTENAS = (
'CIENTO ',
'DOSCIENTOS ',
'TRESCIENTOS ',
'CUATROCIENTOS ',
'QUINIENTOS ',
'SEISCIENTOS ',
'SETECIENTOS ',
'OCHOCIENTOS ',
'NOVECIENTOS '
)
MONEDAS = (
{'country': u'Colombia', 'currency': 'COP', 'singular': u'PESO COLOMBIANO', 'plural': u'PESOS COLOMBIANOS', 'symbol': u'$'},
{'country': u'Estados Unidos', 'currency': 'USD', 'singular': u'DÓLAR', 'plural': u'DÓLARES', 'symbol': u'US$'},
{'country': u'Europa', 'currency': 'EUR', 'singular': u'EURO', 'plural': u'EUROS', 'symbol': u'€'},
{'country': u'México', 'currency': 'MXN', 'singular': u'PESO MEXICANO', 'plural': u'PESOS MEXICANOS', 'symbol': u'$'},
{'country': u'Perú', 'currency': 'PEN', 'singular': u'NUEVO SOL', 'plural': u'NUEVOS SOLES', 'symbol': u'S/.'},
{'country': u'Reino Unido', 'currency': 'GBP', 'singular': u'LIBRA', 'plural': u'LIBRAS', 'symbol': u'£'},
{'country': u'Republica Dominicana', 'currency': 'pesos', 'singular': u'peso', 'plural': u'pesos', 'symbol': u'RD'}
)
# To define the currency I am relying on the codes established by ISO 4217
# I decided to name the variables in English, because that makes them easier to locate regardless of the country
# Yes, I know Europe is not a country, but I could not come up with a better name for the key.
def to_word(number, mi_moneda=None):
if mi_moneda != None:
try:
moneda = ifilter(lambda x: x['currency'] == mi_moneda, MONEDAS).next()
if number < 2:
moneda = moneda['singular']
else:
moneda = moneda['plural']
except:
return u"Tipo de moneda inválida"
else:
moneda = ""
"""Converts a number into string representation"""
converted = ''
if not (0 < number < 999999999):
return 'No es posible convertir el numero a letras'
if "." in str(number):
number, decimal = str(number).split(".")
else:
number = str(number)
decimal = "00"
number_str = str(number).zfill(9)
millones = number_str[:3]
miles = number_str[3:6]
cientos = number_str[6:]
if(millones):
if(millones == '001'):
converted += 'UN MILLON '
elif(int(millones) > 0):
converted += '%sMILLONES ' % __convert_group(millones)
if(miles):
if(miles == '001'):
converted += 'MIL '
elif(int(miles) > 0):
converted += '%sMIL ' % __convert_group(miles)
if(cientos):
if(cientos == '001'):
converted += 'UN '
elif(int(cientos) > 0):
converted += '%s' % __convert_group(cientos)
#converted += moneda
if decimal:
decimal = "con %s%s" % (decimal, "/100")
converted += decimal
converted = "{}".format(converted)
return converted.upper()
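# Hand-traced example of the function above (illustrative only; the exact
# wording follows the DECENAS/UNIDADES tables, and the currency name is never
# appended because the "converted += moneda" line is commented out):
#   to_word(123.45, 'USD')  ->  'CIENTO VENTITRES CON 45/100'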
def __convert_group(n):
"""Turn each group of numbers into letters"""
output = ''
if(n == '100'):
output = "CIEN "
elif(n[0] != '0'):
output = CENTENAS[int(n[0]) - 1]
k = int(n[1:])
if(k <= 20):
output += UNIDADES[k]
else:
if((k > 30) & (n[2] != '0')):
output += '%sY %s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])
else:
output += '%s%s' % (DECENAS[int(n[1]) - 2], UNIDADES[int(n[2])])
return output | agpl-3.0 |
jnhdny/parts-unlimited-to-bigcommerce | python_scripts/catalog_content_export_unzipper.py | 3 | 1551 | #!/usr/bin/python
# this script unzips the brand catalog content export zip files
# one at a time, and numbers each exported xml file
import zipfile
import shutil
import os
import glob
def bool_str(s):
s = s.lower()
T = ('1', 'true', 'y', 'ye', 'yes', 'yea', 'ya', 'yah', 'yup')
F = ('0', 'false', 'n', 'no', 'not', 'nope', 'na', 'nah')
if s in T:
return True
elif s in F:
return False
else:
        raise ValueError('"%s" is not a recognized yes or no string' % s)
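# e.g. bool_str("Yes") -> True, bool_str("nah") -> False; any other answer is
# rejected with an exception.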
working_directory = "../catalog_content_export_zips"
output_directory = "../catalog_content_xml_files"
if os.path.isdir(working_directory):
os.chdir(working_directory)
print("Changing working directory to " + working_directory + " ...")
cont = True
else:
cont = bool_str(raw_input("Could not change working directory. Continue? (y/n):"))
if cont:
if os.path.isdir(output_directory):
print("Setting output directory to " + output_directory + " ...")
doproceed = True
else:
doproceed = bool_str(raw_input("Could not set ouput directory to " + output_directory + " ... Double check to see if it exists. Do you wish to proceed using current working directory for output? (y/n):"))
if doproceed:
for i in glob.glob('*.zip'):
#filename = "Brand_Catalog_Content_Export(%s).zip" % i
zf = zipfile.ZipFile(i)
print("Extracting " + i + " ...")
zf.extractall()
shutil.move("Brand_Catalog_Content_Export.xml", "../catalog_content_xml_files/%s.xml" % i)
print("Moving " + i + " to ~/parts-unlimited-to-bigcommerce/catalog_content_xml_files/" + i + ".xml")
| gpl-2.0 |
bastimeyer/streamlink | src/streamlink/plugins/livestream.py | 5 | 4599 | import re
from streamlink.compat import urljoin
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.plugin.api.utils import parse_json
from streamlink.stream import AkamaiHDStream, HLSStream
_url_re = re.compile(r"http(s)?://(www\.)?livestream.com/")
_stream_config_schema = validate.Schema({
"event": {
"stream_info": validate.any({
"is_live": bool,
"qualities": [{
"bitrate": int,
"height": int
}],
validate.optional("play_url"): validate.url(scheme="http"),
validate.optional("m3u8_url"): validate.url(
scheme="http",
path=validate.endswith(".m3u8")
),
}, None)
},
validate.optional("playerUri"): validate.text,
validate.optional("viewerPlusSwfUrl"): validate.url(scheme="http"),
validate.optional("lsPlayerSwfUrl"): validate.text,
validate.optional("hdPlayerSwfUrl"): validate.text
})
_smil_schema = validate.Schema(validate.union({
"http_base": validate.all(
validate.xml_find("{http://www.w3.org/2001/SMIL20/Language}head/"
"{http://www.w3.org/2001/SMIL20/Language}meta"
"[@name='httpBase']"),
validate.xml_element(attrib={
"content": validate.text
}),
validate.get("content")
),
"videos": validate.all(
validate.xml_findall("{http://www.w3.org/2001/SMIL20/Language}body/"
"{http://www.w3.org/2001/SMIL20/Language}switch/"
"{http://www.w3.org/2001/SMIL20/Language}video"),
[
validate.all(
validate.xml_element(attrib={
"src": validate.text,
"system-bitrate": validate.all(
validate.text,
validate.transform(int)
)
}),
validate.transform(
lambda e: (e.attrib["src"], e.attrib["system-bitrate"])
)
)
],
)
}))
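# The schema above reduces a SMIL playlist to its "httpBase" value plus a list
# of (relative src, bitrate) pairs from the <switch> element; _parse_smil()
# below joins each src onto the base URL and wraps it in an AkamaiHDStream.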
class Livestream(Plugin):
@classmethod
def default_stream_types(cls, streams):
return ["akamaihd", "hls"]
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
def _get_stream_info(self):
res = self.session.http.get(self.url)
match = re.search("window.config = ({.+})", res.text)
if match:
config = match.group(1)
return parse_json(config, "config JSON",
schema=_stream_config_schema)
def _parse_smil(self, url, swf_url):
res = self.session.http.get(url)
smil = self.session.http.xml(res, "SMIL config", schema=_smil_schema)
for src, bitrate in smil["videos"]:
url = urljoin(smil["http_base"], src)
yield bitrate, AkamaiHDStream(self.session, url, swf=swf_url)
def _get_streams(self):
info = self._get_stream_info()
if not info:
return
stream_info = info["event"]["stream_info"]
if not (stream_info and stream_info["is_live"]):
# Stream is not live
return
play_url = stream_info.get("play_url")
if play_url:
swf_url = info.get("playerUri") or info.get("hdPlayerSwfUrl") or info.get("lsPlayerSwfUrl") or info.get("viewerPlusSwfUrl")
if swf_url:
if not swf_url.startswith("http"):
if swf_url.startswith("//"):
swf_url = "http:" + swf_url
else:
swf_url = "http://" + swf_url
# Work around broken SSL.
swf_url = swf_url.replace("https://", "http://")
qualities = stream_info["qualities"]
for bitrate, stream in self._parse_smil(play_url, swf_url):
name = "{0:d}k".format(int(bitrate / 1000))
for quality in qualities:
if quality["bitrate"] == bitrate:
name = "{0}p".format(quality["height"])
yield name, stream
m3u8_url = stream_info.get("m3u8_url")
if m3u8_url:
streams = HLSStream.parse_variant_playlist(self.session, m3u8_url,
namekey="pixels")
# TODO: Replace with "yield from" when dropping Python 2.
for stream in streams.items():
yield stream
__plugin__ = Livestream
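# Illustrative sketch (not part of the original plugin): how stream names are
# derived in _get_streams() above -- the bitrate gives a "###k" fallback name,
# and a matching entry in the qualities list upgrades it to a "###p" name.
# The sample bitrate/height values below are assumed, not real stream data.
def _example_stream_name(bitrate=1500000, qualities=({"bitrate": 1500000, "height": 720},)):
    name = "{0:d}k".format(int(bitrate / 1000))
    for quality in qualities:
        if quality["bitrate"] == bitrate:
            name = "{0}p".format(quality["height"])
    return name  # "720p" for the assumed sample values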
| bsd-2-clause |
teichopsia-/take_brake | lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py | 252 | 106466 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib.machinery as importlib_machinery
else:
importlib_machinery = None
try:
import parser
except ImportError:
pass
import pip._vendor.packaging.version
import pip._vendor.packaging.specifiers
packaging = pip._vendor.packaging
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
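# Illustrative sketch (not part of the original module): how parse_version()
# can be used to order version strings.  The version strings below are assumed
# example inputs.
def _example_parse_version_ordering():
    # Numeric components compare numerically, unlike plain string comparison.
    assert parse_version("1.0.2") < parse_version("1.0.10")
    # Pre-releases sort before the corresponding final release.
    assert parse_version("2.0.dev1") < parse_version("2.0")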
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
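# Illustrative sketch (not part of the original module): the Mac OS X rule
# implemented above -- an egg built for an older 10.x release is usable on a
# newer one, but not the other way around.  The platform strings are assumed
# examples.
def _example_compatible_platforms():
    # None on either side always matches.
    assert compatible_platforms(None, "macosx-10.9-x86_64")
    # Built on 10.6, running on 10.9: compatible.
    assert compatible_platforms("macosx-10.6-x86_64", "macosx-10.9-x86_64")
    # Built on 10.9, running on 10.6: not compatible.
    assert not compatible_platforms("macosx-10.9-x86_64", "macosx-10.6-x86_64")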
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
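# Illustrative sketch (not part of the original module): looking up console
# scripts through the entry-point helpers above.  "example-dist" and
# "example-tool" are assumed names, not real distributions.
def _example_entry_point_lookup():
    # Map of {name: EntryPoint} for one group of a distribution.
    scripts = get_entry_map("example-dist", "console_scripts")
    # Import and return the callable behind a single entry point.
    main = load_entry_point("example-dist", "console_scripts", "example-tool")
    return scripts, main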
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
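# Illustrative sketch (not part of the original module): typical WorkingSet
# usage -- activate a requirement and then walk the active distributions.
# The requirement string is an assumed example.
def _example_working_set_usage(ws):
    # require() resolves the requirement (and its dependencies) and adds the
    # matching distributions to the working set.
    ws.require("example-dist>=1.0")
    # Iterating a WorkingSet yields one active Distribution per project.
    return [dist.project_name for dist in ws]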
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
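# Illustrative sketch (not part of the original module): scanning a plugin
# directory with Environment and picking the best candidate for a requirement.
# The "plugins" path and the requirement string are assumed examples.
def _example_environment_best_match(working_set):
    env = Environment(["plugins"])
    req = Requirement.parse("example-plugin>=1.0")
    # best_match() prefers an already-active distribution in the working set,
    # otherwise the newest compatible one found on the search path.
    return env.best_match(req, working_set)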
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
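# Illustrative sketch (not part of the original module): reading packaged data
# through the resource API.  Resource names always use '/' as the separator,
# regardless of the host OS.  "example_pkg" and the resource name are assumed
# examples.
def _example_resource_access():
    manager = ResourceManager()
    if manager.resource_exists("example_pkg", "data/defaults.cfg"):
        return manager.resource_string("example_pkg", "data/defaults.cfg")
    return None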
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
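# Illustrative sketch (not part of the original module): what the normalizers
# above produce for a few assumed inputs.
def _example_name_normalization():
    # Runs of non-alphanumeric characters collapse to a single '-'.
    assert safe_name("my_project") == "my-project"
    # Filenames escape '-' back to '_'.
    assert to_filename("my-project") == "my_project"
    # Extras are underscore-separated and lowercased.
    assert safe_extra("SSL support") == "ssl_support"
    # safe_version() first tries PEP 440 normalization, then falls back to
    # dash-escaping for non-conforming strings.
    assert safe_version("2.1.0") == "2.1.0"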
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not implemented
        on Jython and has been superseded by the 'ast' module in Python 2.6
        and later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
from pip._vendor import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError as e:
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
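# Illustrative sketch (not part of the original module): evaluating a PEP 426
# environment marker with the helpers bound above.  The marker text is an
# assumed example.
def _example_marker_evaluation():
    marker = 'python_version >= "2.6" and os_name == "posix"'
    # invalid_marker() returns False for well-formed markers, or the
    # SyntaxError describing the problem otherwise.
    if invalid_marker(marker):
        return None
    return evaluate_marker(marker)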
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_machinery is not None:
register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name=='PKG-INFO'
def get_metadata(self, name):
if name=='PKG-INFO':
with open(self.path,'rU') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
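# Illustrative sketch only (the names below are assumptions, not part of this
# module): a finder for some custom importer type would be registered as
#
#     def find_in_my_store(importer, path_item, only=False):
#         # yield Distribution instances found on `path_item`
#         return iter(())
#
#     register_finder(MyImporter, find_in_my_store)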
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_machinery is not None:
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
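# Illustrative note: a handler registered here may simply return None (no
# extra subpath), as null_ns_handler below does for the generic object type.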
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_machinery is not None:
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
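# Example (illustrative): for the stem 'FooBar-1.2-py2.7-linux_x86_64',
# EGG_NAME captures name='FooBar', ver='1.2', pyver='2.7',
# plat='linux_x86_64'; the ver, pyver and plat groups are all optional.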
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
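    # Example (illustrative, hypothetical names): parsing
    # "main = mypkg.cli:run [extra1]" yields name='main',
    # module_name='mypkg.cli', attrs=('run',) and extras=('extra1',).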
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from pip._vendor._markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
msg = "\\ must not appear on the last nonblank line"
raise RequirementParseError(msg)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise RequirementParseError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise RequirementParseError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise RequirementParseError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
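# Example (illustrative): list(parse_requirements("Foo[bar]>=1.0,<2.0")) yields
# a single Requirement with key 'foo', extras ('bar',) and specs
# [('>=', '1.0'), ('<', '2.0')].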
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
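# Example (illustrative): split_sections("a\n[x]\nb\nc") yields (None, ['a'])
# and then ('x', ['b', 'c']).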
def _mkstemp(*args,**kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
| mpl-2.0 |
sergiosvieira/ns3-dtn | src/uan/bindings/callbacks_list.py | 150 | 1150 | callback_classes = [
['void', 'int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::UanTxMode', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::UanAddress const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
weiyirong/crosswalk-1 | app/tools/android/parse_xpk.py | 39 | 3550 | #!/usr/bin/env python
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=C0301
"""The script is used to parse an XPK file.
It will do:
1. Check the magic file header;
2. Verify the signature of the XPK file;
3. Extract the content of the XPK file to some folder.
The format of XPK file can be found at
https://github.com/crosswalk-project/crosswalk-website/wiki/Crosswalk-package-management
This file is used by make_apk.py.
"""
import optparse
import os
import struct
import sys
import zipfile
EXIT_CODE_CRYPTO_NOT_FOUND = 1
EXIT_CODE_NO_XPK_FILE = 2
EXIT_CODE_XPK_FILE_NOT_EXIST = 3
EXIT_CODE_MAGIC_FAILED = 4
EXIT_CODE_VERIFICATION_FAILED = 5
EXIT_CODE_XPK_FILE_IO_ERROR = 6
XPK_MAGIC_HEAD = 'CrWk'
errorMessageMap = {
EXIT_CODE_CRYPTO_NOT_FOUND: 'Python module Crypto('\
'https://www.dlitz.net/software/pycrypto/) is needed',
EXIT_CODE_NO_XPK_FILE: 'Please specify XPK file by --file',
EXIT_CODE_XPK_FILE_NOT_EXIST: 'The XPK file you specified does not exist',
EXIT_CODE_MAGIC_FAILED: 'The file you specified is not in XPK format',
EXIT_CODE_VERIFICATION_FAILED:
'Signature verification failed for the XPK file',
EXIT_CODE_XPK_FILE_IO_ERROR: 'Error happened when reading the XPK file',
}
def HandleError(err_code):
print('Error: %s' % errorMessageMap[err_code])
sys.exit(err_code)
try:
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
except ImportError:
HandleError(EXIT_CODE_CRYPTO_NOT_FOUND)
def CheckMagic(input_file):
magic = input_file.read(4)
if magic != XPK_MAGIC_HEAD:
HandleError(EXIT_CODE_MAGIC_FAILED)
def GetPubkeySignature(input_file):
"""Return (pubkey, signature) pair"""
pubkey_size, signature_size = struct.unpack('II', input_file.read(8))
return (input_file.read(pubkey_size), input_file.read(signature_size))
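# Layout implied by the readers above (summary, for reference): 4-byte magic
# 'CrWk', two unsigned-int size fields (public key size, signature size), the
# public key, the signature, and finally the zipped package content.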
def ExtractXPKContent(input_file, zip_path):
zip_file = open(zip_path, 'wb')
zip_file.write(input_file.read())
zip_file.close()
def VerifySignature(pubkey, signature, zip_path):
zip_file = open(zip_path, 'rb')
key = RSA.importKey(pubkey)
content = SHA.new(zip_file.read())
zip_file.close()
verifier = PKCS1_v1_5.new(key)
if not verifier.verify(content, signature):
HandleError(EXIT_CODE_VERIFICATION_FAILED)
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--file', '-f', help='Path to the xpk file')
option_parser.add_option('--out', '-o', help='Path to extract the xpk to')
opts, _ = option_parser.parse_args()
if opts.file is None:
HandleError(EXIT_CODE_NO_XPK_FILE)
app_name = os.path.splitext(os.path.basename(opts.file))[0]
if opts.out is None:
opts.out = app_name
if os.path.isfile(opts.file):
zip_path = None
try:
xpk_file = open(opts.file, 'rb')
CheckMagic(xpk_file)
pubkey, signature = GetPubkeySignature(xpk_file)
zip_path = '%s.zip' % app_name
ExtractXPKContent(xpk_file, zip_path)
VerifySignature(pubkey, signature, zip_path)
zipfile.ZipFile(zip_path).extractall(opts.out)
except SystemExit as ec:
return ec.code
except IOError:
HandleError(EXIT_CODE_XPK_FILE_IO_ERROR)
finally:
xpk_file.close()
if zip_path and os.path.isfile(zip_path):
os.remove(zip_path)
else:
HandleError(EXIT_CODE_XPK_FILE_NOT_EXIST)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
stonegithubs/odoo | addons/membership/wizard/membership_invoice.py | 380 | 3229 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class membership_invoice(osv.osv_memory):
"""Membership Invoice"""
_name = "membership.invoice"
_description = "Membership Invoice"
_columns = {
'product_id': fields.many2one('product.product','Membership', required=True),
'member_price': fields.float('Member Price', digits_compute= dp.get_precision('Product Price'), required=True),
}
def onchange_product(self, cr, uid, ids, product_id=False):
"""This function returns value of product's member price based on product id.
"""
if not product_id:
return {'value': {'member_price': False}}
return {'value': {'member_price': self.pool.get('product.product').price_get(cr, uid, [product_id])[product_id]}}
def membership_invoice(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
partner_obj = self.pool.get('res.partner')
datas = {}
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)
if data:
data = data[0]
datas = {
'membership_product_id': data.product_id.id,
'amount': data.member_price
}
invoice_list = partner_obj.create_membership_invoice(cr, uid, context.get('active_ids', []), datas=datas, context=context)
try:
search_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'view_account_invoice_filter')[1]
except ValueError:
search_view_id = False
try:
form_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')[1]
except ValueError:
form_view_id = False
return {
'domain': [('id', 'in', invoice_list)],
'name': 'Membership Invoices',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'type': 'ir.actions.act_window',
'views': [(False, 'tree'), (form_view_id, 'form')],
'search_view_id': search_view_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DMRookie/RoomAI | models/crm/crm_kuhn/play.py | 1 | 2054 | #!/bin/python
import roomai.kuhn
import roomai.common
import random
class HumanInputPlayer(roomai.common.AbstractPlayer):
"""
"""
def receive_info(self, info):
"""
Args:
info:
"""
available_actions = info
def take_action(self):
"""
Returns:
"""
        action = raw_input("chosen_action:")
#action = ""
return roomai.kuhn.KuhnPokerAction.lookup(action)
def reset(self):
"""
"""
pass
def show_person(person_state):
print ("%s"%(person_state.id) + "'s card:%d"%(person_state.card))
print ("%s"%(person_state.id) + "'s available_actions:\t" + " ".join(person_state.available_actions.keys()))
def show_public(public_state):
print ("turn:%d"%public_state.turn)
if __name__ == "__main__":
import crm_kuhn
crm_player = crm_kuhn.KuhnPokerCRMPlayer()
import algorithms
algo = algorithms.CRMAlgorithm()
env = roomai.kuhn.KuhnPokerEnv()
for i in range(10000):
algo.dfs(env = env, player=crm_player, p0 = 1, p1 = 1, deep = 0)
print crm_player.regrets
print crm_player.strategies
crm_player.is_train = False
players = [HumanInputPlayer(), crm_player]
num_players = len(players)
infos, public_state, person_states, private_state = env.init({"num_players":2,"record_history":False})
for i in range(num_players):
players[i].receive_info(infos[i])
show_person(infos[i].person_state)
show_public(public_state)
print ("\n")
while public_state.is_terminal == False:
turn = public_state.turn
action = players[turn].take_action()
print "%d player take an action (%s)"%(turn,action.key)
infos, public_state, person_states, private_state = env.forward(action)
for i in range(num_players):
players[i].receive_info(infos[i])
show_person(infos[i].person_state)
show_public(public_state)
print ("\n")
print (public_state.scores)
| mit |
tavendo/AutobahnPython | examples/asyncio/wamp/pubsub/unsubscribe/frontend.py | 3 | 2752 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
import asyncio
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that subscribes and receives events.
After receiving 5 events, it unsubscribes, sleeps and then
resubscribes for another run. Then it stops.
"""
async def test(self):
self.received = 0
async def on_event(i):
print("Got event: {}".format(i))
self.received += 1
if self.received > 5:
self.runs += 1
if self.runs > 1:
self.leave()
else:
await self.subscription.unsubscribe()
print("Unsubscribed .. continue in 5s ..")
# can't use loop.call_later() with a coroutine for some reason
await asyncio.sleep(5)
await self.test()
self.subscription = await self.subscribe(on_event, 'com.myapp.topic1')
print("Subscribed with subscription ID {}".format(self.subscription.id))
async def onJoin(self, details):
self.runs = 0
await self.test()
def onDisconnect(self):
asyncio.get_event_loop().stop()
if __name__ == '__main__':
url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
realm = "crossbardemo"
runner = ApplicationRunner(url, realm)
runner.run(Component)
| mit |
sachinkum/Bal-Aveksha | WebServer/BalAvekshaEnv/lib/python3.5/site-packages/django/utils/safestring.py | 41 | 4575 | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
import warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import Promise, curry
class EscapeData(object):
pass
class EscapeBytes(bytes, EscapeData):
"""
A byte string that should be HTML-escaped when output.
"""
pass
class EscapeText(six.text_type, EscapeData):
"""
A unicode string object that should be HTML-escaped when output.
"""
pass
if six.PY3:
EscapeString = EscapeText
else:
EscapeString = EscapeBytes
# backwards compatibility for Python 2
EscapeUnicode = EscapeText
class SafeData(object):
def __html__(self):
"""
Returns the html representation of a string for interoperability.
This allows other template engines to understand Django's SafeData.
"""
return self
class SafeBytes(bytes, SafeData):
"""
A bytes subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe byte string with another safe byte string or safe
unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeBytes, self).__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
decode = curry(_proxy_method, method=bytes.decode)
class SafeText(six.text_type, SafeData):
"""
A unicode (Python 2) / str (Python 3) subclass that has been specifically
marked as "safe" for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe unicode string with another safe byte string or
safe unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeText, self).__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
encode = curry(_proxy_method, method=six.text_type.encode)
if six.PY3:
SafeString = SafeText
else:
SafeString = SafeBytes
# backwards compatibility for Python 2
SafeUnicode = SafeText
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string or unicode object is appropriate.
Can be called multiple times on a single string.
"""
if hasattr(s, '__html__'):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return SafeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return SafeText(s)
return SafeString(str(s))
def mark_for_escaping(s):
"""
Explicitly mark a string as requiring HTML escaping upon output. Has no
effect on SafeData subclasses.
Can be called multiple times on a single string (the resulting escaping is
only applied once).
"""
warnings.warn('mark_for_escaping() is deprecated.', RemovedInDjango20Warning)
if hasattr(s, '__html__') or isinstance(s, EscapeData):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return EscapeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return EscapeText(s)
return EscapeString(str(s))
| gpl-3.0 |
gangadharkadam/shfr | frappe/widgets/query_builder.py | 35 | 6998 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
out = frappe.response
from frappe.utils import cint
import frappe.defaults
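# get_sql_tables: pull the table names out of a query's FROM clause and strip
# the "tab" prefix, e.g. "`tabSales Invoice`" becomes "Sales Invoice".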
def get_sql_tables(q):
if q.find('WHERE') != -1:
tl = q.split('FROM')[1].split('WHERE')[0].split(',')
elif q.find('GROUP BY') != -1:
tl = q.split('FROM')[1].split('GROUP BY')[0].split(',')
else:
tl = q.split('FROM')[1].split('ORDER BY')[0].split(',')
return [t.strip().strip('`')[3:] for t in tl]
def get_parent_dt(dt):
pdt = ''
if frappe.db.sql('select name from `tabDocType` where istable=1 and name=%s', dt):
import frappe.model.meta
return frappe.model.meta.get_parent_dt(dt)
return pdt
def get_sql_meta(tl):
std_columns = {
'owner':('Owner', '', '', '100'),
'creation':('Created on', 'Date', '', '100'),
'modified':('Last modified on', 'Date', '', '100'),
'modified_by':('Modified By', '', '', '100')
}
meta = {}
for dt in tl:
meta[dt] = std_columns.copy()
# for table doctype, the ID is the parent id
pdt = get_parent_dt(dt)
if pdt:
meta[dt]['parent'] = ('ID', 'Link', pdt, '200')
# get the field properties from DocField
res = frappe.db.sql("select fieldname, label, fieldtype, options, width \
from tabDocField where parent=%s", dt)
for r in res:
if r[0]:
meta[dt][r[0]] = (r[1], r[2], r[3], r[4]);
# name
meta[dt]['name'] = ('ID', 'Link', dt, '200')
return meta
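# add_match_conditions: append the current user's permission ("match")
# conditions for each table to the query's WHERE clause, keeping any
# GROUP BY / ORDER BY / LIMIT parts in place.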
def add_match_conditions(q, tl):
from frappe.widgets.reportview import build_match_conditions
sl = []
for dt in tl:
s = build_match_conditions(dt)
if s:
sl.append(s)
# insert the conditions
if sl:
condition_st = q.find('WHERE')!=-1 and ' AND ' or ' WHERE '
condition_end = q.find('ORDER BY')!=-1 and 'ORDER BY' or 'LIMIT'
condition_end = q.find('GROUP BY')!=-1 and 'GROUP BY' or condition_end
if q.find('ORDER BY')!=-1 or q.find('LIMIT')!=-1 or q.find('GROUP BY')!=-1: # if query continues beyond conditions
q = q.split(condition_end)
q = q[0] + condition_st + '(' + ' OR '.join(sl) + ') ' + condition_end + q[1]
else:
q = q + condition_st + '(' + ' OR '.join(sl) + ')'
return q
def guess_type(m):
"""
Returns fieldtype depending on the MySQLdb Description
"""
import MySQLdb
if m in MySQLdb.NUMBER:
return 'Currency'
elif m in MySQLdb.DATE:
return 'Date'
else:
return 'Data'
def build_description_simple():
colnames, coltypes, coloptions, colwidths = [], [], [], []
for m in frappe.db.get_description():
colnames.append(m[0])
		coltypes.append(guess_type(m[1]))
coloptions.append('')
colwidths.append('100')
return colnames, coltypes, coloptions, colwidths
def build_description_standard(meta, tl):
desc = frappe.db.get_description()
colnames, coltypes, coloptions, colwidths = [], [], [], []
# merged metadata - used if we are unable to
# get both the table name and field name from
# the description - in case of joins
merged_meta = {}
for d in meta:
merged_meta.update(meta[d])
for f in desc:
fn, dt = f[0], ''
if '.' in fn:
dt, fn = fn.split('.')
if (not dt) and merged_meta.get(fn):
# no "AS" given, find type from merged description
desc = merged_meta[fn]
colnames.append(desc[0] or fn)
coltypes.append(desc[1] or '')
coloptions.append(desc[2] or '')
colwidths.append(desc[3] or '100')
elif meta.get(dt,{}).has_key(fn):
# type specified for a multi-table join
# usually from Report Builder
desc = meta[dt][fn]
colnames.append(desc[0] or fn)
coltypes.append(desc[1] or '')
coloptions.append(desc[2] or '')
colwidths.append(desc[3] or '100')
else:
# nothing found
# guess
colnames.append(fn)
coltypes.append(guess_type(f[1]))
coloptions.append('')
colwidths.append('100')
return colnames, coltypes, coloptions, colwidths
@frappe.whitelist()
def runquery(q='', ret=0, from_export=0):
import frappe.utils
formatted = cint(frappe.form_dict.get('formatted'))
# CASE A: Simple Query
# --------------------
if frappe.form_dict.get('simple_query') or frappe.form_dict.get('is_simple'):
if not q: q = frappe.form_dict.get('simple_query') or frappe.form_dict.get('query')
if q.split()[0].lower() != 'select':
raise Exception, 'Query must be a SELECT'
as_dict = cint(frappe.form_dict.get('as_dict'))
res = frappe.db.sql(q, as_dict = as_dict, as_list = not as_dict, formatted=formatted)
# build colnames etc from metadata
colnames, coltypes, coloptions, colwidths = [], [], [], []
# CASE B: Standard Query
# -----------------------
else:
if not q: q = frappe.form_dict.get('query')
tl = get_sql_tables(q)
meta = get_sql_meta(tl)
q = add_match_conditions(q, tl)
# replace special variables
q = q.replace('__user', frappe.session.user)
q = q.replace('__today', frappe.utils.nowdate())
res = frappe.db.sql(q, as_list=1, formatted=formatted)
colnames, coltypes, coloptions, colwidths = build_description_standard(meta, tl)
# run server script
# -----------------
style, header_html, footer_html, page_template = '', '', '', ''
out['colnames'] = colnames
out['coltypes'] = coltypes
out['coloptions'] = coloptions
out['colwidths'] = colwidths
out['header_html'] = header_html
out['footer_html'] = footer_html
out['page_template'] = page_template
if style:
out['style'] = style
# just the data - return
if ret==1:
return res
out['values'] = res
# return num of entries
qm = frappe.form_dict.get('query_max') or ''
if qm and qm.strip():
if qm.split()[0].lower() != 'select':
raise Exception, 'Query (Max) must be a SELECT'
if not frappe.form_dict.get('simple_query'):
qm = add_match_conditions(qm, tl)
out['n_values'] = frappe.utils.cint(frappe.db.sql(qm)[0][0])
@frappe.whitelist()
def runquery_csv():
global out
# run query
res = runquery(from_export = 1)
q = frappe.form_dict.get('query')
rep_name = frappe.form_dict.get('report_name')
if not frappe.form_dict.get('simple_query'):
# Report Name
if not rep_name:
rep_name = get_sql_tables(q)[0]
if not rep_name: rep_name = 'DataExport'
# Headings
heads = []
rows = [[rep_name], out['colnames']] + out['values']
from cStringIO import StringIO
import csv
f = StringIO()
writer = csv.writer(f)
for r in rows:
# encode only unicode type strings and not int, floats etc.
writer.writerow(map(lambda v: isinstance(v, unicode) and v.encode('utf-8') or v, r))
f.seek(0)
out['result'] = unicode(f.read(), 'utf-8')
out['type'] = 'csv'
out['doctype'] = rep_name
def add_limit_to_query(query, args):
"""
Add limit condition to query
can be used by methods called in listing to add limit condition
"""
if args.get('limit_page_length'):
query += """
limit %(limit_start)s, %(limit_page_length)s"""
import frappe.utils
args['limit_start'] = frappe.utils.cint(args.get('limit_start'))
args['limit_page_length'] = frappe.utils.cint(args.get('limit_page_length'))
return query, args
| mit |
aleida/django | tests/regressiontests/file_uploads/views.py | 6 | 4373 | from __future__ import absolute_import, unicode_literals
import hashlib
import json
import os
from django.core.files.uploadedfile import UploadedFile
from django.http import HttpResponse, HttpResponseServerError
from .models import FileModel, UPLOAD_TO
from .tests import UNICODE_FILENAME
from .uploadhandler import QuotaUploadHandler, ErroringUploadHandler
def file_upload_view(request):
"""
Check that a file upload can be updated into the POST dictionary without
going pear-shaped.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], unicode):
# If a file is posted, the dummy client should only post the file name,
# not the full path.
if os.path.dirname(form_data['file_field'].name) != '':
return HttpResponseServerError()
return HttpResponse('')
else:
return HttpResponseServerError()
def file_upload_view_verify(request):
"""
Use the sha digest hash to verify the uploaded contents.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
for key, value in form_data.items():
if key.endswith('_hash'):
continue
if key + '_hash' not in form_data:
continue
submitted_hash = form_data[key + '_hash']
if isinstance(value, UploadedFile):
new_hash = hashlib.sha1(value.read()).hexdigest()
else:
new_hash = hashlib.sha1(value).hexdigest()
if new_hash != submitted_hash:
return HttpResponseServerError()
# Adding large file to the database should succeed
largefile = request.FILES['file_field2']
obj = FileModel()
obj.testfile.save(largefile.name, largefile)
return HttpResponse('')
def file_upload_unicode_name(request):
# Check to see if unicode name came through properly.
if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME):
return HttpResponseServerError()
response = None
# Check to make sure the exotic characters are preserved even
# through file save.
uni_named_file = request.FILES['file_unicode']
obj = FileModel.objects.create(testfile=uni_named_file)
full_name = '%s/%s' % (UPLOAD_TO, uni_named_file.name)
if not os.path.exists(full_name):
response = HttpResponseServerError()
# Cleanup the object with its exotic file name immediately.
# (shutil.rmtree used elsewhere in the tests to clean up the
# upload directory has been seen to choke on unicode
# filenames on Windows.)
obj.delete()
os.unlink(full_name)
if response:
return response
else:
return HttpResponse('')
def file_upload_echo(request):
"""
Simple view to echo back info about uploaded files for tests.
"""
r = dict([(k, f.name) for k, f in request.FILES.items()])
return HttpResponse(json.dumps(r))
def file_upload_echo_content(request):
"""
Simple view to echo back the content of uploaded files for tests.
"""
r = dict([(k, f.read()) for k, f in request.FILES.items()])
return HttpResponse(json.dumps(r))
def file_upload_quota(request):
"""
Dynamically add in an upload handler.
"""
request.upload_handlers.insert(0, QuotaUploadHandler())
return file_upload_echo(request)
def file_upload_quota_broken(request):
"""
You can't change handlers after reading FILES; this view shouldn't work.
"""
response = file_upload_echo(request)
request.upload_handlers.insert(0, QuotaUploadHandler())
return response
def file_upload_getlist_count(request):
"""
Check the .getlist() function to ensure we receive the correct number of files.
"""
file_counts = {}
for key in request.FILES.keys():
file_counts[key] = len(request.FILES.getlist(key))
return HttpResponse(json.dumps(file_counts))
def file_upload_errors(request):
request.upload_handlers.insert(0, ErroringUploadHandler())
return file_upload_echo(request)
def file_upload_filename_case_view(request):
"""
Check adding the file to the database will preserve the filename case.
"""
file = request.FILES['file_field']
obj = FileModel()
obj.testfile.save(file.name, file)
return HttpResponse('%d' % obj.pk)
| bsd-3-clause |
BT-ojossen/odoo | addons/l10n_do/__openerp__.py | 309 | 2992 | # -*- coding: utf-8 -*-
# #############################################################################
#
# First author: Jose Ernesto Mendez <[email protected]> (Open Business Solutions SRL.)
# Copyright (c) 2012 -TODAY Open Business Solutions, SRL. (http://obsdr.com). All rights reserved.
#
# This is a fork to upgrade to odoo 8.0
# by Marcos Organizador de Negocios - Eneldo Serrata - www.marcos.org.do
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Marcos Organizador de Negocios.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
{
'name': 'Dominican Republic - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Dominican Republic.
==============================================================================
* Chart of Accounts.
* The Tax Code Chart for Dominican Republic
* The main taxes used in Dominican Republic
* Fiscal position for local """,
'author': 'Eneldo Serrata - Marcos Organizador de Negocios, SRL.',
'website': 'http://marcos.do',
'depends': ['account', 'base_iban'],
'data': [
# basic accounting data
'data/ir_sequence_type.xml',
'data/ir_sequence.xml',
'data/account_journal.xml',
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account_chart_template.xml',
'data/account.tax.template.csv',
'data/l10n_do_base_data.xml',
# Adds fiscal position
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
# configuration wizard, views, reports...
'data/l10n_do_wizard.xml'
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
algorhythms/LintCode | Maximal Square.py | 4 | 2163 | """
Given a 2D binary matrix filled with 0's and 1's, find the largest square containing all 1's and return its area.
Example
For example, given the following matrix:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
Return 4.
"""
__author__ = 'Daniel'
class Solution:
def maxSquare(self, matrix):
"""
Algorithm:
O(n^2)
let F_{i, j} represents the max square's length ended at matrix_{i, j} (lower right corner).
F_{i, j} = min{F_{i-1, j}, F_{i, j-1}, F_{i-1, j-1}}+1 // if matrix{i, j} == 1
F_{i, j} = 0 // otherwise
O(n^3)
sandwich approach
:param matrix: a matrix of 0 and 1
:return: an integer
"""
M = len(matrix)
N = len(matrix[0])
F = [[0 for _ in xrange(N+1)] for _ in xrange(M+1)]
gmax = 0
for i in xrange(1, M+1):
for j in xrange(1, N+1):
if matrix[i-1][j-1] == 1:
F[i][j] = min(F[i-1][j], F[i][j-1], F[i-1][j-1])+1
gmax = max(gmax, F[i][j])
return gmax*gmax
def maxSquare_error(self, matrix):
"""
stack
:param matrix: a matrix of 0 and 1
:return: an integer
"""
M = len(matrix)
N = len(matrix[0])
h = [[0 for _ in xrange(N+1)] for _ in xrange(M+1)]
for i in xrange(1, M+1):
for j in xrange(1, N+1):
if matrix[i-1][j-1] == 1:
h[i][j] = h[i-1][j]+1
else:
h[i][j] = 0
ret = 0
for i in xrange(M):
stk = [] # col index, inc_stk
for j in xrange(N):
while stk and h[i+1][stk[-1]+1] >= h[i+1][j+1]:
stk.pop()
idx = -1
if stk: idx = stk[-1]
cur_square = min(j-idx, h[i+1][j+1])
cur_square *= cur_square
ret = max(ret, cur_square)
stk.append(j)
return ret
if __name__ == "__main__":
assert Solution().maxSquare([[0,1,0,1,1,0],[1,0,1,0,1,1],[1,1,1,1,1,0],[1,1,1,1,1,1],[0,0,1,1,1,0],[1,1,1,0,1,1]]
) == 9 | apache-2.0 |
leomorsy/medical8.0 | oemedical_emr/models/oemedical_prescription_order.py | 3 | 3191 | # -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.techreceptives.com>)
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
from openerp import netsvc
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import time
class OeMedicalPrescriptionOrder(orm.Model):
_name='oemedical.prescription.order'
_columns={
'patient_id': fields.many2one('oemedical.patient', string='Patient', required=True),
        'pregnancy_warning': fields.boolean(string='Pregnancy Warning', readonly=True),
'notes': fields.text(string='Prescription Notes'),
'prescription_line': fields.one2many('oemedical.prescription.line', 'name', string='Prescription line',),
'pharmacy': fields.many2one('res.partner', string='Pharmacy',),
'prescription_date': fields.datetime(string='Prescription Date'),
'prescription_warning_ack': fields.boolean( string='Prescription verified'),
'physician_id': fields.many2one('oemedical.physician', string='Prescribing Doctor', required=True),
'name': fields.char(size=256, string='Prescription ID', required=True, help='Type in the ID of this prescription'),
}
_defaults={
'name': lambda obj, cr, uid, context:
obj.pool.get('ir.sequence').get(cr, uid,
'oemedical.prescription.order'),
'prescription_date':lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def print_prescription(self, cr, uid, ids, context=None):
'''
'''
# assert len(ids) == 1, 'This option should only be used for a single id at a time'
# wf_service = netsvc.LocalService("workflow")
# wf_service.trg_validate(uid, 'oemedical.prescription.order', ids[0], 'prescription_sent', cr)
datas = {
'model': 'oemedical.prescription.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {'type': 'ir.actions.report.xml', 'report_name': 'prescription.order', 'datas': datas, 'nodestroy': True}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kosz85/django | django/db/migrations/executor.py | 18 | 17479 | from django.apps.registry import apps as global_apps
from django.db import migrations, router
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor:
"""
End-to-end migration execution - load migrations and run them up or down
to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, return a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def _create_project_state(self, with_applied_migrations=False):
"""
Create a project state including all the applications without
migrations and applied migrations if with_applied_migrations=True.
"""
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if with_applied_migrations:
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
for migration, _ in full_plan:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
return state
def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False):
"""
Migrate the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
# The django_migrations table must be present to record applied
# migrations.
self.recorder.ensure_schema()
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
if state is None:
# The resulting state should include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan
)
elif all_forwards:
if state is None:
# The resulting state should still include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
else:
# No need to check for `elif all_backwards` here, as that condition
# would always evaluate to true.
state = self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
return state
def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial):
"""
Take a list of 2-tuples of the form (migration instance, False) and
apply them in the order they occur in the full_plan.
"""
migrations_to_run = {m[0] for m in plan}
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from these sets so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
migrations_to_run.remove(migration)
return state
def _migrate_all_backwards(self, plan, full_plan, fake):
"""
Take a list of 2-tuples of the form (migration instance, True) and
unapply them in reverse order they occur in the full_plan.
Since unapplying a migration requires the project state prior to that
migration, Django will compute the migration states before each of them
in a first run over the plan and then unapply them in a second run over
the plan.
"""
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = self._create_project_state()
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
state.apps # Render all -- performance critical
# The state before this migration
states[migration] = state
# The old state keeps as-is, we continue with the new state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
elif migration in applied_migrations:
# Only mutate the state if the migration is actually applied
# to make sure the resulting state doesn't include changes
# from unrelated migrations.
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
applied_migrations.remove(migration)
# Generate the post migration state by starting from the state before
# the last migration is unapplied and mutating it to include all the
# remaining applied migrations.
last_unapplied_migration = plan[-1][0]
state = states[last_unapplied_migration]
for index, (migration, _) in enumerate(full_plan):
if migration == last_unapplied_migration:
for migration, _ in full_plan[index:]:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
break
return state
def collect_sql(self, plan):
"""
Take a migration plan and return a list of collected SQL statements
that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""Run a migration forwards."""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.apply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
"""Run a migration backwards."""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
Do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but the applied state of the squashed migration must be
maintained.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Test whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
def should_skip_detecting_model(migration, model):
"""
No need to detect tables for proxy models, unmanaged models, or
models that can't be migrated on the current database.
"""
return (
model._meta.proxy or not model._meta.managed or not
router.allow_migrate(
self.connection.alias, migration.app_label,
model_name=model._meta.model_name,
)
)
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
existing_table_names = self.connection.introspection.table_names(self.connection.cursor())
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
if model._meta.db_table not in existing_table_names:
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
table = model._meta.db_table
field = model._meta.get_field(operation.name)
# Handle implicit many-to-many tables created by AddField.
if field.many_to_many:
if field.remote_field.through._meta.db_table not in existing_table_names:
return False, project_state
else:
found_add_field_migration = True
continue
column_names = [
column.name for column in
self.connection.introspection.get_table_description(self.connection.cursor(), table)
]
if field.column not in column_names:
return False, project_state
found_add_field_migration = True
# If we get this far and we found at least one CreateModel or AddField migration,
# the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
| bsd-3-clause |
vnsofthe/odoo | openerp/addons/base/ir/ir_qweb.py | 38 | 64677 | # -*- coding: utf-8 -*-
import collections
import cStringIO
import datetime
import hashlib
import json
import itertools
import logging
import math
import os
import re
import sys
import textwrap
import uuid
from subprocess import Popen, PIPE
from urlparse import urlparse
import babel
import babel.dates
import werkzeug
from lxml import etree, html
from PIL import Image
import psycopg2
import openerp.http
import openerp.tools
from openerp.tools.func import lazy_property
import openerp.tools.lru
from openerp.http import request
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import osv, orm, fields
from openerp.tools import html_escape as escape
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_CSS_RULES = 4095
#--------------------------------------------------------------------
# QWeb template engine
#--------------------------------------------------------------------
class QWebException(Exception):
def __init__(self, message, **kw):
Exception.__init__(self, message)
self.qweb = dict(kw)
def pretty_xml(self):
if 'node' not in self.qweb:
return ''
return etree.tostring(self.qweb['node'], pretty_print=True)
class QWebTemplateNotFound(QWebException):
pass
def raise_qweb_exception(etype=None, **kw):
if etype is None:
etype = QWebException
orig_type, original, tb = sys.exc_info()
try:
raise etype, original, tb
except etype, e:
for k, v in kw.items():
e.qweb[k] = v
# Will use `raise foo from bar` in python 3 and rename cause to __cause__
e.qweb['cause'] = original
raise
def _build_attribute(name, value):
value = escape(value)
if isinstance(name, unicode): name = name.encode('utf-8')
if isinstance(value, unicode): value = value.encode('utf-8')
return ' %s="%s"' % (name, value)
class QWebContext(dict):
def __init__(self, cr, uid, data, loader=None, templates=None, context=None):
self.cr = cr
self.uid = uid
self.loader = loader
self.templates = templates or {}
self.context = context
dic = dict(data)
super(QWebContext, self).__init__(dic)
self['defined'] = lambda key: key in self
def safe_eval(self, expr):
locals_dict = collections.defaultdict(lambda: None)
locals_dict.update(self)
locals_dict.pop('cr', None)
locals_dict.pop('loader', None)
return eval(expr, None, locals_dict, nocopy=True, locals_builtins=True)
def copy(self):
""" Clones the current context, conserving all data and metadata
(loader, template cache, ...)
"""
return QWebContext(self.cr, self.uid, dict.copy(self),
loader=self.loader,
templates=self.templates,
context=self.context)
def __copy__(self):
return self.copy()
class QWeb(orm.AbstractModel):
""" Base QWeb rendering engine
* to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and
create new models called :samp:`ir.qweb.field.{widget}`
* alternatively, override :meth:`~.get_converter_for` and return an
arbitrary model to use as field converter
Beware that if you need extensions or alterations which could be
incompatible with other subsystems, you should create a local object
inheriting from ``ir.qweb`` and customize that.
"""
_name = 'ir.qweb'
_void_elements = frozenset([
u'area', u'base', u'br', u'col', u'embed', u'hr', u'img', u'input',
u'keygen', u'link', u'menuitem', u'meta', u'param', u'source',
u'track', u'wbr'])
_format_regex = re.compile(
'(?:'
# ruby-style pattern
'#\{(.+?)\}'
')|(?:'
# jinja-style pattern
'\{\{(.+?)\}\}'
')')
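    # eval_format() (below) substitutes both interpolation styles, e.g. in a
    # t-attf-* attribute: "section_#{record.id}" and "section_{{ record.id }}"
    # are both replaced by the value of the evaluated expression.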
def __init__(self, pool, cr):
super(QWeb, self).__init__(pool, cr)
self._render_tag = self.prefixed_methods('render_tag_')
self._render_att = self.prefixed_methods('render_att_')
def prefixed_methods(self, prefix):
""" Extracts all methods prefixed by ``prefix``, and returns a mapping
of (t-name, method) where the t-name is the method name with prefix
removed and underscore converted to dashes
:param str prefix:
:return: dict
"""
n_prefix = len(prefix)
return dict(
(name[n_prefix:].replace('_', '-'), getattr(type(self), name))
for name in dir(self)
if name.startswith(prefix)
)
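        # e.g. with prefix 'render_tag_', a method named render_tag_call_assets
        # is registered under the tag name 'call-assets'.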
def register_tag(self, tag, func):
self._render_tag[tag] = func
def add_template(self, qwebcontext, name, node):
"""Add a parsed template in the context. Used to preprocess templates."""
qwebcontext.templates[name] = node
def load_document(self, document, res_id, qwebcontext):
"""
Loads an XML document and installs any contained template in the engine
:type document: a parsed lxml.etree element, an unparsed XML document
(as a string) or the path of an XML file to load
"""
if not isinstance(document, basestring):
# assume lxml.etree.Element
dom = document
elif document.startswith("<?xml"):
dom = etree.fromstring(document)
else:
dom = etree.parse(document).getroot()
for node in dom:
if node.get('t-name'):
name = str(node.get("t-name"))
self.add_template(qwebcontext, name, node)
if res_id and node.tag == "t":
self.add_template(qwebcontext, res_id, node)
res_id = None
def get_template(self, name, qwebcontext):
""" Tries to fetch the template ``name``, either gets it from the
context's template cache or loads one with the context's loader (if
any).
:raises QWebTemplateNotFound: if the template can not be found or loaded
"""
origin_template = qwebcontext.get('__caller__') or qwebcontext['__stack__'][0]
if qwebcontext.loader and name not in qwebcontext.templates:
try:
xml_doc = qwebcontext.loader(name)
except ValueError:
raise_qweb_exception(QWebTemplateNotFound, message="Loader could not find template %r" % name, template=origin_template)
self.load_document(xml_doc, isinstance(name, (int, long)) and name or None, qwebcontext=qwebcontext)
if name in qwebcontext.templates:
return qwebcontext.templates[name]
raise QWebTemplateNotFound("Template %r not found" % name, template=origin_template)
def eval(self, expr, qwebcontext):
try:
return qwebcontext.safe_eval(expr)
except Exception:
template = qwebcontext.get('__template__')
raise_qweb_exception(message="Could not evaluate expression %r" % expr, expression=expr, template=template)
def eval_object(self, expr, qwebcontext):
return self.eval(expr, qwebcontext)
def eval_str(self, expr, qwebcontext):
if expr == "0":
return qwebcontext.get(0, '')
val = self.eval(expr, qwebcontext)
if isinstance(val, unicode):
return val.encode("utf8")
if val is False or val is None:
return ''
return str(val)
def eval_format(self, expr, qwebcontext):
expr, replacements = self._format_regex.subn(
lambda m: self.eval_str(m.group(1) or m.group(2), qwebcontext),
expr
)
if replacements:
return expr
try:
return str(expr % qwebcontext)
except Exception:
template = qwebcontext.get('__template__')
raise_qweb_exception(message="Format error for expression %r" % expr, expression=expr, template=template)
def eval_bool(self, expr, qwebcontext):
return int(bool(self.eval(expr, qwebcontext)))
def render(self, cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None):
""" render(cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None)
Renders the template specified by the provided template name
:param qwebcontext: context for rendering the template
:type qwebcontext: dict or :class:`QWebContext` instance
:param loader: if ``qwebcontext`` is a dict, loader set into the
context instantiated for rendering
"""
if qwebcontext is None:
qwebcontext = {}
if not isinstance(qwebcontext, QWebContext):
qwebcontext = QWebContext(cr, uid, qwebcontext, loader=loader, context=context)
qwebcontext['__template__'] = id_or_xml_id
stack = qwebcontext.get('__stack__', [])
if stack:
qwebcontext['__caller__'] = stack[-1]
stack.append(id_or_xml_id)
qwebcontext['__stack__'] = stack
qwebcontext['xmlid'] = str(stack[0]) # Temporary fix
return self.render_node(self.get_template(id_or_xml_id, qwebcontext), qwebcontext)
def render_node(self, element, qwebcontext):
generated_attributes = ""
t_render = None
template_attributes = {}
for (attribute_name, attribute_value) in element.attrib.iteritems():
attribute_name = unicode(attribute_name)
if attribute_name == "groups":
cr = qwebcontext.get('request') and qwebcontext['request'].cr or None
uid = qwebcontext.get('request') and qwebcontext['request'].uid or None
can_see = self.user_has_groups(cr, uid, groups=attribute_value) if cr and uid else False
if not can_see:
return ''
attribute_value = attribute_value.encode("utf8")
if attribute_name.startswith("t-"):
for attribute in self._render_att:
if attribute_name[2:].startswith(attribute):
attrs = self._render_att[attribute](
self, element, attribute_name, attribute_value, qwebcontext)
for att, val in attrs:
if not val: continue
generated_attributes += self.render_attribute(element, att, val, qwebcontext)
break
else:
if attribute_name[2:] in self._render_tag:
t_render = attribute_name[2:]
template_attributes[attribute_name[2:]] = attribute_value
else:
generated_attributes += self.render_attribute(element, attribute_name, attribute_value, qwebcontext)
if 'debug' in template_attributes:
debugger = template_attributes.get('debug', 'pdb')
__import__(debugger).set_trace() # pdb, ipdb, pudb, ...
if t_render:
result = self._render_tag[t_render](self, element, template_attributes, generated_attributes, qwebcontext)
else:
result = self.render_element(element, template_attributes, generated_attributes, qwebcontext)
if element.tail:
result += element.tail.encode('utf-8')
if isinstance(result, unicode):
return result.encode('utf-8')
return result
def render_element(self, element, template_attributes, generated_attributes, qwebcontext, inner=None):
# element: element
# template_attributes: t-* attributes
# generated_attributes: generated attributes
# qwebcontext: values
# inner: optional innerXml
if inner:
g_inner = inner.encode('utf-8') if isinstance(inner, unicode) else inner
else:
g_inner = [] if element.text is None else [element.text.encode('utf-8')]
for current_node in element.iterchildren(tag=etree.Element):
try:
g_inner.append(self.render_node(current_node, qwebcontext))
except QWebException:
raise
except Exception:
template = qwebcontext.get('__template__')
raise_qweb_exception(message="Could not render element %r" % element.tag, node=element, template=template)
name = unicode(element.tag)
inner = "".join(g_inner)
trim = template_attributes.get("trim", 0)
if trim == 0:
pass
elif trim == 'left':
inner = inner.lstrip()
elif trim == 'right':
inner = inner.rstrip()
elif trim == 'both':
inner = inner.strip()
if name == "t":
return inner
elif len(inner) or name not in self._void_elements:
return "<%s%s>%s</%s>" % tuple(
                part if isinstance(part, str) else part.encode('utf-8')
                for part in (name, generated_attributes, inner, name)
)
else:
return "<%s%s/>" % (name.encode("utf-8"), generated_attributes)
def render_attribute(self, element, name, value, qwebcontext):
return _build_attribute(name, value)
# Attributes
def render_att_att(self, element, attribute_name, attribute_value, qwebcontext):
if attribute_name.startswith("t-attf-"):
return [(attribute_name[7:], self.eval_format(attribute_value, qwebcontext))]
if attribute_name.startswith("t-att-"):
return [(attribute_name[6:], self.eval(attribute_value, qwebcontext))]
result = self.eval_object(attribute_value, qwebcontext)
if isinstance(result, collections.Mapping):
return result.iteritems()
# assume tuple
return [result]
# Tags
def render_tag_raw(self, element, template_attributes, generated_attributes, qwebcontext):
inner = self.eval_str(template_attributes["raw"], qwebcontext)
return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner)
def render_tag_esc(self, element, template_attributes, generated_attributes, qwebcontext):
options = json.loads(template_attributes.get('esc-options') or '{}')
widget = self.get_widget_for(options.get('widget'))
inner = widget.format(template_attributes['esc'], options, qwebcontext)
return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner)
def _iterate(self, iterable):
if isinstance (iterable, collections.Mapping):
return iterable.iteritems()
return itertools.izip(*itertools.tee(iterable))
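    # t-foreach exposes several helper variables inside the loop body, derived
    # from the t-as name: <varname>, <varname>_value, <varname>_index,
    # <varname>_first, <varname>_last, <varname>_size, <varname>_all and the
    # parity flags <varname>_parity / <varname>_even / <varname>_odd.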
def render_tag_foreach(self, element, template_attributes, generated_attributes, qwebcontext):
expr = template_attributes["foreach"]
enum = self.eval_object(expr, qwebcontext)
if enum is None:
template = qwebcontext.get('__template__')
raise QWebException("foreach enumerator %r is not defined while rendering template %r" % (expr, template), template=template)
if isinstance(enum, int):
enum = range(enum)
varname = template_attributes['as'].replace('.', '_')
copy_qwebcontext = qwebcontext.copy()
size = None
if isinstance(enum, collections.Sized):
size = len(enum)
copy_qwebcontext["%s_size" % varname] = size
copy_qwebcontext["%s_all" % varname] = enum
ru = []
for index, (item, value) in enumerate(self._iterate(enum)):
copy_qwebcontext.update({
varname: item,
'%s_value' % varname: value,
'%s_index' % varname: index,
'%s_first' % varname: index == 0,
})
if size is not None:
copy_qwebcontext['%s_last' % varname] = index + 1 == size
if index % 2:
copy_qwebcontext.update({
'%s_parity' % varname: 'odd',
'%s_even' % varname: False,
'%s_odd' % varname: True,
})
else:
copy_qwebcontext.update({
'%s_parity' % varname: 'even',
'%s_even' % varname: True,
'%s_odd' % varname: False,
})
ru.append(self.render_element(element, template_attributes, generated_attributes, copy_qwebcontext))
for k in qwebcontext.keys():
qwebcontext[k] = copy_qwebcontext[k]
return "".join(ru)
def render_tag_if(self, element, template_attributes, generated_attributes, qwebcontext):
if self.eval_bool(template_attributes["if"], qwebcontext):
return self.render_element(element, template_attributes, generated_attributes, qwebcontext)
return ""
def render_tag_call(self, element, template_attributes, generated_attributes, qwebcontext):
d = qwebcontext.copy()
d[0] = self.render_element(element, template_attributes, generated_attributes, d)
cr = d.get('request') and d['request'].cr or None
uid = d.get('request') and d['request'].uid or None
template = self.eval_format(template_attributes["call"], d)
try:
template = int(template)
except ValueError:
pass
return self.render(cr, uid, template, d)
def render_tag_call_assets(self, element, template_attributes, generated_attributes, qwebcontext):
""" This special 't-call' tag can be used in order to aggregate/minify javascript and css assets"""
if len(element):
            # An asset bundle is rendered in two different contexts (when generating html and
# when generating the bundle itself) so they must be qwebcontext free
# even '0' variable is forbidden
template = qwebcontext.get('__template__')
raise QWebException("t-call-assets cannot contain children nodes", template=template)
xmlid = template_attributes['call-assets']
cr, uid, context = [getattr(qwebcontext, attr) for attr in ('cr', 'uid', 'context')]
bundle = AssetsBundle(xmlid, cr=cr, uid=uid, context=context, registry=self.pool)
css = self.get_attr_bool(template_attributes.get('css'), default=True)
js = self.get_attr_bool(template_attributes.get('js'), default=True)
return bundle.to_html(css=css, js=js, debug=bool(qwebcontext.get('debug')))
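    # t-set stores a value in the rendering context: with t-value the expression
    # is evaluated as a Python object, with t-valuef it is string-formatted, and
    # with neither the rendered body of the element is used as the value.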
def render_tag_set(self, element, template_attributes, generated_attributes, qwebcontext):
if "value" in template_attributes:
qwebcontext[template_attributes["set"]] = self.eval_object(template_attributes["value"], qwebcontext)
elif "valuef" in template_attributes:
qwebcontext[template_attributes["set"]] = self.eval_format(template_attributes["valuef"], qwebcontext)
else:
qwebcontext[template_attributes["set"]] = self.render_element(element, template_attributes, generated_attributes, qwebcontext)
return ""
def render_tag_field(self, element, template_attributes, generated_attributes, qwebcontext):
""" eg: <span t-record="browse_record(res.partner, 1)" t-field="phone">+1 555 555 8069</span>"""
node_name = element.tag
assert node_name not in ("table", "tbody", "thead", "tfoot", "tr", "td",
"li", "ul", "ol", "dl", "dt", "dd"),\
"RTE widgets do not work correctly on %r elements" % node_name
assert node_name != 't',\
"t-field can not be used on a t element, provide an actual HTML node"
record, field_name = template_attributes["field"].rsplit('.', 1)
record = self.eval_object(record, qwebcontext)
field = record._fields[field_name]
options = json.loads(template_attributes.get('field-options') or '{}')
field_type = get_field_type(field, options)
converter = self.get_converter_for(field_type)
return converter.to_html(qwebcontext.cr, qwebcontext.uid, field_name, record, options,
element, template_attributes, generated_attributes, qwebcontext, context=qwebcontext.context)
def get_converter_for(self, field_type):
""" returns a :class:`~openerp.models.Model` used to render a
``t-field``.
By default, tries to get the model named
:samp:`ir.qweb.field.{field_type}`, falling back on ``ir.qweb.field``.
:param str field_type: type or widget of field to render
"""
return self.pool.get('ir.qweb.field.' + field_type, self.pool['ir.qweb.field'])
def get_widget_for(self, widget):
""" returns a :class:`~openerp.models.Model` used to render a
``t-esc``
:param str widget: name of the widget to use, or ``None``
"""
widget_model = ('ir.qweb.widget.' + widget) if widget else 'ir.qweb.widget'
return self.pool.get(widget_model) or self.pool['ir.qweb.widget']
def get_attr_bool(self, attr, default=False):
if attr:
attr = attr.lower()
if attr in ('false', '0'):
return False
elif attr in ('true', '1'):
return True
return default
#--------------------------------------------------------------------
# QWeb Fields converters
#--------------------------------------------------------------------
class FieldConverter(osv.AbstractModel):
""" Used to convert a t-field specification into an output HTML field.
:meth:`~.to_html` is the entry point of this conversion from QWeb, it:
* converts the record value to html using :meth:`~.record_to_html`
* generates the metadata attributes (``data-oe-``) to set on the root
result node
* generates the root result node itself through :meth:`~.render_element`
"""
_name = 'ir.qweb.field'
def attributes(self, cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context,
context=None):
""" attributes(cr, uid, field_name, record, options, source_element, g_att, t_att, qweb_context, context=None)
Generates the metadata attributes (prefixed by ``data-oe-`` for the
root node of the field conversion. Attribute values are escaped by the
parent.
The default attributes are:
* ``model``, the name of the record's model
* ``id`` the id of the record to which the field belongs
* ``field`` the name of the converted field
* ``type`` the logical field type (widget, may not match the field's
``type``, may not be any Field subclass name)
* ``translate``, a boolean flag (``0`` or ``1``) denoting whether the
field is translatable
* ``expression``, the original expression
:returns: iterable of (attribute name, attribute value) pairs.
"""
field = record._fields[field_name]
field_type = get_field_type(field, options)
return [
('data-oe-model', record._name),
('data-oe-id', record.id),
('data-oe-field', field_name),
('data-oe-type', field_type),
('data-oe-expression', t_att['field']),
]
def value_to_html(self, cr, uid, value, field, options=None, context=None):
""" value_to_html(cr, uid, value, field, options=None, context=None)
Converts a single value to its HTML version/output
"""
if not value: return ''
return value
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
""" record_to_html(cr, uid, field_name, record, options=None, context=None)
Converts the specified field of the browse_record ``record`` to HTML
"""
field = record._fields[field_name]
return self.value_to_html(
cr, uid, record[field_name], field, options=options, context=context)
def to_html(self, cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=None):
""" to_html(cr, uid, field_name, record, options, source_element, t_att, g_att, qweb_context, context=None)
Converts a ``t-field`` to its HTML output. A ``t-field`` may be
extended by a ``t-field-options``, which is a JSON-serialized mapping
of configuration values.
A default configuration key is ``widget`` which can override the
field's own ``_type``.
"""
try:
content = self.record_to_html(cr, uid, field_name, record, options, context=context)
if options.get('html-escape', True):
content = escape(content)
elif hasattr(content, '__html__'):
content = content.__html__()
except Exception:
_logger.warning("Could not get field %s for model %s",
field_name, record._name, exc_info=True)
content = None
inherit_branding = context and context.get('inherit_branding')
if not inherit_branding and context and context.get('inherit_branding_auto'):
inherit_branding = self.pool['ir.model.access'].check(cr, uid, record._name, 'write', False, context=context)
if inherit_branding:
# add branding attributes
g_att += ''.join(
_build_attribute(name, value)
for name, value in self.attributes(
cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context, context=context)
)
return self.render_element(cr, uid, source_element, t_att, g_att,
qweb_context, content)
def qweb_object(self):
return self.pool['ir.qweb']
def render_element(self, cr, uid, source_element, t_att, g_att,
qweb_context, content):
""" render_element(cr, uid, source_element, t_att, g_att, qweb_context, content)
Final rendering hook, by default just calls ir.qweb's ``render_element``
"""
return self.qweb_object().render_element(
source_element, t_att, g_att, qweb_context, content or '')
def user_lang(self, cr, uid, context):
""" user_lang(cr, uid, context)
Fetches the res.lang object corresponding to the language code stored
in the user's context. Fallbacks to en_US if no lang is present in the
context *or the language code is not valid*.
:returns: res.lang browse_record
"""
if context is None: context = {}
lang_code = context.get('lang') or 'en_US'
Lang = self.pool['res.lang']
lang_ids = Lang.search(cr, uid, [('code', '=', lang_code)], context=context) \
or Lang.search(cr, uid, [('code', '=', 'en_US')], context=context)
return Lang.browse(cr, uid, lang_ids[0], context=context)
class FloatConverter(osv.AbstractModel):
_name = 'ir.qweb.field.float'
_inherit = 'ir.qweb.field'
def precision(self, cr, uid, field, options=None, context=None):
_, precision = field.digits or (None, None)
return precision
def value_to_html(self, cr, uid, value, field, options=None, context=None):
if context is None:
context = {}
precision = self.precision(cr, uid, field, options=options, context=context)
fmt = '%f' if precision is None else '%.{precision}f'
lang_code = context.get('lang') or 'en_US'
lang = self.pool['res.lang']
formatted = lang.format(cr, uid, [lang_code], fmt.format(precision=precision), value, grouping=True)
# %f does not strip trailing zeroes. %g does but its precision causes
# it to switch to scientific notation starting at a million *and* to
# strip decimals. So use %f and if no precision was specified manually
# strip trailing 0.
if precision is None:
formatted = re.sub(r'(?:(0|\d+?)0+)$', r'\1', formatted)
return formatted
class DateConverter(osv.AbstractModel):
_name = 'ir.qweb.field.date'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
if not value or len(value)<10: return ''
lang = self.user_lang(cr, uid, context=context)
locale = babel.Locale.parse(lang.code)
if isinstance(value, basestring):
value = datetime.datetime.strptime(
value[:10], openerp.tools.DEFAULT_SERVER_DATE_FORMAT)
if options and 'format' in options:
pattern = options['format']
else:
strftime_pattern = lang.date_format
pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale)
return babel.dates.format_date(
value, format=pattern,
locale=locale)
class DateTimeConverter(osv.AbstractModel):
_name = 'ir.qweb.field.datetime'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
if not value: return ''
lang = self.user_lang(cr, uid, context=context)
locale = babel.Locale.parse(lang.code)
if isinstance(value, basestring):
value = datetime.datetime.strptime(
value, openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT)
value = fields.datetime.context_timestamp(
cr, uid, timestamp=value, context=context)
if options and 'format' in options:
pattern = options['format']
else:
strftime_pattern = (u"%s %s" % (lang.date_format, lang.time_format))
pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale)
if options and options.get('hide_seconds'):
pattern = pattern.replace(":ss", "").replace(":s", "")
return babel.dates.format_datetime(value, format=pattern, locale=locale)
def record_to_html(self, cr, uid, field_name, record, options, context=None):
        field = record._fields[field_name]
value = record[field_name]
return self.value_to_html(
cr, uid, value, field, options=options, context=dict(context, **record.env.context))
class TextConverter(osv.AbstractModel):
_name = 'ir.qweb.field.text'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
"""
        Escapes the value and converts newlines to <br> tags.
"""
if not value: return ''
return nl2br(value, options=options)
class SelectionConverter(osv.AbstractModel):
_name = 'ir.qweb.field.selection'
_inherit = 'ir.qweb.field'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
value = record[field_name]
if not value: return ''
field = record._fields[field_name]
selection = dict(field.get_description(record.env)['selection'])
return self.value_to_html(
cr, uid, selection[value], field, options=options)
class ManyToOneConverter(osv.AbstractModel):
_name = 'ir.qweb.field.many2one'
_inherit = 'ir.qweb.field'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
[read] = record.read([field_name])
if not read[field_name]: return ''
_, value = read[field_name]
return nl2br(value, options=options)
class HTMLConverter(osv.AbstractModel):
_name = 'ir.qweb.field.html'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
return HTMLSafe(value or '')
class ImageConverter(osv.AbstractModel):
""" ``image`` widget rendering, inserts a data:uri-using image tag in the
document. May be overridden by e.g. the website module to generate links
instead.
.. todo:: what happens if different output need different converters? e.g.
reports may need embedded images or FS links whereas website
needs website-aware
"""
_name = 'ir.qweb.field.image'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
try:
image = Image.open(cStringIO.StringIO(value.decode('base64')))
image.verify()
except IOError:
raise ValueError("Non-image binary fields can not be converted to HTML")
except: # image.verify() throws "suitable exceptions", I have no idea what they are
raise ValueError("Invalid image content")
return HTMLSafe('<img src="data:%s;base64,%s">' % (Image.MIME[image.format], value))
class MonetaryConverter(osv.AbstractModel):
""" ``monetary`` converter, has a mandatory option
``display_currency``.
The currency is used for formatting *and rounding* of the float value. It
is assumed that the linked res_currency has a non-empty rounding value and
res.currency's ``round`` method is used to perform rounding.
.. note:: the monetary converter internally adds the qweb context to its
options mapping, so that the context is available to callees.
It's set under the ``_qweb_context`` key.
"""
_name = 'ir.qweb.field.monetary'
_inherit = 'ir.qweb.field'
def to_html(self, cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=None):
options['_qweb_context'] = qweb_context
return super(MonetaryConverter, self).to_html(
cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=context)
def record_to_html(self, cr, uid, field_name, record, options, context=None):
if context is None:
context = {}
Currency = self.pool['res.currency']
display_currency = self.display_currency(cr, uid, options['display_currency'], options)
# lang.format mandates a sprintf-style format. These formats are non-
# minimal (they have a default fixed precision instead), and
# lang.format will not set one by default. currency.round will not
# provide one either. So we need to generate a precision value
# (integer > 0) from the currency's rounding (a float generally < 1.0).
#
# The log10 of the rounding should be the number of digits involved if
# negative, if positive clamp to 0 digits and call it a day.
# nb: int() ~ floor(), we want nearest rounding instead
precision = int(math.floor(math.log10(display_currency.rounding)))
fmt = "%.{0}f".format(-precision if precision < 0 else 0)
from_amount = record[field_name]
if options.get('from_currency'):
from_currency = self.display_currency(cr, uid, options['from_currency'], options)
from_amount = Currency.compute(cr, uid, from_currency.id, display_currency.id, from_amount)
lang_code = context.get('lang') or 'en_US'
lang = self.pool['res.lang']
formatted_amount = lang.format(cr, uid, [lang_code],
fmt, Currency.round(cr, uid, display_currency, from_amount),
grouping=True, monetary=True)
pre = post = u''
if display_currency.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'
else:
post = u'\N{NO-BREAK SPACE}{symbol}'
return HTMLSafe(u'{pre}<span class="oe_currency_value">{0}</span>{post}'.format(
formatted_amount,
pre=pre, post=post,
).format(
symbol=display_currency.symbol,
))
def display_currency(self, cr, uid, currency, options):
return self.qweb_object().eval_object(
currency, options['_qweb_context'])
TIMEDELTA_UNITS = (
('year', 3600 * 24 * 365),
('month', 3600 * 24 * 30),
('week', 3600 * 24 * 7),
('day', 3600 * 24),
('hour', 3600),
('minute', 60),
('second', 1)
)
class DurationConverter(osv.AbstractModel):
""" ``duration`` converter, to display integral or fractional values as
human-readable time spans (e.g. 1.5 as "1 hour 30 minutes").
Can be used on any numerical field.
Has a mandatory option ``unit`` which can be one of ``second``, ``minute``,
``hour``, ``day``, ``week`` or ``year``, used to interpret the numerical
field value before converting it.
Sub-second values will be ignored.
"""
_name = 'ir.qweb.field.duration'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
units = dict(TIMEDELTA_UNITS)
if value < 0:
raise ValueError(_("Durations can't be negative"))
if not options or options.get('unit') not in units:
raise ValueError(_("A unit must be provided to duration widgets"))
locale = babel.Locale.parse(
self.user_lang(cr, uid, context=context).code)
factor = units[options['unit']]
sections = []
r = value * factor
for unit, secs_per_unit in TIMEDELTA_UNITS:
v, r = divmod(r, secs_per_unit)
if not v: continue
section = babel.dates.format_timedelta(
v*secs_per_unit, threshold=1, locale=locale)
if section:
sections.append(section)
return ' '.join(sections)
class RelativeDatetimeConverter(osv.AbstractModel):
_name = 'ir.qweb.field.relative'
_inherit = 'ir.qweb.field'
def value_to_html(self, cr, uid, value, field, options=None, context=None):
parse_format = openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT
locale = babel.Locale.parse(
self.user_lang(cr, uid, context=context).code)
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, parse_format)
# value should be a naive datetime in UTC. So is fields.Datetime.now()
reference = datetime.datetime.strptime(field.now(), parse_format)
return babel.dates.format_timedelta(
value - reference, add_direction=True, locale=locale)
class Contact(orm.AbstractModel):
_name = 'ir.qweb.field.contact'
_inherit = 'ir.qweb.field.many2one'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
if context is None:
context = {}
if options is None:
options = {}
opf = options.get('fields') or ["name", "address", "phone", "mobile", "fax", "email"]
value_rec = record[field_name]
if not value_rec:
return None
value_rec = value_rec.sudo().with_context(show_address=True)
value = value_rec.name_get()[0][1]
val = {
'name': value.split("\n")[0],
'address': escape("\n".join(value.split("\n")[1:])),
'phone': value_rec.phone,
'mobile': value_rec.mobile,
'fax': value_rec.fax,
'city': value_rec.city,
'country_id': value_rec.country_id.display_name,
'website': value_rec.website,
'email': value_rec.email,
'fields': opf,
'object': value_rec,
'options': options
}
html = self.pool["ir.ui.view"].render(cr, uid, "base.contact", val, engine='ir.qweb', context=context).decode('utf8')
return HTMLSafe(html)
class QwebView(orm.AbstractModel):
_name = 'ir.qweb.field.qweb'
_inherit = 'ir.qweb.field.many2one'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
if not getattr(record, field_name):
return None
view = getattr(record, field_name)
if view._model._name != "ir.ui.view":
_logger.warning("%s.%s must be a 'ir.ui.view' model." % (record, field_name))
return None
ctx = (context or {}).copy()
ctx['object'] = record
html = view.render(ctx, engine='ir.qweb', context=ctx).decode('utf8')
return HTMLSafe(html)
class QwebWidget(osv.AbstractModel):
_name = 'ir.qweb.widget'
def _format(self, inner, options, qwebcontext):
return self.pool['ir.qweb'].eval_str(inner, qwebcontext)
def format(self, inner, options, qwebcontext):
return escape(self._format(inner, options, qwebcontext))
class QwebWidgetMonetary(osv.AbstractModel):
_name = 'ir.qweb.widget.monetary'
_inherit = 'ir.qweb.widget'
def _format(self, inner, options, qwebcontext):
inner = self.pool['ir.qweb'].eval(inner, qwebcontext)
display = self.pool['ir.qweb'].eval_object(options['display_currency'], qwebcontext)
precision = int(round(math.log10(display.rounding)))
fmt = "%.{0}f".format(-precision if precision < 0 else 0)
lang_code = qwebcontext.context.get('lang') or 'en_US'
formatted_amount = self.pool['res.lang'].format(
qwebcontext.cr, qwebcontext.uid, [lang_code], fmt, inner, grouping=True, monetary=True
)
pre = post = u''
if display.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'
else:
post = u'\N{NO-BREAK SPACE}{symbol}'
return u'{pre}{0}{post}'.format(
formatted_amount, pre=pre, post=post
).format(symbol=display.symbol,)
class HTMLSafe(object):
""" HTMLSafe string wrapper, Werkzeug's escape() has special handling for
objects with a ``__html__`` methods but AFAIK does not provide any such
object.
Wrapping a string in HTML will prevent its escaping
"""
__slots__ = ['string']
def __init__(self, string):
self.string = string
def __html__(self):
return self.string
def __str__(self):
s = self.string
if isinstance(s, unicode):
return s.encode('utf-8')
return s
def __unicode__(self):
s = self.string
if isinstance(s, str):
return s.decode('utf-8')
return s
def nl2br(string, options=None):
""" Converts newlines to HTML linebreaks in ``string``. Automatically
escapes content unless options['html-escape'] is set to False, and returns
the result wrapped in an HTMLSafe object.
:param str string:
:param dict options:
:rtype: HTMLSafe
"""
if options is None: options = {}
if options.get('html-escape', True):
string = escape(string)
return HTMLSafe(string.replace('\n', '<br>\n'))
def get_field_type(field, options):
""" Gets a t-field's effective type from the field definition and its options """
return options.get('widget', field.type)
class AssetError(Exception):
pass
class AssetNotFound(AssetError):
pass
class AssetsBundle(object):
# Sass installation:
#
# sudo gem install sass compass bootstrap-sass
#
# If the following error is encountered:
# 'ERROR: Cannot load compass.'
# Use this:
# sudo gem install compass --pre
cmd_sass = ['sass', '--stdin', '-t', 'compressed', '--unix-newlines', '--compass', '-r', 'bootstrap-sass']
rx_css_import = re.compile("(@import[^;{]+;?)", re.M)
rx_sass_import = re.compile("""(@import\s?['"]([^'"]+)['"])""")
rx_css_split = re.compile("\/\*\! ([a-f0-9-]+) \*\/")
def __init__(self, xmlid, debug=False, cr=None, uid=None, context=None, registry=None):
self.xmlid = xmlid
self.cr = request.cr if cr is None else cr
self.uid = request.uid if uid is None else uid
self.context = request.context if context is None else context
self.registry = request.registry if registry is None else registry
self.javascripts = []
self.stylesheets = []
self.css_errors = []
self.remains = []
self._checksum = None
context = self.context.copy()
context['inherit_branding'] = False
context['inherit_branding_auto'] = False
context['rendering_bundle'] = True
self.html = self.registry['ir.ui.view'].render(self.cr, self.uid, xmlid, context=context)
self.parse()
def parse(self):
fragments = html.fragments_fromstring(self.html)
for el in fragments:
if isinstance(el, basestring):
self.remains.append(el)
elif isinstance(el, html.HtmlElement):
src = el.get('src', '')
href = el.get('href', '')
atype = el.get('type')
media = el.get('media')
if el.tag == 'style':
if atype == 'text/sass' or src.endswith('.sass'):
self.stylesheets.append(SassAsset(self, inline=el.text, media=media))
else:
self.stylesheets.append(StylesheetAsset(self, inline=el.text, media=media))
elif el.tag == 'link' and el.get('rel') == 'stylesheet' and self.can_aggregate(href):
if href.endswith('.sass') or atype == 'text/sass':
self.stylesheets.append(SassAsset(self, url=href, media=media))
else:
self.stylesheets.append(StylesheetAsset(self, url=href, media=media))
elif el.tag == 'script' and not src:
self.javascripts.append(JavascriptAsset(self, inline=el.text))
elif el.tag == 'script' and self.can_aggregate(src):
self.javascripts.append(JavascriptAsset(self, url=src))
else:
self.remains.append(html.tostring(el))
else:
try:
self.remains.append(html.tostring(el))
except Exception:
                    # not yet implemented for this kind of node
raise NotImplementedError
def can_aggregate(self, url):
return not urlparse(url).netloc and not url.startswith(('/web/css', '/web/js'))
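    # Illustrative examples (assumption): relative, not-yet-bundled urls are
    # aggregated, external or already-bundled ones are not, e.g.
    #   can_aggregate('/web/static/src/js/boot.js')   -> True
    #   can_aggregate('http://cdn.example.com/x.js')  -> False
    #   can_aggregate('/web/js/some_bundle/1')        -> False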
def to_html(self, sep=None, css=True, js=True, debug=False):
if sep is None:
sep = '\n '
response = []
if debug:
if css and self.stylesheets:
self.compile_sass()
for style in self.stylesheets:
response.append(style.to_html())
if js:
for jscript in self.javascripts:
response.append(jscript.to_html())
else:
url_for = self.context.get('url_for', lambda url: url)
if css and self.stylesheets:
suffix = ''
if request:
ua = request.httprequest.user_agent
if ua.browser == "msie" and int((ua.version or '0').split('.')[0]) < 10:
suffix = '.0'
href = '/web/css%s/%s/%s' % (suffix, self.xmlid, self.version)
response.append('<link href="%s" rel="stylesheet"/>' % url_for(href))
if js:
src = '/web/js/%s/%s' % (self.xmlid, self.version)
response.append('<script type="text/javascript" src="%s"></script>' % url_for(src))
response.extend(self.remains)
return sep + sep.join(response)
@lazy_property
def last_modified(self):
"""Returns last modified date of linked files"""
return max(itertools.chain(
(asset.last_modified for asset in self.javascripts),
(asset.last_modified for asset in self.stylesheets),
))
@lazy_property
def version(self):
return self.checksum[0:7]
@lazy_property
def checksum(self):
"""
Not really a full checksum.
We compute a SHA1 on the rendered bundle + max linked files last_modified date
"""
check = self.html + str(self.last_modified)
return hashlib.sha1(check).hexdigest()
def js(self):
content = self.get_cache('js')
if content is None:
content = ';\n'.join(asset.minify() for asset in self.javascripts)
self.set_cache('js', content)
return content
def css(self, page_number=None):
if page_number is not None:
return self.css_page(page_number)
content = self.get_cache('css')
if content is None:
self.compile_sass()
content = '\n'.join(asset.minify() for asset in self.stylesheets)
if self.css_errors:
msg = '\n'.join(self.css_errors)
content += self.css_message(msg.replace('\n', '\\A '))
# move up all @import rules to the top
matches = []
def push(matchobj):
matches.append(matchobj.group(0))
return ''
content = re.sub(self.rx_css_import, push, content)
matches.append(content)
content = u'\n'.join(matches)
if not self.css_errors:
self.set_cache('css', content)
content = content.encode('utf-8')
return content
def css_page(self, page_number):
content = self.get_cache('css.%d' % (page_number,))
if page_number:
return content
if content is None:
css = self.css().decode('utf-8')
re_rules = '([^{]+\{(?:[^{}]|\{[^{}]*\})*\})'
re_selectors = '()(?:\s*@media\s*[^{]*\{)?(?:\s*(?:[^,{]*(?:,|\{(?:[^}]*\}))))'
css_url = '@import url(\'/web/css.%%d/%s/%s\');' % (self.xmlid, self.version)
pages = [[]]
page = pages[0]
page_selectors = 0
for rule in re.findall(re_rules, css):
selectors = len(re.findall(re_selectors, rule))
if page_selectors + selectors < MAX_CSS_RULES:
page_selectors += selectors
page.append(rule)
else:
pages.append([rule])
page = pages[-1]
page_selectors = selectors
if len(pages) == 1:
pages = []
for idx, page in enumerate(pages):
self.set_cache("css.%d" % (idx+1), ''.join(page))
content = '\n'.join(css_url % i for i in range(1,len(pages)+1))
self.set_cache("css.0", content)
if not content:
return self.css()
return content
def get_cache(self, type):
content = None
domain = [('url', '=', '/web/%s/%s/%s' % (type, self.xmlid, self.version))]
bundle = self.registry['ir.attachment'].search_read(self.cr, openerp.SUPERUSER_ID, domain, ['datas'], context=self.context)
if bundle and bundle[0]['datas']:
content = bundle[0]['datas'].decode('base64')
return content
def set_cache(self, type, content):
ira = self.registry['ir.attachment']
url_prefix = '/web/%s/%s/' % (type, self.xmlid)
# Invalidate previous caches
try:
with self.cr.savepoint():
domain = [('url', '=like', url_prefix + '%')]
oids = ira.search(self.cr, openerp.SUPERUSER_ID, domain, context=self.context)
if oids:
ira.unlink(self.cr, openerp.SUPERUSER_ID, oids, context=self.context)
url = url_prefix + self.version
ira.create(self.cr, openerp.SUPERUSER_ID, dict(
datas=content.encode('utf8').encode('base64'),
type='binary',
name=url,
url=url,
), context=self.context)
except psycopg2.Error:
pass
def css_message(self, message):
return """
body:before {
background: #ffc;
width: 100%%;
font-size: 14px;
font-family: monospace;
white-space: pre;
content: "%s";
}
""" % message.replace('"', '\\"')
def compile_sass(self):
"""
Checks if the bundle contains any sass content, then compiles it to css.
Css compilation is done at the bundle level and not in the assets
        because they are potentially interdependent.
"""
sass = [asset for asset in self.stylesheets if isinstance(asset, SassAsset)]
if not sass:
return
source = '\n'.join([asset.get_source() for asset in sass])
# move up all @import rules to the top and exclude file imports
imports = []
def push(matchobj):
ref = matchobj.group(2)
line = '@import "%s"' % ref
if '.' not in ref and line not in imports and not ref.startswith(('.', '/', '~')):
imports.append(line)
return ''
source = re.sub(self.rx_sass_import, push, source)
imports.append(source)
source = u'\n'.join(imports)
try:
compiler = Popen(self.cmd_sass, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except Exception:
msg = "Could not find 'sass' program needed to compile sass/scss files"
_logger.error(msg)
self.css_errors.append(msg)
return
result = compiler.communicate(input=source.encode('utf-8'))
if compiler.returncode:
error = self.get_sass_error(''.join(result), source=source)
_logger.warning(error)
self.css_errors.append(error)
return
compiled = result[0].strip().decode('utf8')
fragments = self.rx_css_split.split(compiled)[1:]
while fragments:
asset_id = fragments.pop(0)
asset = next(asset for asset in sass if asset.id == asset_id)
asset._content = fragments.pop(0)
def get_sass_error(self, stderr, source=None):
# TODO: try to find out which asset the error belongs to
error = stderr.split('Load paths')[0].replace(' Use --trace for backtrace.', '')
error += "This error occured while compiling the bundle '%s' containing:" % self.xmlid
for asset in self.stylesheets:
if isinstance(asset, SassAsset):
error += '\n - %s' % (asset.url if asset.url else '<inline sass>')
return error
class WebAsset(object):
html_url = '%s'
def __init__(self, bundle, inline=None, url=None):
self.id = str(uuid.uuid4())
self.bundle = bundle
self.inline = inline
self.url = url
self.cr = bundle.cr
self.uid = bundle.uid
self.registry = bundle.registry
self.context = bundle.context
self._content = None
self._filename = None
self._ir_attach = None
name = '<inline asset>' if inline else url
self.name = "%s defined in bundle '%s'" % (name, bundle.xmlid)
if not inline and not url:
raise Exception("An asset should either be inlined or url linked")
def stat(self):
if not (self.inline or self._filename or self._ir_attach):
addon = filter(None, self.url.split('/'))[0]
try:
# Test url against modules static assets
mpath = openerp.http.addons_manifest[addon]['addons_path']
self._filename = mpath + self.url.replace('/', os.path.sep)
except Exception:
try:
# Test url against ir.attachments
fields = ['__last_update', 'datas', 'mimetype']
domain = [('type', '=', 'binary'), ('url', '=', self.url)]
ira = self.registry['ir.attachment']
attach = ira.search_read(self.cr, openerp.SUPERUSER_ID, domain, fields, context=self.context)
self._ir_attach = attach[0]
except Exception:
raise AssetNotFound("Could not find %s" % self.name)
def to_html(self):
raise NotImplementedError()
@lazy_property
def last_modified(self):
try:
self.stat()
if self._filename:
return datetime.datetime.fromtimestamp(os.path.getmtime(self._filename))
elif self._ir_attach:
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
last_update = self._ir_attach['__last_update']
try:
return datetime.datetime.strptime(last_update, server_format + '.%f')
except ValueError:
return datetime.datetime.strptime(last_update, server_format)
except Exception:
pass
return datetime.datetime(1970, 1, 1)
@property
def content(self):
if not self._content:
self._content = self.inline or self._fetch_content()
return self._content
def _fetch_content(self):
""" Fetch content from file or database"""
try:
self.stat()
if self._filename:
with open(self._filename, 'rb') as fp:
return fp.read().decode('utf-8')
else:
return self._ir_attach['datas'].decode('base64')
except UnicodeDecodeError:
raise AssetError('%s is not utf-8 encoded.' % self.name)
except IOError:
raise AssetNotFound('File %s does not exist.' % self.name)
except:
raise AssetError('Could not get content for %s.' % self.name)
def minify(self):
return self.content
def with_header(self, content=None):
if content is None:
content = self.content
return '\n/* %s */\n%s' % (self.name, content)
class JavascriptAsset(WebAsset):
def minify(self):
return self.with_header(rjsmin(self.content))
def _fetch_content(self):
try:
return super(JavascriptAsset, self)._fetch_content()
except AssetError, e:
return "console.error(%s);" % json.dumps(e.message)
def to_html(self):
if self.url:
return '<script type="text/javascript" src="%s"></script>' % (self.html_url % self.url)
else:
return '<script type="text/javascript" charset="utf-8">%s</script>' % self.with_header()
class StylesheetAsset(WebAsset):
rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U)
rx_sourceMap = re.compile(r'(/\*# sourceMappingURL=.*)', re.U)
rx_charset = re.compile(r'(@charset "[^"]+";)', re.U)
def __init__(self, *args, **kw):
self.media = kw.pop('media', None)
super(StylesheetAsset, self).__init__(*args, **kw)
@property
def content(self):
content = super(StylesheetAsset, self).content
if self.media:
content = '@media %s { %s }' % (self.media, content)
return content
def _fetch_content(self):
try:
content = super(StylesheetAsset, self)._fetch_content()
web_dir = os.path.dirname(self.url)
content = self.rx_import.sub(
r"""@import \1%s/""" % (web_dir,),
content,
)
content = self.rx_url.sub(
r"url(\1%s/" % (web_dir,),
content,
)
# remove charset declarations, we only support utf-8
content = self.rx_charset.sub('', content)
except AssetError, e:
self.bundle.css_errors.append(e.message)
return ''
return content
def minify(self):
        # remove existing sourcemaps; they make no sense after re-minification
content = self.rx_sourceMap.sub('', self.content)
# comments
content = re.sub(r'/\*.*?\*/', '', content, flags=re.S)
# space
content = re.sub(r'\s+', ' ', content)
content = re.sub(r' *([{}]) *', r'\1', content)
return self.with_header(content)
def to_html(self):
media = (' media="%s"' % werkzeug.utils.escape(self.media)) if self.media else ''
if self.url:
href = self.html_url % self.url
return '<link rel="stylesheet" href="%s" type="text/css"%s/>' % (href, media)
else:
return '<style type="text/css"%s>%s</style>' % (media, self.with_header())
class SassAsset(StylesheetAsset):
html_url = '%s.css'
rx_indent = re.compile(r'^( +|\t+)', re.M)
indent = None
reindent = ' '
def minify(self):
return self.with_header()
def to_html(self):
if self.url:
try:
ira = self.registry['ir.attachment']
url = self.html_url % self.url
domain = [('type', '=', 'binary'), ('url', '=', self.url)]
with self.cr.savepoint():
ira_id = ira.search(self.cr, openerp.SUPERUSER_ID, domain, context=self.context)
if ira_id:
# TODO: update only if needed
ira.write(self.cr, openerp.SUPERUSER_ID, [ira_id], {'datas': self.content},
context=self.context)
else:
ira.create(self.cr, openerp.SUPERUSER_ID, dict(
datas=self.content.encode('utf8').encode('base64'),
mimetype='text/css',
type='binary',
name=url,
url=url,
), context=self.context)
except psycopg2.Error:
pass
return super(SassAsset, self).to_html()
def get_source(self):
content = textwrap.dedent(self.inline or self._fetch_content())
def fix_indent(m):
ind = m.group()
if self.indent is None:
self.indent = ind
if self.indent == self.reindent:
                # Don't reindent the file if its indentation is already the target one (reindent)
raise StopIteration()
return ind.replace(self.indent, self.reindent)
try:
content = self.rx_indent.sub(fix_indent, content)
except StopIteration:
pass
return "/*! %s */\n%s" % (self.id, content)
def rjsmin(script):
""" Minify js with a clever regex.
Taken from http://opensource.perlig.de/rjsmin
Apache License, Version 2.0 """
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
(groups[6] and ' ') or
(groups[7] and ' ') or
''
)
result = re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
return result
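# Illustrative usage (assumed output): comments and insignificant whitespace
# are stripped while required spaces survive, e.g. something like
#   rjsmin("var a = 1; // note\nvar b  =  2;")
# is expected to come back roughly as "var a=1;\nvar b=2;".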
# vim:et:
| agpl-3.0 |
espadrine/opera | chromium/src/third_party/python_26/Lib/site-packages/win32/Demos/eventLogDemo.py | 17 | 3273 | import win32evtlog, traceback
import win32api, win32con
import win32security # To translate NT Sids to account names.
from win32evtlogutil import *
def ReadLog(computer, logType="Application", dumpEachRecord = 0):
# read the entire log back.
h=win32evtlog.OpenEventLog(computer, logType)
numRecords = win32evtlog.GetNumberOfEventLogRecords(h)
# print "There are %d records" % numRecords
num=0
while 1:
objects = win32evtlog.ReadEventLog(h, win32evtlog.EVENTLOG_BACKWARDS_READ|win32evtlog.EVENTLOG_SEQUENTIAL_READ, 0)
if not objects:
break
for object in objects:
            # get it for testing purposes, but don't print it.
msg = SafeFormatMessage(object, logType).encode("mbcs")
if object.Sid is not None:
try:
domain, user, typ = win32security.LookupAccountSid(computer, object.Sid)
sidDesc = "%s/%s" % (domain, user)
except win32security.error:
sidDesc = str(object.Sid)
user_desc = "Event associated with user %s" % (sidDesc,)
else:
user_desc = None
if dumpEachRecord:
if user_desc:
print user_desc
print msg
num = num + len(objects)
if numRecords == num:
print "Successfully read all", numRecords, "records"
else:
print "Couldn't get all records - reported %d, but found %d" % (numRecords, num)
print "(Note that some other app may have written records while we were running!)"
win32evtlog.CloseEventLog(h)
def Usage():
print "Writes an event to the event log."
print "-w : Dont write any test records."
print "-r : Dont read the event log"
print "-c : computerName : Process the log on the specified computer"
print "-v : Verbose"
print "-t : LogType - Use the specified log - default = 'Application'"
def test():
# check if running on Windows NT, if not, display notice and terminate
if win32api.GetVersion() & 0x80000000:
print "This sample only runs on NT"
return
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], "rwh?c:t:v")
computer = None
do_read = do_write = 1
logType = "Application"
verbose = 0
if len(args)>0:
print "Invalid args"
        Usage()
return 1
for opt, val in opts:
if opt == '-t':
logType = val
if opt == '-c':
computer = val
if opt in ['-h', '-?']:
Usage()
return
if opt=='-r':
do_read = 0
if opt=='-w':
do_write = 0
if opt=='-v':
verbose = verbose + 1
if do_write:
ReportEvent(logType, 2, strings=["The message text for event 2"], data = "Raw\0Data")
ReportEvent(logType, 1, eventType=win32evtlog.EVENTLOG_WARNING_TYPE, strings=["A warning"], data = "Raw\0Data")
ReportEvent(logType, 1, eventType=win32evtlog.EVENTLOG_INFORMATION_TYPE, strings=["An info"], data = "Raw\0Data")
print "Successfully wrote 3 records to the log"
if do_read:
ReadLog(computer, logType, verbose > 0)
if __name__=='__main__':
test()
| bsd-3-clause |
hjfreyer/marry-fuck-kill | backend/core.py | 1 | 11449 | #!/usr/bin/env python
#
# Copyright 2011 Hunter Freyer and Michael Kelly
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import datetime
import hmac
import json
import logging
import urllib2
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import db
import models
import config_NOCOMMIT as config
# Whether to display the new vote counts (cached in Triples).
USE_CACHED_VOTE_COUNTS = True
Image = collections.namedtuple('Image', ['original', 'thumbnail'])
class JSONStructureError(Exception):
"""Raised when JSON structure doesn't match our expectations."""
class EntityValidationError(Exception): pass
def GetStatsUrlsForTriple(triple, w=160, h=85):
"""Returns a list of stats URLs for the given triple.
Args:
triple: (Triple) triple to examine
w: (int) Optional. Width of each chart image.
h: (int) Optional. Height of each chart image.
Returns:
[str, str, str]: URLs for the Triple's three Entities.
"""
counts = GetTripleVoteCounts(triple)
urls = []
overall_max = max([max(c) for c in counts])
for count in counts:
urls.append('http://chart.apis.google.com/chart'
'?chxr=0,0,%(max)d'
'&chxt=y'
'&chbh=a'
'&chs=%(w)dx%(h)d'
'&cht=bvg'
'&chco=9911BB,C76FDD,63067A'
'&chds=0,%(max)d,0,%(max)d,0,%(max)d'
'&chd=t:%(m)d|%(f)d|%(k)d'
'&chdl=Marry|Fuck|Kill'
'&chdlp=r' % (dict(m=count[0], f=count[1], k=count[2],
max=overall_max,
w=w,
h=h)))
return urls
def GetTripleVoteCounts(triple):
"""Calculates vote count for the given triple.
Returns:
([[int]]) Vote counts. This is a nested list (first level is votes for
entity one, two, three; second level is votes for m, f, k).
"""
def _CalculateEntityVoteCounts(entity):
m = entity.assignment_reference_marry_set.count()
f = entity.assignment_reference_fuck_set.count()
k = entity.assignment_reference_kill_set.count()
return [m, f, k]
# For backwards compatibility with Triples that don't have embedded vote
# counts.
if not USE_CACHED_VOTE_COUNTS or not triple.has_cached_votes:
logging.info('Updating legacy Triple without vote counts: %s',
triple.key())
votes = [_CalculateEntityVoteCounts(triple.one),
_CalculateEntityVoteCounts(triple.two),
_CalculateEntityVoteCounts(triple.three)]
# Race condition here: We're done calculating the votes, and we're about to
# update the Entity. We might be off by one if someone else votes while
# we're here. We have MapReduces to fix this up, so we don't care too much.
db.run_in_transaction(_UpdateTripleVoteCounts, triple.key(), votes)
return votes
else:
logging.info('Got cached votes for Triple %s', triple.key())
return [[triple.votes_one_m, triple.votes_one_f, triple.votes_one_k],
[triple.votes_two_m, triple.votes_two_f, triple.votes_two_k],
[triple.votes_three_m, triple.votes_three_f, triple.votes_three_k]]
def _AddTripleVoteCounts(triple_key, votes):
"""Adds votes to a triple's vote count.
This should be run in a transaction.
Args:
triple_key: (db.Key) the triple to update
votes: ([str]) a 3-list of 'm', 'f', and 'k', corresponding to the votes
for the 3 items in the triple, in order.
"""
triple = models.Triple.get(triple_key)
if triple.has_cached_votes:
triple.votes_one_m += 1 if votes[0] == 'm' else 0
triple.votes_one_f += 1 if votes[0] == 'f' else 0
triple.votes_one_k += 1 if votes[0] == 'k' else 0
triple.votes_two_m += 1 if votes[1] == 'm' else 0
triple.votes_two_f += 1 if votes[1] == 'f' else 0
triple.votes_two_k += 1 if votes[1] == 'k' else 0
triple.votes_three_m += 1 if votes[2] == 'm' else 0
triple.votes_three_f += 1 if votes[2] == 'f' else 0
triple.votes_three_k += 1 if votes[2] == 'k' else 0
triple.put()
else:
logging.warning('_AddTripleVoteCounts: Legacy Triple without vote counts:'
'%s', triple_key)
def _UpdateTripleVoteCounts(triple_key, new_counts):
"""Updates vote counts on the given triple.
Args:
triple: (Triple) triple to update
new_counts: ([[int]]) These values are the new values for votes_one_m, ...,
votes_three_k. See core.GetTripleVoteCounts.
"""
triple = models.Triple.get(triple_key)
assert(len(new_counts) == 3)
votes_one, votes_two, votes_three = new_counts
assert(len(votes_one) == 3)
assert(len(votes_two) == 3)
assert(len(votes_three) == 3)
triple.votes_one_m, triple.votes_one_f, triple.votes_one_k = votes_one
triple.votes_two_m, triple.votes_two_f, triple.votes_two_k = votes_two
triple.votes_three_m, triple.votes_three_f, triple.votes_three_k = (
votes_three)
triple.has_cached_votes = True
triple.put()
def MakeEntity(name, query, user_ip, thumb_url, original_url):
"""Makes an Entity with the given attributes."""
# Get the thumbnail URL for the entity. This could throw
# URLError. We'll let it bubble up.
result = urlfetch.fetch(thumb_url)
logging.info('Downloading %s' % thumb_url)
entity = models.Entity(name=name,
data=result.content,
query=query,
original_url=original_url)
entity.put()
return entity
def MakeTriple(entities, creator, creator_ip):
"""Create the named triple.
Args:
entities: a data structure built in MakeSubmitHandler.
creator: the user who created the Triple.
creator_ip: IP address of the request to make this triple.
"""
for i in range(len(entities)):
# TODO(mjkelly): Check for a signature element.
for k in ['n', 'u', 'q', 'ou']:
if not entities[i][k]:
raise ValueError("Entity %s missing attribute '%s'" % (i, k))
  # This may raise a URLError or EntityValidationError.
one = MakeEntity(name=entities[0]['n'],
query=entities[0]['q'],
user_ip=creator_ip,
thumb_url=entities[0]['u'],
original_url=entities[0]['ou'])
two = MakeEntity(name=entities[1]['n'],
query=entities[1]['q'],
user_ip=creator_ip,
thumb_url=entities[1]['u'],
original_url=entities[1]['ou'])
three = MakeEntity(name=entities[2]['n'],
query=entities[2]['q'],
user_ip=creator_ip,
thumb_url=entities[2]['u'],
original_url=entities[2]['ou'])
triple = models.Triple(one=one, two=two, three=three,
creator=creator,
creatorip=creator_ip,
has_cached_votes=True,
votes_one_m=0,
votes_one_f=0,
votes_one_k=0,
votes_two_m=0,
votes_two_f=0,
votes_two_k=0,
votes_three_m=0,
votes_three_f=0,
votes_three_k=0)
triple.enable()
triple.put()
return triple
def MakeAssignment(triple_id, v1, v2, v3, user, user_ip):
"""Create a new assignment.
Args:
    triple_id: id of the Triple being voted on.
    v1, v2, v3: votes ('m', 'f', or 'k') for the triple's three entities, in order.
    user: the user who made the assignment request.
    user_ip: IP address of the request.
"""
values = [v1, v2, v3]
if set(values) != set(['m', 'f', 'k']):
return None
try:
triple_id = long(triple_id)
  except ValueError:
    logging.error("make_assignment: bad triple key '%s'", triple_id)
    return None
triple = models.Triple.get_by_id(triple_id)
logging.debug('triple = %s', triple)
if triple is None:
logging.error('make_assignment: No triple with key %s', triple_id)
return None
db.run_in_transaction(_AddTripleVoteCounts, triple.key(), values)
# We get an entity->action map from the client, but we need to reverse
# it to action->entity to update the DB.
triple_entities = [triple.one,
triple.two,
triple.three]
entities = {}
for i in range(len(values)):
# Items in values are guaranteed to be 'm', 'f', 'k' (check above)
entities[values[i]] = triple_entities[i]
if (entities['m'] is None or entities['f'] is None
or entities['k'] is None):
logging.error('Not all non-None: marry = %s, fuck = %s, kill = %s',
entities['m'], entities['f'], entities['k'])
return None
assign = models.Assignment(triple=triple,
marry=entities['m'],
fuck=entities['f'],
kill=entities['k'],
user=user,
userip=str(user_ip))
assign.put()
logging.info("Assigned m=%s, f=%s, k=%s to %s", entities['m'],
entities['f'], entities['k'], triple)
return assign
def ImageSearch(query, user_ip):
"""Performs an image search. It should return 10 results.
Args:
query: (str) The search query
user_ip: (str) IP address of user making the query, for accounting.
Returns:
[Image]: A list of Image objects representing search results
Raises:
JSONStructureError: If the structure of the search results is unexpected.
"""
images = []
query = query.encode('utf-8')
url = ('https://www.googleapis.com/customsearch/v1'
'?key={key}'
'&cx={cx}'
'&q={q}'
'&userIp={userip}'
'&searchType=image').format(
key=config.CSE_API_KEY,
cx=config.CSE_ID,
q=urllib2.quote(query),
userip=user_ip)
logging.info('ImageSearch: query url=%s', url)
download_start = datetime.datetime.now()
# This may raise a DownloadError
result = urlfetch.fetch(url)
download_finish = datetime.datetime.now()
data = json.loads(result.content)
parse_finish = datetime.datetime.now()
logging.info('ImageSearch: downloaded %s bytes; %s to download, %s to parse',
len(result.content),
download_finish - download_start,
parse_finish - download_finish)
try:
if data['searchInformation']['totalResults'] == '0':
return []
for item in data['items']:
link = item['link']
thumb = item['image']['thumbnailLink']
images.append(Image(original=link, thumbnail=thumb))
except KeyError as e:
error = 'Missing key %s in JSON. JSON = %r' % (e, result.content)
logging.error(error)
raise JSONStructureError(error)
return images
def Sign(*items):
"""Signs a sequence of items using our internal HMAC key.
Args:
*items: Any sequence of items.
Returns:
(str) Hex digest of *items
"""
h = hmac.new(config.HMAC_KEY)
for item in items:
h.update(item)
return h.hexdigest()
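# Illustrative usage (assumption): the same sequence of items always yields the
# same hex digest, so a signature can be re-computed and checked later, e.g.
#   sig = Sign(str(triple_id), user_ip)
#   assert sig == Sign(str(triple_id), user_ip)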
| apache-2.0 |
rmhyman/DataScience | Lesson1/IntroToPandas.py | 1 | 1976 | import pandas as pd
'''
The following code is to help you play with the concept of Series in Pandas.
You can think of Series as an one-dimensional object that is similar to
an array, list, or column in a database. By default, it will assign an
index label to each item in the Series ranging from 0 to N, where N is
the number of items in the Series minus one.
Please feel free to play around with the concept of Series and see what it does
*This playground is inspired by Greg Reda's post on Intro to Pandas Data Structures:
http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/
'''
# Change False to True to create a Series object
if True:
series = pd.Series(['Dave', 'Cheng-Han', 'Udacity', 42, -1789710578])
print series
'''
You can also manually assign indices to the items in the Series when
creating the series
'''
# Change False to True to see custom index in action
if False:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
print series
'''
You can use index to select specific items from the Series
'''
# Change False to True to see Series indexing in action
if False:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
print series['Instructor']
print ""
print series[['Instructor', 'Curriculum Manager', 'Course Number']]
'''
You can also use boolean operators to select specific items from the Series
'''
# Change False to True to see boolean indexing in action
if True:
cuteness = pd.Series([1, 2, 3, 4, 5], index=['Cockroach', 'Fish', 'Mini Pig',
'Puppy', 'Kitten'])
print cuteness > 3
print ""
print cuteness[cuteness > 3]
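'''
For reference (illustrative, approximate): with the Series above,
cuteness[cuteness > 3] is expected to print something like
Puppy 4
Kitten 5
dtype: int64
'''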
| mit |
tedelhourani/ansible | lib/ansible/module_utils/known_hosts.py | 7 | 6918 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import hmac
import re
from ansible.module_utils.six.moves.urllib.parse import urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1
HASHED_KEY_MAGIC = "|1|"
def is_ssh_url(url):
""" check if url is ssh """
if "@" in url and "://" not in url:
return True
for scheme in "ssh://", "git+ssh://", "ssh+git://":
if url.startswith(scheme):
return True
return False
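# Illustrative examples (assumption, not part of the original module):
#   is_ssh_url('git@github.com:user/repo.git')  -> True   (user@host:path form)
#   is_ssh_url('ssh://git@host/repo.git')       -> True
#   is_ssh_url('https://github.com/user/repo')  -> False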
def get_fqdn_and_port(repo_url):
""" chop the hostname and port out of a url """
fqdn = None
port = None
ipv6_re = re.compile('(\[[^]]*\])(?::([0-9]+))?')
if "@" in repo_url and "://" not in repo_url:
        # most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
match = ipv6_re.match(repo_url)
# For this type of URL, colon specifies the path, not the port
if match:
fqdn, path = match.groups()
elif ":" in repo_url:
fqdn = repo_url.split(":")[0]
elif "/" in repo_url:
fqdn = repo_url.split("/")[0]
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse(repo_url)
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
fqdn = parts[1]
if "@" in fqdn:
fqdn = fqdn.split("@", 1)[1]
match = ipv6_re.match(fqdn)
if match:
fqdn, port = match.groups()
elif ":" in fqdn:
fqdn, port = fqdn.split(":")[0:2]
return fqdn, port
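# Illustrative examples (assumption): the port comes back as a string, or None
# when the colon denotes a path rather than a port, e.g.
#   get_fqdn_and_port('git@github.com:user/repo.git')        -> ('github.com', None)
#   get_fqdn_and_port('ssh://git@example.com:2222/repo.git') -> ('example.com', '2222')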
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
host_file_list.append("/etc/openssh/ssh_known_hosts")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
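# Note (editorial, based on the OpenSSH known_hosts format): hashed entries
# parsed above look like
#   |1|<base64 salt>|<base64 HMAC-SHA1(salt, hostname)>
# which is why the salt is used as the HMAC key and the digest is compared
# against the second base64 field.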
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
""" use ssh-keyscan to add the hostkey """
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_ssh_dir = "~/.ssh/"
user_host_file = "~/.ssh/known_hosts"
user_ssh_dir = os.path.expanduser(user_ssh_dir)
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, int('700', 8))
except:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
module.fail_json(msg="%s does not exist" % user_ssh_dir)
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
if port:
this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
else:
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
rc, out, err = module.run_command(this_cmd)
# ssh-keyscan gives a 0 exit code and prints nothing on timeout
if rc != 0 or not out:
msg = 'failed to retrieve hostkey'
if not out:
msg += '. "%s" returned no matches.' % this_cmd
else:
msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)
if err:
msg += ' [stderr]: %s' % err
module.fail_json(msg=msg)
module.append_to_file(user_host_file, out)
return rc, out, err
| gpl-3.0 |
google/dnae | services/service-example/service_example_run.py | 1 | 9308 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNA - Service example - Main service module.
Main methods to handle the service tasks.
This example service uses DCM APIs (and the corresponding DNA connector) to
collect DCM report data, elaborate it, and push it to a BigQuery dataset.
"""
import base64
import json
import logging
import sys
from dcm_connector import DCMConnector
from dcm_connector import DCMReport
from dna_general_settings import CREDENTIAL_FILE
from dna_general_settings import DCM_API_VER
from dna_general_settings import GDS_KIND_LOG_SERVICE
from dna_logging import configure_logging
from dna_project_settings import DCM_PROFILE_ID
from dna_project_settings import PROJECT_ID
from gcp_connector import GCPConnector
from gcp_connector import GCPTable
from service_example_settings import DATA_SCHEMA_STANDARD
from service_example_settings import DCM_REPORT_DATE_RANGE
from service_example_settings import DCM_REPORT_NAME
from service_example_settings import DCM_REPORT_TEMPLATE
from service_example_settings import FIELD_MAP_STANDARD
from service_example_settings import GBQ_TABLE
from service_example_settings import SERVICE_NAME
from utils import TextUtils
# Configure logging
configure_logging()
logger = logging.getLogger('DNA-Service-example')
def get_field(field, row, field_map, default_value=None):
"""Access fields in a row according to the specified field map.
Args:
field: field to extract.
row: row to extract field from.
field_map: field map.
default_value: value to be returned in case the field is not mapped.
Returns:
specified field value if the field is mapped, the default value otherwise.
"""
field_info = field_map.get(field)
if not field_info:
return default_value
else:
return row[field_info['idx']]
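# Illustrative usage (hypothetical field map, same shape as FIELD_MAP_STANDARD):
#   field_map = {'Clicks': {'idx': 12}}
#   get_field('Clicks', row, field_map)      # -> row[12]
#   get_field('Missing', row, field_map, 0)  # -> 0 (default when unmapped)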
def extract(dcm, params):
"""Create a DCM report and extract the resulting data.
Args:
dcm: initiated instance of the DCM connector.
params: parameters to use for the DCM report.
Returns:
the data object resulting from the DCM report.
"""
report = DCMReport(params['report_template'])
report.setname(params['report_name'])
report.setdates(params['date_range'])
advertiser_ids = params['advertiser_id'].split(' ')
# Add filters
for item in advertiser_ids:
report.addfilter('dfa:advertiser', item)
# Insert and run a new report
rid = dcm.createreport(report)
fid = dcm.runreport(rid)
# Get raw report data for the specified report and file
data = dcm.getreportdata(rid, fid)
# Delete the report from DCM
dcm.deletereport(rid)
return data
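# Illustrative params for extract() (placeholder values; the real ones come
# from the task payload plus the settings added in main() below):
#   params = {'report_template': DCM_REPORT_TEMPLATE,
#             'report_name': DCM_REPORT_NAME,
#             'date_range': DCM_REPORT_DATE_RANGE,
#             'advertiser_id': '12345 67890'}  # space-separated advertiser ids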
def transform(dest_table, raw_data):
"""Transform the report data and add it to the destination table.
Args:
dest_table: GCPConnector object.
raw_data: GCPTable object containing the transformed data.
"""
# Enable smart completion for the GCPTable object.
assert isinstance(dest_table, GCPTable)
# Loop over raw data rows (excluding header row).
for row in raw_data[1:]:
try:
# Add all fields to the destination table
dest_table.addrow([
get_field('Advertiser', row, FIELD_MAP_STANDARD),
get_field('AdvertiserID', row, FIELD_MAP_STANDARD),
get_field('Campaign', row, FIELD_MAP_STANDARD),
get_field('CampaignID', row, FIELD_MAP_STANDARD),
get_field('PlacementSize', row, FIELD_MAP_STANDARD),
get_field('CreativeType', row, FIELD_MAP_STANDARD),
get_field('CreativeSize', row, FIELD_MAP_STANDARD),
get_field('PlatformType', row, FIELD_MAP_STANDARD),
get_field('Site', row, FIELD_MAP_STANDARD),
get_field('Month', row, FIELD_MAP_STANDARD),
get_field('Week', row, FIELD_MAP_STANDARD),
get_field('Date', row, FIELD_MAP_STANDARD),
int(get_field('Clicks', row, FIELD_MAP_STANDARD)),
int(get_field('Impressions', row, FIELD_MAP_STANDARD)),
float(get_field('ViewableTimeSeconds', row, FIELD_MAP_STANDARD)),
int(get_field('EligibleImpressions', row, FIELD_MAP_STANDARD)),
int(get_field('MeasurableImpressions', row, FIELD_MAP_STANDARD)),
int(get_field('ViewableImpressions', row, FIELD_MAP_STANDARD)),
])
# pylint: disable=broad-except
except Exception as e:
logger.debug('[%s] - Error "%s" occurs while adding the following row',
SERVICE_NAME, str(e))
logger.debug(str(row))
# pylint: enable=broad-except
def load(gcp, source_table, params):
"""Load transformed data onto Google Cloud Platform.
Args:
gcp: GCPConnector object.
source_table: GCPTable object containing the transformed data.
params: dictionary containing all relevant GCP parameters.
Returns:
The BigQuery job id.
"""
assert isinstance(gcp, GCPConnector)
assert isinstance(source_table, GCPTable)
bucket = params['bucket']
filename = params['filename']
dataset = params['dataset']
table = params['table']
# Upload data onto a specified Google Cloud Storage bucket/filename
gcsuri = gcp.gcs_uploadtable(source_table, bucket, filename)
# Create a BigQuery job to transfer uploaded data from GCS to a BigQuery table
if params['append']:
# If append is "True" append transformed data to the table
job_id = gcp.bq_importfromgcs(
gcsuri=gcsuri,
dataset=dataset,
table=table,
schema=source_table.schema,
encoding=source_table.encoding,
writemode='WRITE_APPEND')
else:
# Otherwise overwrite (or create a new) table
job_id = gcp.bq_importfromgcs(
gcsuri=gcsuri,
dataset=dataset,
table=table,
schema=source_table.schema,
encoding=source_table.encoding)
return job_id
def service_task(dcm, gcp, params):
"""Main ETL job, putting together all ETL functions to implement the service.
Args:
dcm: the DCMConnector instance.
gcp: the GCPConnector instance.
params: dictionary containing all parameters relevant for the ETL task.
Returns:
The BigQuery job id.
"""
# Initiate a GCPTable object (to be used for data ingestion) using the
# appropriate data schema.
dest_table = GCPTable(params['schema'])
# Extract data via a DCM report.
raw_data = extract(dcm, params)
# Transform data if necessary (and upload it to the destination table)
transform(dest_table, raw_data)
# ...or alternatively ingest extracted data as it is
# dest_table.ingest(raw_data, True)
# Load data into Google Big Query.
job_id = load(gcp, dest_table, params)
return job_id
def main(argv):
"""Main function reading the task and launching the corresponding ETL job.
Args:
argv: array of parameters: (1) queue name, (2) task id.
"""
# Get input arguments passed by the service-example-run.sh script
queue_name = str(argv[1])
task_name = str(argv[2])
logger.info('Starting service-example processing task. Queue name: [%s]. '
'Task name: [%s]', queue_name, task_name)
# Initiate connectors for Google Cloud Platform and DCM.
gcp = GCPConnector(PROJECT_ID)
dcm = DCMConnector(
credential_file=CREDENTIAL_FILE,
user_email=None,
profile_id=DCM_PROFILE_ID,
api_version=DCM_API_VER)
# Get the first available task from the queue.
task = gcp.gct_gettask(task_name)
payload = task['pullMessage']['payload']
params = json.loads(base64.urlsafe_b64decode(str(payload)))
# Add service-specific params.
params['report_template'] = DCM_REPORT_TEMPLATE
params['report_name'] = DCM_REPORT_NAME
params['date_range'] = DCM_REPORT_DATE_RANGE
params['schema'] = DATA_SCHEMA_STANDARD
params['filename'] = TextUtils.timestamp() + '_' + str(
params['account_id']) + '.csv'
params['table'] = GBQ_TABLE
params['append'] = False
# Log run info as Datastore entity.
run_entity = gcp.gds_insert(
kind=GDS_KIND_LOG_SERVICE,
attributes={
'created': TextUtils.timestamp().decode(),
'service': params['service'].decode(),
'status': u'RUNNING',
'error': None,
'bqjob': None,
'bqstatus': None,
})
try:
# Run the ETL task with the given params and update the Datastore entity.
job_id = service_task(dcm, gcp, params)
run_entity['bqjob'] = job_id.decode()
run_entity['bqstatus'] = u'RUNNING'
run_entity['status'] = u'DONE'
# pylint: disable=broad-except
except Exception as e:
run_entity['status'] = u'FAILED'
run_entity['error'] = str(e).decode()
logger.error(
'[%s] - The following error occurs while executing task <%s> : <%s>',
SERVICE_NAME, task_name, str(e))
finally:
gcp.gds_update(run_entity)
# pylint: enable=broad-except
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
pypa/setuptools | setuptools/_distutils/command/bdist_msi.py | 26 | 35579 | # Copyright (C) 2005, 2006 Martin von Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_msi command, based on bdist_wininst
"""
Implements the bdist_msi command.
"""
import os
import sys
import warnings
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.sysconfig import get_python_version
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
from distutils import log
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data
class PyDialog(Dialog):
"""Dialog class with a fixed layout: controls at the top, then a ruler,
then a list of buttons: back, next, cancel. Optionally a bitmap at the
left."""
def __init__(self, *args, **kw):
"""Dialog(database, name, x, y, w, h, attributes, title, first,
default, cancel, bitmap=true)"""
Dialog.__init__(self, *args)
ruler = self.h - 36
bmwidth = 152*ruler/328
#if kw.get("bitmap", True):
# self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
self.line("BottomLine", 0, ruler, self.w, 0)
def title(self, title):
"Set the title text of the dialog at the top."
# name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
# text, in VerdanaBold10
self.text("Title", 15, 10, 320, 60, 0x30003,
r"{\VerdanaBold10}%s" % title)
def back(self, title, next, name = "Back", active = 1):
"""Add a back button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
def cancel(self, title, next, name = "Cancel", active = 1):
"""Add a cancel button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
def next(self, title, next, name = "Next", active = 1):
"""Add a Next button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
def xbutton(self, name, title, next, xpos):
"""Add a button with a given title, the tab-next button,
its name in the Control table, giving its x position; the
y-position is aligned with the other buttons.
Return the button, so that events can be associated"""
return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
class bdist_msi(Command):
description = "create a Microsoft Installer (.msi) binary distribution"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized) "
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after "
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
'2.5', '2.6', '2.7', '2.8', '2.9',
'3.0', '3.1', '3.2', '3.3', '3.4',
'3.5', '3.6', '3.7', '3.8', '3.9']
other_version = 'X'
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
warnings.warn("bdist_msi command is deprecated since Python 3.9, "
"use bdist_wheel (wheel packages) instead",
DeprecationWarning, 2)
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.skip_build = None
self.install_script = None
self.pre_install_script = None
self.versions = None
def finalize_options(self):
self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'msi')
short_version = get_python_version()
if (not self.target_version) and self.distribution.has_ext_modules():
self.target_version = short_version
if self.target_version:
self.versions = [self.target_version]
if not self.skip_build and self.distribution.has_ext_modules()\
and self.target_version != short_version:
raise DistutilsOptionError(
"target version can only be %s, or the '--skip-build'"
" option must be specified" % (short_version,))
else:
self.versions = list(self.all_versions)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
)
if self.pre_install_script:
raise DistutilsOptionError(
"the pre-install-script feature is not yet implemented")
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError(
"install_script '%s' not found in scripts"
% self.install_script)
self.install_script_key = None
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.prefix = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = '%d.%d' % sys.version_info[:2]
plat_specifier = ".%s-%s" % (self.plat_name, target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
self.mkpath(self.dist_dir)
fullname = self.distribution.get_fullname()
installer_name = self.get_installer_filename(fullname)
installer_name = os.path.abspath(installer_name)
if os.path.exists(installer_name): os.unlink(installer_name)
metadata = self.distribution.metadata
author = metadata.author
if not author:
author = metadata.maintainer
if not author:
author = "UNKNOWN"
version = metadata.get_version()
# ProductVersion must be strictly numeric
# XXX need to deal with prerelease versions
sversion = "%d.%d.%d" % StrictVersion(version).version
# Prefix ProductName with Python x.y, so that
# it sorts together with the other Python packages
        # in Add-Remove-Programs (ARP)
fullname = self.distribution.get_fullname()
if self.target_version:
product_name = "Python %s %s" % (self.target_version, fullname)
else:
product_name = "Python %s" % (fullname)
self.db = msilib.init_database(installer_name, schema,
product_name, msilib.gen_uuid(),
sversion, author)
msilib.add_tables(self.db, sequence)
props = [('DistVersion', version)]
email = metadata.author_email or metadata.maintainer_email
if email:
props.append(("ARPCONTACT", email))
if metadata.url:
props.append(("ARPURLINFOABOUT", metadata.url))
if props:
add_data(self.db, 'Property', props)
self.add_find_python()
self.add_files()
self.add_scripts()
self.add_ui()
self.db.Commit()
if hasattr(self.distribution, 'dist_files'):
tup = 'bdist_msi', self.target_version or 'any', fullname
self.distribution.dist_files.append(tup)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
def add_files(self):
db = self.db
cab = msilib.CAB("distfiles")
rootdir = os.path.abspath(self.bdist_dir)
root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
f = Feature(db, "Python", "Python", "Everything",
0, 1, directory="TARGETDIR")
items = [(f, root, '')]
for version in self.versions + [self.other_version]:
target = "TARGETDIR" + version
name = default = "Python" + version
desc = "Everything"
if version is self.other_version:
title = "Python from another location"
level = 2
else:
title = "Python %s from registry" % version
level = 1
f = Feature(db, name, title, desc, 1, level, directory=target)
dir = Directory(db, cab, root, rootdir, target, default)
items.append((f, dir, version))
db.Commit()
seen = {}
for feature, dir, version in items:
todo = [dir]
while todo:
dir = todo.pop()
for file in os.listdir(dir.absolute):
afile = os.path.join(dir.absolute, file)
if os.path.isdir(afile):
short = "%s|%s" % (dir.make_short(file), file)
default = file + version
newdir = Directory(db, cab, dir, file, default, short)
todo.append(newdir)
else:
if not dir.component:
dir.start_component(dir.logical, feature, 0)
if afile not in seen:
key = seen[afile] = dir.add_file(file)
if file==self.install_script:
if self.install_script_key:
raise DistutilsOptionError(
"Multiple files with name %s" % file)
self.install_script_key = '[#%s]' % key
else:
key = seen[afile]
add_data(self.db, "DuplicateFile",
[(key + version, dir.component, key, None, dir.logical)])
db.Commit()
cab.commit(db)
def add_find_python(self):
"""Adds code to the installer to compute the location of Python.
Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
registry for each version of Python.
Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
else from PYTHON.MACHINE.X.Y.
Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""
start = 402
for ver in self.versions:
install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
machine_reg = "python.machine." + ver
user_reg = "python.user." + ver
machine_prop = "PYTHON.MACHINE." + ver
user_prop = "PYTHON.USER." + ver
machine_action = "PythonFromMachine" + ver
user_action = "PythonFromUser" + ver
exe_action = "PythonExe" + ver
target_dir_prop = "TARGETDIR" + ver
exe_prop = "PYTHON" + ver
if msilib.Win64:
# type: msidbLocatorTypeRawValue + msidbLocatorType64bit
Type = 2+16
else:
Type = 2
add_data(self.db, "RegLocator",
[(machine_reg, 2, install_path, None, Type),
(user_reg, 1, install_path, None, Type)])
add_data(self.db, "AppSearch",
[(machine_prop, machine_reg),
(user_prop, user_reg)])
add_data(self.db, "CustomAction",
[(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
(user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
(exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
])
add_data(self.db, "InstallExecuteSequence",
[(machine_action, machine_prop, start),
(user_action, user_prop, start + 1),
(exe_action, None, start + 2),
])
add_data(self.db, "InstallUISequence",
[(machine_action, machine_prop, start),
(user_action, user_prop, start + 1),
(exe_action, None, start + 2),
])
add_data(self.db, "Condition",
[("Python" + ver, 0, "NOT TARGETDIR" + ver)])
start += 4
assert start < 500
def add_scripts(self):
if self.install_script:
start = 6800
for ver in self.versions + [self.other_version]:
install_action = "install_script." + ver
exe_prop = "PYTHON" + ver
add_data(self.db, "CustomAction",
[(install_action, 50, exe_prop, self.install_script_key)])
add_data(self.db, "InstallExecuteSequence",
[(install_action, "&Python%s=3" % ver, start)])
start += 1
# XXX pre-install scripts are currently refused in finalize_options()
# but if this feature is completed, it will also need to add
# entries for each version as the above code does
if self.pre_install_script:
scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
with open(scriptfn, "w") as f:
# The batch file will be executed with [PYTHON], so that %1
# is the path to the Python interpreter; %0 will be the path
# of the batch file.
# rem ="""
# %1 %0
# exit
# """
# <actual script>
f.write('rem ="""\n%1 %0\nexit\n"""\n')
with open(self.pre_install_script) as fin:
f.write(fin.read())
add_data(self.db, "Binary",
[("PreInstall", msilib.Binary(scriptfn))
])
add_data(self.db, "CustomAction",
[("PreInstall", 2, "PreInstall", None)
])
add_data(self.db, "InstallExecuteSequence",
[("PreInstall", "NOT Installed", 450)])
def add_ui(self):
db = self.db
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair"),
# possible values: ALL, JUSTME
("WhichUsers", "ALL")
])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
        # Numbers indicate sequence; see sequence.py for how these actions integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
# In the user interface, assume all-users installation if privileged.
("SelectFeaturesDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, 'ActionText', text.ActionText)
add_data(db, 'UIText', text.UIText)
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
#error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
#cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
# "py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 15, 70, 320, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Feature (Python directory) selection
seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Python Installations")
seldlg.text("Hint", 15, 30, 300, 20, 3,
"Select the Python locations where %s should be installed."
% self.distribution.get_fullname())
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
order = 1
c.event("[TARGETDIR]", "[SourceDir]", ordering=order)
for version in self.versions + [self.other_version]:
order += 1
c.event("[TARGETDIR]", "[TARGETDIR%s]" % version,
"FEATURE_SELECTED AND &Python%s=3" % version,
ordering=order)
c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1)
c.event("EndDialog", "Return", ordering=order + 2)
c = seldlg.cancel("Cancel", "Features")
c.event("SpawnDialog", "CancelDlg")
c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3,
"FEATURE", None, "PathEdit", None)
c.event("[FEATURE_SELECTED]", "1")
ver = self.other_version
install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver
dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver
c = seldlg.text("Other", 15, 200, 300, 15, 3,
"Provide an alternate Python location")
c.condition("Enable", install_other_cond)
c.condition("Show", install_other_cond)
c.condition("Disable", dont_install_other_cond)
c.condition("Hide", dont_install_other_cond)
c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1,
"TARGETDIR" + ver, None, "Next", None)
c.condition("Enable", install_other_cond)
c.condition("Show", install_other_cond)
c.condition("Disable", dont_install_other_cond)
c.condition("Hide", dont_install_other_cond)
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
        # and in the Property table, but according to the documentation the
        # installer fails if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
"WhichUsers", "", "Next")
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", ordering = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
r"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
###################################################################
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 15, 63, 330, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
"MaintenanceForm_Action", "", "Next")
#g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
#c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
#c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
#c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name,
self.target_version)
else:
base_name = "%s.%s.msi" % (fullname, self.plat_name)
installer_name = os.path.join(self.dist_dir, base_name)
return installer_name
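# Rough usage sketch (hypothetical project, not part of this module): with a
# distutils-based setup.py one would run
#
#   python setup.py bdist_msi --target-version=2.7
#
# and get_installer_filename() would name the result roughly
# dist/<fullname>.<plat-name>-py2.7.msi.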
| mit |
mikemccllstr/seadir | seadir/commands/responses.py | 1 | 1359 | '''Command line handlers that address the responses in the Google Sheet'''
import logging
import os
from cliff.lister import Lister
import seadir.config as config
import seadir.model.responses as responses
class Dump(Lister):
'''Dump the records from the Google Sheet to the screen'''
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
'''Method invoked by the Cliff framework'''
# Obtain the data from the Google Sheet
data = responses.Data(email=config.email,
password=config.password,
sheetname=config.spreadsheet,
tabname=config.worksheet)
return (data.headers,
data.raw_contents())
class Clean(Lister):
'''Process all the records and perform any possible cleanups'''
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
return (('Name', 'Size'),
((n, os.stat(n).st_size) for n in os.listdir('.'))
)
class Validate(Lister):
    '''Process all the records and flag any situations that seem erroneous'''
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
return (('Name', 'Size'),
((n, os.stat(n).st_size) for n in os.listdir('.'))
)
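# NOTE: the take_action bodies of Clean and Validate above just list the files
# in the current working directory with their sizes; the cleanup/validation
# behaviour described in their docstrings does not appear to be implemented yet.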
| mit |
windyuuy/opera | chromium/src/third_party/python_26/Lib/ctypes/test/test_pickling.py | 48 | 2062 | import unittest
import pickle
from ctypes import *
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
class X(Structure):
_fields_ = [("a", c_int), ("b", c_double)]
init_called = 0
def __init__(self, *args, **kw):
X.init_called += 1
self.x = 42
class Y(X):
_fields_ = [("str", c_char_p)]
class PickleTest(unittest.TestCase):
def dumps(self, item):
return pickle.dumps(item)
def loads(self, item):
return pickle.loads(item)
def test_simple(self):
for src in [
c_int(42),
c_double(3.14),
]:
dst = self.loads(self.dumps(src))
self.failUnlessEqual(src.__dict__, dst.__dict__)
self.failUnlessEqual(buffer(src)[:],
buffer(dst)[:])
def test_struct(self):
X.init_called = 0
x = X()
x.a = 42
self.failUnlessEqual(X.init_called, 1)
y = self.loads(self.dumps(x))
# loads must NOT call __init__
self.failUnlessEqual(X.init_called, 1)
# ctypes instances are identical when the instance __dict__
# and the memory buffer are identical
self.failUnlessEqual(y.__dict__, x.__dict__)
self.failUnlessEqual(buffer(y)[:],
buffer(x)[:])
def test_unpickable(self):
# ctypes objects that are pointers or contain pointers are
        # unpicklable.
self.assertRaises(ValueError, lambda: self.dumps(Y()))
prototype = CFUNCTYPE(c_int)
for item in [
c_char_p(),
c_wchar_p(),
c_void_p(),
pointer(c_int(42)),
dll._testfunc_p_p,
prototype(lambda: 42),
]:
self.assertRaises(ValueError, lambda: self.dumps(item))
class PickleTest_1(PickleTest):
def dumps(self, item):
return pickle.dumps(item, 1)
class PickleTest_2(PickleTest):
def dumps(self, item):
return pickle.dumps(item, 2)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
jacraven/lsiapp | main/auth/dropbox.py | 2 | 1701 | # coding: utf-8
# pylint: disable=missing-docstring, invalid-name
import flask
import auth
import config
import model
from main import app
dropbox_config = dict(
access_token_method='POST',
access_token_url='https://api.dropbox.com/1/oauth2/token',
authorize_url='https://www.dropbox.com/1/oauth2/authorize',
base_url='https://www.dropbox.com/1/',
consumer_key=config.CONFIG_DB.auth_dropbox_id,
consumer_secret=config.CONFIG_DB.auth_dropbox_secret,
)
dropbox = auth.create_oauth_app(dropbox_config, 'dropbox')
@app.route('/_s/callback/dropbox/oauth-authorized/')
def dropbox_authorized():
response = dropbox.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(flask.url_for('index'))
flask.session['oauth_token'] = (response['access_token'], '')
me = dropbox.get('account/info')
user_db = retrieve_user_from_dropbox(me.data)
return auth.signin_via_social(user_db)
@dropbox.tokengetter
def get_dropbox_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/dropbox/')
def signin_dropbox():
scheme = 'https' if config.PRODUCTION else 'http'
return auth.signin_oauth(dropbox, scheme)
def retrieve_user_from_dropbox(response):
auth_id = 'dropbox_%s' % response['uid']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
return auth.create_or_get_user_db(
auth_id=auth_id,
email=response['email'],
name=response['display_name'],
username=response['display_name'],
verified=True
)
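# Sign-in flow, as wired above: /signin/dropbox/ redirects the user to Dropbox,
# Dropbox calls back /_s/callback/dropbox/oauth-authorized/, and
# retrieve_user_from_dropbox() then creates or fetches the matching User record.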
| mit |
davidzchen/tensorflow | tensorflow/python/keras/distribute/model_collection_base.py | 11 | 1687 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A base class to provide a model and corresponding input data for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ModelAndInput(object):
"""Base class to provide model and its corresponding inputs."""
def get_model(self):
"""Returns a compiled keras model object, together with output name.
Returns:
model: a keras model object
output_name: a string for the name of the output layer
"""
raise NotImplementedError("must be implemented in descendants")
def get_data(self):
"""Returns data for training and predicting.
Returns:
x_train: data used for training
y_train: label used for training
x_predict: data used for predicting
"""
raise NotImplementedError("must be implemented in descendants")
def get_batch_size(self):
"""Returns the batch_size used by the model."""
raise NotImplementedError("must be implemented in descendants")
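# Minimal sketch of a concrete subclass (all names and values below are
# illustrative, not part of this interface):
#
#   class TinyModelAndInput(ModelAndInput):
#     def get_model(self):
#       ...  # build and compile a keras model, return (model, output_name)
#     def get_data(self):
#       ...  # return x_train, y_train, x_predict
#     def get_batch_size(self):
#       return 32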
| apache-2.0 |
cyrixhero/Django-facebook | docs/docs_env/Lib/encodings/zlib_codec.py | 533 | 3015 | """ Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
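# Usage sketch (assumes Python 2's codec machinery resolves the 'zlib' name to
# this module):
#
#   >>> data = "spam" * 100
#   >>> compressed = data.encode("zlib")
#   >>> compressed.decode("zlib") == data
#   True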
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/__init__.py | 27 | 3086 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.18.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
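# Example (hypothetical shell invocation): a fixed seed can be forced through
# the environment variable read above, e.g.
#
#   SKLEARN_SEED=42 python -m pytest sklearn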
| unlicense |
WarrenWeckesser/scipy | scipy/interpolate/fitpack.py | 16 | 26807 | __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
# These are in the API for fitpack even if not used in fitpack.py itself.
from ._fitpack_impl import bisplrep, bisplev, dblint
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-D curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-D space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
    The number of coefficients in the `c` array is ``k+1`` less than the number
of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
the array of coefficients to have the same length as the array of knots.
These additional coefficients are ignored by evaluation routines, `splev`
and `BSpline`.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Generate a discretization of a limacon curve in the polar coordinates:
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.5 + np.cos(phi) # polar coords
>>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
And interpolate:
>>> from scipy.interpolate import splprep, splev
>>> tck, u = splprep([x, y], s=0)
>>> new_points = splev(u, tck)
Notice that (i) we force interpolation by using `s=0`,
(ii) the parameterization, ``u``, is generated automatically.
Now plot the result:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, 'ro')
>>> ax.plot(new_points[0], new_points[1], 'r-')
>>> plt.show()
"""
res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
quiet)
return res
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of a 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines.
Even values of k should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
        the number of data points in x, y, and w. Default: s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives. Uses the
FORTRAN routine ``curfit`` from FITPACK.
The user is responsible for assuring that the values of `x` are unique.
Otherwise, `splrep` will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
This routine zero-pads the coefficients array ``c`` to have the same length
as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
`splprep`, which does not zero-pad the coefficients.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
You can interpolate 1-D points with a B-spline curve.
Further examples are given in
:ref:`in the tutorial <tutorial-interpolate_splXXX>`.
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> spl = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, spl)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
return res
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : 3-tuple or a BSpline object
If a tuple, then it should be a sequence of length 3 returned by
`splrep` or `splprep` containing the knots, coefficients, and degree
of the spline. (Also see Notes.)
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k, the degree of the spline).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in `x`. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in an N-D space.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.__call__(x) instead.")
warnings.warn(mesg, DeprecationWarning)
# remap the out-of-bounds behavior
try:
extrapolate = {0: True, }[ext]
except KeyError as e:
raise ValueError("Extrapolation mode %s is not supported "
"by BSpline." % ext) from e
return tck(x, der, extrapolate=extrapolate)
else:
return _impl.splev(x, tck, der, ext)
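# Illustrative sketch (not from the library docs): evaluating a spline built by
# splrep above, together with its first derivative.
#
#   >>> x = np.linspace(0, 10, 50)
#   >>> tck = splrep(x, np.sin(x))
#   >>> splev(2.5, tck), splev(2.5, tck, der=1)   # roughly (sin(2.5), cos(2.5))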
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple or a BSpline instance
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
(Only returned if `full_output` is non-zero)
Notes
-----
`splint` silently assumes that the spline function is zero outside the data
interval (`a`, `b`).
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
BSpline
References
----------
.. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.integrate() instead.")
warnings.warn(mesg, DeprecationWarning)
        if full_output != 0:
            mesg = ("full_output = %s is not supported. Proceeding as if "
                    "full_output = 0" % full_output)
            # Emit the notice so callers can see that full_output is ignored here.
            warnings.warn(mesg)
return tck.integrate(a, b, extrapolate=False)
else:
return _impl.splint(a, b, tck, full_output)
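# Illustrative sketch: with tck fitted to sin(x) as in the splev note above,
# splint(0, np.pi, tck) should come out close to 2.0.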
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple or a BSpline object
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
"not recommended.")
warnings.warn(mesg, DeprecationWarning)
t, c, k = tck.tck
# _impl.sproot expects the interpolation axis to be last, so roll it.
# NB: This transpose is a no-op if c is 1D.
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
return _impl.sproot((t, c, k), mest)
else:
return _impl.sproot(tck, mest)
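# Illustrative sketch: for a cubic spline fitted to sin(x) on [0, 10],
# sproot(tck) returns the roots inside the interval, i.e. values close to
# multiples of pi.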
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
BSpline
References
----------
.. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
"""
if isinstance(tck, BSpline):
raise TypeError("spalde does not accept BSpline instances.")
else:
return _impl.spalde(x, tck)
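# Illustrative sketch: for a cubic (k=3) spline such as the one above,
# spalde(2.5, tck) returns an array of length 4 - the value and the first
# three derivatives at x = 2.5.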
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : a `BSpline` instance or a tuple
If tuple, then it is expected to be a tuple (t,c,k) containing
the vector of knots, the B-spline coefficients, and the degree of
the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
BSpline instance or a tuple
A new B-spline with knots t, coefficients c, and degree k.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
Based on algorithms from [1]_ and [2]_.
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
Examples
--------
You can insert knots into a B-spline.
>>> from scipy.interpolate import splrep, insert
>>> x = np.linspace(0, 10, 5)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> tck[0]
array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
A knot is inserted:
>>> tck_inserted = insert(3, tck)
>>> tck_inserted[0]
array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
Some knots are inserted:
>>> tck_inserted2 = insert(8, tck, m=3)
>>> tck_inserted2[0]
array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
# FITPACK expects the interpolation axis to be last, so roll it over
# NB: if c array is 1D, transposes are no-ops
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
# and roll the last axis back
c_ = np.asarray(c_)
c_ = c_.transpose((sh[-1],) + sh[:-1])
return BSpline(t_, c_, k_)
else:
return _impl.insert(x, tck, m, per)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
`BSpline` instance or tuple
Spline of order k2=k-n representing the derivative
of the input spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
BSpline
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if isinstance(tck, BSpline):
return tck.derivative(n)
else:
return _impl.splder(tck, n)
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
BSpline instance or a tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
See Also
--------
splder, splev, spalde
BSpline
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if isinstance(tck, BSpline):
return tck.antiderivative(n)
else:
return _impl.splantider(tck, n)
| bsd-3-clause |
blackbliss/callme | flask/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py | 163 | 11979 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, fsencode, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and os.name == 'nt':
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
if self.executable:
executable = self.executable
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv():
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
executable = fsencode(executable)
shebang = b'#!' + executable + post_interp + b'\n'
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and os.name == 'nt'
linesep = os.linesep.encode('utf-8')
if not use_launcher:
script_bytes = shebang + linesep + script_bytes
else:
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + linesep + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher:
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if os.name == 'nt' and not outname.endswith('.' + ext):
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
shebang = self._get_shebang('utf-8', options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError:
if not self.dry_run:
raise
f = None
else:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
first_line = f.readline()
if not first_line:
                # ScriptMaker defines no get_command_name(), so log only the
                # script path here to avoid an AttributeError.
                logger.warning('%s is an empty file (skipping)', script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line:
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt':
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
result = finder(distlib_package).find(name).bytes
return result
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
:param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
| mit |
mfussenegger/jedi | test/static_analysis/arguments.py | 20 | 1150 | # -----------------
# normal arguments (no keywords)
# -----------------
def simple(a):
return a
simple(1)
#! 6 type-error-too-few-arguments
simple()
#! 10 type-error-too-many-arguments
simple(1, 2)
#! 10 type-error-too-many-arguments
simple(1, 2, 3)
# -----------------
# keyword arguments
# -----------------
simple(a=1)
#! 7 type-error-keyword-argument
simple(b=1)
#! 10 type-error-too-many-arguments
simple(1, a=1)
def two_params(x, y):
return y
two_params(y=2, x=1)
two_params(1, y=2)
#! 11 type-error-multiple-values
two_params(1, x=2)
#! 17 type-error-too-many-arguments
two_params(1, 2, y=3)
# -----------------
# default arguments
# -----------------
def default(x, y=1, z=2):
return x
#! 7 type-error-too-few-arguments
default()
default(1)
default(1, 2)
default(1, 2, 3)
#! 17 type-error-too-many-arguments
default(1, 2, 3, 4)
default(x=1)
# -----------------
# class arguments
# -----------------
class Instance():
def __init__(self, foo):
self.foo = foo
Instance(1).foo
Instance(foo=1).foo
#! 12 type-error-too-many-arguments
Instance(1, 2).foo
#! 8 type-error-too-few-arguments
Instance().foo
| mit |
junhuac/MQUIC | src/tools/gyp/pylib/gyp/xcode_emulation.py | 7 | 67036 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
"""Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
mapping = {'$(ARCHS_STANDARD)': archs}
if archs_including_64_bit:
mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
return mapping
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
def _ExpandArchs(self, archs, sdkroot):
"""Expands variables references in ARCHS, and remove duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError as e:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
def ActiveArchs(self, archs, valid_archs, sdkroot):
"""Expands variables references in ARCHS, and filter by VALID_ARCHS if it
is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depends on the version of Xcode used.
  All versions prior to Xcode 5.0, and Xcode 5.1 or later, use
  $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 use
  $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added in Xcode 5.0
  and deprecated with Xcode 5.1.
  For the "macosx" SDKROOT, all versions starting with Xcode 5.0 include the
  64-bit architecture as part of $(ARCHS_STANDARD) and default to only
  building it.
  For the "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are
  part of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1,
  they are also part of $(ARCHS_STANDARD).
  All those rules are encoded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
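# Illustrative sketch (not part of the upstream gyp module): it shows how the
# ARCHS expansion above behaves, using a hand-built XcodeArchsDefault so no
# Xcode installation is needed. The helper name is hypothetical and the
# function is never called by gyp itself.
def _example_archs_expansion():
  xcode5_defaults = XcodeArchsDefault(
      '$(ARCHS_STANDARD)',
      XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
      XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
      XcodeArchsVariableMapping(['armv7', 'armv7s', 'arm64'],
                                ['armv7', 'armv7s', 'arm64']))
  # With ARCHS unset, the default macro expands according to the SDKROOT.
  assert xcode5_defaults.ActiveArchs(None, None, 'macosx') == ['x86_64']
  # VALID_ARCHS filters whatever the expansion produced.
  assert xcode5_defaults.ActiveArchs(
      ['$(ARCHS_STANDARD)'], ['armv7'], 'iphoneos') == ['armv7']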
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_platform_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
self.mac_toolchain_dir = None
self.header_map_path = None
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def IsIosFramework(self):
return self.spec['type'] == 'shared_library' and self._IsBundle() and \
self.isIOS
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0 or self._IsXCTest()
def _IsXCTest(self):
return int(self.spec.get('mac_xctest_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsIosAppExtension():
assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.app-extension'
if self._IsIosWatchKitExtension():
assert self._IsBundle(), ('ios_watchkit_extension flag requires '
'mac_bundle (target %s)' % self.spec['target_name'])
return 'com.apple.product-type.watchkit-extension'
if self._IsIosWatchApp():
assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.application.watchapp'
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcrun', '--sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _XcodePlatformPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root not in XcodeSettings._platform_path_cache:
platform_path = self._GetSdkVersionInfoItem(sdk_root,
'--show-sdk-platform-path')
XcodeSettings._platform_path_cache[sdk_root] = platform_path
return XcodeSettings._platform_path_cache[sdk_root]
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
    # These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self.header_map_path:
cflags.append('-I%s' % self.header_map_path)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
# In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
# llvm-gcc. It also requires a fairly recent libtool, and
# if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
# path to the libLTO.dylib that matches the used clang.
if self._Test('LLVM_LTO', 'YES', default='NO'):
cflags.append('-flto')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
platform_root = self._XcodePlatformPath(configname)
if platform_root and self._IsXCTest():
cflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11 here, so that c++0x can be used with
    # older clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
        product_dir: The directory where products such as static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
platform_root = self._XcodePlatformPath(configname)
if sdk_root and platform_root and self._IsXCTest():
ldflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
ldflags.append('-framework XCTest')
is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
if sdk_root and is_extension:
# Adds the link flags for extensions. These flags are common for all
# extensions and provide loader and main function.
# These flags reflect the compilation options used by xcode to compile
# extensions.
if XcodeVersion() < '0900':
ldflags.append('-lpkstart')
ldflags.append(sdk_root +
'/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
else:
ldflags.append('-e _NSExtensionMain')
ldflags.append('-fapplication-extension')
self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if ((self.spec['type'] == 'loadable_module' or self._IsIosAppExtension())
and self._IsBundle()):
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and
(self.spec['type'] == 'executable' or self._IsXCTest()) or
self.IsIosFramework()):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
    # Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
    # ".dylib" files, without providing real support for them. What it does,
    # for "/usr/lib" libraries, is emit "-L/usr/lib -lname", which depends on
    # the library order and causes collisions when building Chrome.
    #
    # Instead, substitute ".tbd" for ".dylib" in the generated project when the
    # following conditions are both true:
    # - the library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
    # - the ".dylib" file does not exist but a ".tbd" file does.
library = l.replace('$(SDKROOT)', sdk_root)
if l.startswith('$(SDKROOT)'):
basename, ext = os.path.splitext(library)
if ext == '.dylib' and not os.path.exists(library):
tbd_library = basename + '.tbd'
if os.path.exists(tbd_library):
library = tbd_library
return library
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
compiler = self.xcode_settings[configname].get('GCC_VERSION')
if compiler is not None:
cache['DTCompiler'] = compiler
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
sdk_version = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-version')
cache['DTSDKName'] = sdk_root + (sdk_version or '')
if xcode >= '0720':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, '--show-sdk-build-version')
elif xcode >= '0430':
cache['DTSDKBuild'] = sdk_version
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['MinimumOSVersion'] = self.xcode_settings[configname].get(
'IPHONEOS_DEPLOYMENT_TARGET')
cache['DTPlatformName'] = sdk_root
cache['DTPlatformVersion'] = sdk_version
if configname.endswith("iphoneos"):
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
cache['DTPlatformBuild'] = cache['DTSDKBuild']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
# This is weird, but Xcode sets DTPlatformBuild to an empty field
# for simulator builds.
cache['DTPlatformBuild'] = ""
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
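# Illustrative sketch (not part of the upstream gyp module): a minimal target
# spec showing how XcodeSettings turns xcode_settings keys into C++ compiler
# flags. The spec contents and the helper name are hypothetical; the function
# is never called by gyp itself.
def _example_xcode_settings_cflags_cc():
  spec = {
      'target_name': 'example',
      'type': 'executable',
      'configurations': {
          'Release': {
              'xcode_settings': {
                  'CLANG_CXX_LANGUAGE_STANDARD': 'c++11',
                  'GCC_ENABLE_CPP_RTTI': 'NO',
              },
          },
      },
  }
  settings = XcodeSettings(spec)
  flags = settings.GetCflagsCC('Release')
  assert '-std=c++11' in flags and '-fno-rtti' in flags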
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
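# Illustrative sketch (not part of the upstream gyp module): shows how
# MacPrefixHeader resolves GCC_PREFIX_HEADER when precompilation is enabled.
# The spec, the path-mapping lambdas and the helper name are hypothetical;
# the function is never called by gyp itself.
def _example_mac_prefix_header():
  spec = {
      'target_name': 'example',
      'type': 'executable',
      'configurations': {
          'Release': {
              'xcode_settings': {
                  'GCC_PREFIX_HEADER': 'prefix.h',
                  'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
              },
          },
      },
  }
  pch = MacPrefixHeader(
      XcodeSettings(spec),
      lambda path: os.path.join('build', path),
      lambda path, lang: os.path.join('build', 'pch', lang, path))
  # Each language gets its own compiled header; GetInclude points at it.
  assert pch.GetInclude('cc') == \
      '-include ' + os.path.join('build', 'pch', 'cc', 'prefix.h')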
def XcodeVersion():
"""Returns a tuple of version and build version of installed Xcode."""
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
global XCODE_VERSION_CACHE
if XCODE_VERSION_CACHE:
return XCODE_VERSION_CACHE
try:
version_list = GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select
# In that case this may be a CLT-only install so fall back to
# checking that version.
if len(version_list) < 2:
raise GypError("xcodebuild returned unexpected results")
except:
version = CLTVersion()
if version:
version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XCODE_VERSION_CACHE = (version, build)
return XCODE_VERSION_CACHE
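# Illustrative sketch (not part of the upstream gyp module): the version
# normalization performed above, reproduced without shelling out to
# xcodebuild. The helper name is hypothetical and the function is never
# called; e.g. _example_format_xcode_version('Xcode 4.6.3') == '0463'.
def _example_format_xcode_version(version_line):
  version = version_line.split()[-1].replace('.', '')
  return (version + '0' * (3 - len(version))).zfill(4)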
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
"""Returns the version of command-line tools from pkgutil."""
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def GetStdout(cmdlist):
"""Returns the content of standard output returned by invoking |cmdlist|.
Raises |GypError| if the command return with a non-zero return code."""
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = int(spec.get('mac_xctest_bundle', 0)) != 0 or \
(int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
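# Illustrative sketch (not part of the upstream gyp module): a hypothetical
# spec demonstrating that mac_bundle only takes effect for the 'mac' flavor.
# The helper below is never called by gyp itself.
def _example_is_mac_bundle():
  spec = {'target_name': 'app', 'type': 'executable', 'mac_bundle': 1}
  assert IsMacBundle('mac', spec)
  assert not IsMacBundle('linux', spec)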
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
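# Illustrative sketch (not part of the upstream gyp module): shows the
# (output, resource) pair produced for a localized XIB resource -- the .lproj
# directory is preserved and the compiled file is referenced as .nib. The
# spec, paths and helper name are hypothetical and never used by gyp itself.
def _example_bundle_resources():
  spec = {'target_name': 'App', 'type': 'executable', 'mac_bundle': 1,
          'configurations': {'Release': {}}}
  pairs = list(GetMacBundleResources('out/Release', XcodeSettings(spec),
                                     ['res/en.lproj/Main.xib']))
  output, source = pairs[0]
  assert 'en.lproj' in output and output.endswith('.nib')
  assert source == 'res/en.lproj/Main.xib'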
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
    gyp_path_to_build_path: A function that converts paths relative to the
        current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
  # These are filled in on an as-needed basis.
env = {
'BUILT_FRAMEWORKS_DIR' : built_products_dir,
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
'XCODE_VERSION_ACTUAL' : XcodeVersion()[0],
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if xcode_settings.mac_toolchain_dir:
env['DEVELOPER_DIR'] = xcode_settings.mac_toolchain_dir
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if XcodeVersion() >= '0500' and not env.get('SDKROOT'):
sdk_root = xcode_settings._SdkRoot(configuration)
if not sdk_root:
sdk_root = xcode_settings._XcodeSdkPath('')
env['SDKROOT'] = sdk_root
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
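# Illustrative sketch: assuming an XcodeSettings instance for a bundle target
# (the paths below are made up), a call such as
#   env = _GetXcodeEnv(xcode_settings, '/out/Release', '/src/myproj', 'Release')
# would yield, among other keys,
#   env['BUILT_PRODUCTS_DIR'] == '/out/Release'
#   env['CONFIGURATION']      == 'Release'
#   env['SRCROOT']            == '/src/myproj'
# Bundle-only keys such as WRAPPER_NAME and INFOPLIST_PATH are added only when
# xcode_settings._IsBundle() is true, and every value is normalized to the
# ${FOO} reference form before being returned.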
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
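# Illustrative examples of the normalization (doctest-style, values made up):
#   >>> _NormalizeEnvVarReferences('$FOO/bar')
#   '${FOO}/bar'
#   >>> _NormalizeEnvVarReferences('$(FOO)/$BAR')
#   '${FOO}/${BAR}'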
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
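# Illustrative sketch (variable names and values made up): with
#   expansions = [('FOO', 'foo'), ('BAR', '${FOO}/bar')]
# a call to ExpandEnvVars('$(BAR)/baz', expansions) walks the list in reverse,
# first substituting BAR -> '${FOO}/bar' and then FOO -> 'foo', giving
# 'foo/bar/baz'. The list is assumed to be ordered as GetSortedXcodeEnv()
# below produces it: referenced variables before the variables that use them.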
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '${bar} and ${baz}'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
# Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
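# Illustrative sketch (keys and values made up): for
#   env = {'A': 'plain', 'B': '${A}/x', 'C': '${B}/y'}
# the resulting order is ['A', 'B', 'C'] -- every key comes after the keys its
# value refers to, so a later expansion pass can resolve each entry in turn.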
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
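# Illustrative sketch (target and action made up): for a spec such as
#   {'target_name': 'App',
#    'postbuilds': [{'postbuild_name': 'Strip', 'action': ['strip', 'App']}]}
# this returns, when quiet is False,
#   ['echo POSTBUILD\(App\) Strip', 'strip App']
# i.e. an echo announcing the postbuild followed by the shell-encoded command.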
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
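# Illustrative sketch (configuration contents made up): a 'target'-toolset
# target whose configurations are {'Release': {'xcode_settings': {...}}} ends
# up with three entries after cloning:
#   'Release'                 -- the original dict
#   'Release-iphonesimulator' -- the same dict object, reused as an alias
#   'Release-iphoneos'        -- a deep copy whose xcode_settings['SDKROOT']
#                                is forced to 'iphoneos'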
| mit |
xavieraijon/init | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
| mit |
chfw/Flask-Excel | examples/database_example_formatted.py | 2 | 3569 | """
database_example_formatted.py
:copyright: (c) 2015 by C. W.
:license: New BSD
"""
from flask import Flask, request, jsonify, redirect, url_for
import flask_excel as excel
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
# please uncomment the following line if you use pyexcel < 0.2.2
# import pyexcel.ext.xls
app = Flask(__name__)
excel.init_excel(app)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///tmp.db"
db = SQLAlchemy(app)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(80))
body = db.Column(db.Text)
pub_date = db.Column(db.DateTime)
category_id = db.Column(db.Integer, db.ForeignKey("category.id"))
category = db.relationship(
"Category", backref=db.backref("posts", lazy="dynamic")
)
def __init__(self, title, body, category, pub_date=None):
self.title = title
self.body = body
if pub_date is None:
pub_date = datetime.utcnow()
self.pub_date = pub_date
self.category = category
def __repr__(self):
return "<Post %r>" % self.title
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
def __init__(self, name):
self.name = name
def __repr__(self):
return "<Category %r>" % self.name
db.create_all()
@app.route("/upload", methods=["GET", "POST"])
def upload_file():
if request.method == "POST":
return jsonify({"result": request.get_array("file")})
return """
<!doctype html>
<title>Upload an excel file</title>
<h1>Excel file upload (csv, tsv, csvz, tsvz only)</h1>
<form action="" method=post enctype=multipart/form-data><p>
<input type=file name=file><input type=submit value=Upload>
</form>
"""
@app.route("/download", methods=["GET"])
def download_file():
return excel.make_response_from_array([[1, 2], [3, 4]], "csv")
@app.route("/import", methods=["GET", "POST"])
def doimport():
if request.method == "POST":
def category_init_func(row):
c = Category(row["name"])
c.id = row["id"]
return c
def post_init_func(row):
c = Category.query.filter_by(name=row["category"]).first()
p = Post(row["title"], row["body"], c, row["pub_date"])
return p
request.save_book_to_database(
field_name="file",
session=db.session,
tables=[Category, Post],
initializers=[category_init_func, post_init_func],
)
return redirect(url_for(".handson_table"), code=302)
return """
<!doctype html>
<title>Upload an excel file</title>
<h1>Excel file upload (xls, xlsx, ods please)</h1>
<form action="" method=post enctype=multipart/form-data><p>
<input type=file name=file><input type=submit value=Upload>
</form>
"""
@app.route("/export", methods=["GET"])
def doexport():
return excel.make_response_from_tables(db.session, [Category, Post], "xls")
@app.route("/custom_export", methods=["GET"])
def docustomexport():
query_sets = Category.query.filter_by(id=1).all()
column_names = ["id", "name"]
return excel.make_response_from_query_sets(
query_sets, column_names, "xls", dest_sheet_name="custom_sheet"
)
@app.route("/handson_view", methods=["GET"])
def handson_table():
return excel.make_response_from_tables(
db.session, [Category, Post], "handsontable.html"
)
if __name__ == "__main__":
app.run()
| bsd-3-clause |
dmitriy0611/django | tests/m2m_through_regress/tests.py | 182 | 9847 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from django.utils.six import StringIO
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "[email protected]", "password")
cls.jane = User.objects.create_user("jane", "[email protected]", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.bob, "group_set", [])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.roll, "members", [])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.rock.members.create, name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.bob.group_set.create, name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254"
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
class M2MThroughSerializationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks)
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml",
indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing if wrong objects get deleted due to using wrong
# field value in m2m queries. So, it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
def test_add_null(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
nullcar.drivers._add_items('car', 'driver', self.unused_driver)
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
fixtures = ["m2m_through"]
def test_sequence_creation(self):
"Check that sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table. Refs #11107"
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""")
| bsd-3-clause |
raj454raj/eden | tests/dbmigration/example_migration.py | 33 | 2001 | import os
import sys
import copy
import subprocess
WEB2PY_PATH = sys.argv[1]
APP = sys.argv[2]
changed_table = "org_organisation"
new_field = "type_id"
new_table = "org_organisation_type"
old_field = "type"
new_table_field = "name"
os.chdir(WEB2PY_PATH)
sys.path.append(WEB2PY_PATH)
from gluon.custom_import import custom_import_install
custom_import_install(WEB2PY_PATH)
from gluon.shell import env
from gluon import DAL, Field
old_env = env(APP, c=None, import_models=True)
old_str ='''
try:
s3db.load_all_models()
except NameError:
print "s3db not defined"
'''
globals().update(**old_env)
exec old_str in globals(), locals()
database_string = "sqlite://storage.db"
old_database_folder = "%s/applications/%s/databases" % (WEB2PY_PATH, APP)
temp_db = DAL(database_string,
folder=old_database_folder,
migrate_enabled=True,
migrate=True)
# Migration Script
list_of_fields = []
list_of_fields.append(Field(new_field, "integer"))
list_of_new_table_fields = []
list_of_new_table_fields.append(Field(new_table_field, "string"))
try:
db[changed_table]._primarykey
except KeyError:
db[changed_table]._primarykey = None
temp_db.define_table(changed_table,
db[changed_table],
*list_of_fields,
primarykey = db[changed_table]._primarykey
)
temp_db.define_table(new_table,
*list_of_new_table_fields
)
temp_db.commit()
# Populate the new lookup table from the old free-text field and point the new type_id column at it
for old_row in temp_db().select(temp_db[changed_table][old_field]):
if (len(temp_db(temp_db[new_table][new_table_field] == old_row[old_field]).select()) == 0):
row = temp_db[new_table].insert(name = old_row[old_field])
new_id = int(row["id"])
temp_db(temp_db[changed_table][old_field] == old_row[old_field]).update(type_id = new_id)
temp_db.commit()
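# Illustrative sketch of the intended effect (row values made up): before the
# loop an org_organisation row carries a free-text type such as
#   {'id': 7, 'type': 'NGO'}
# after the loop each distinct value has one org_organisation_type row,
#   {'id': 1, 'name': 'NGO'}
# and every organisation with that value is updated to reference it:
#   {'id': 7, 'type': 'NGO', 'type_id': 1}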
# END =========================================================================
| mit |
ahmadRagheb/goldenHR | erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py | 62 | 3071 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
proj_details = get_project_details()
pr_item_map = get_purchased_items_cost()
se_item_map = get_issued_items_cost()
dn_item_map = get_delivered_items_cost()
data = []
for project in proj_details:
data.append([project.name, pr_item_map.get(project.name, 0),
se_item_map.get(project.name, 0), dn_item_map.get(project.name, 0),
project.project_name, project.status, project.company,
project.customer, project.estimated_costing, project.expected_start_date,
project.expected_end_date])
return columns, data
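# Illustrative sketch (figures made up): each row appended above lines up with
# get_columns(), e.g.
#   ['PROJ-0001', 12000.0, 3500.0, 9000.0, 'Warehouse fit-out', 'Open',
#    'My Company', 'ACME Ltd', 25000.0, '2015-01-01', '2015-06-30']
# i.e. project id, purchased / issued / delivered item costs, then the
# project master fields.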
def get_columns():
return [_("Project Id") + ":Link/Project:140", _("Cost of Purchased Items") + ":Currency:160",
_("Cost of Issued Items") + ":Currency:160", _("Cost of Delivered Items") + ":Currency:160",
_("Project Name") + "::120", _("Project Status") + "::120", _("Company") + ":Link/Company:100",
_("Customer") + ":Link/Customer:140", _("Project Value") + ":Currency:120",
_("Project Start Date") + ":Date:120", _("Completion Date") + ":Date:120"]
def get_project_details():
return frappe.db.sql(""" select name, project_name, status, company, customer, estimated_costing,
expected_start_date, expected_end_date from tabProject where docstatus < 2""", as_dict=1)
def get_purchased_items_cost():
pr_items = frappe.db.sql("""select project, sum(base_net_amount) as amount
from `tabPurchase Receipt Item` where ifnull(project, '') != ''
and docstatus = 1 group by project""", as_dict=1)
pr_item_map = {}
for item in pr_items:
pr_item_map.setdefault(item.project, item.amount)
return pr_item_map
def get_issued_items_cost():
se_items = frappe.db.sql("""select se.project, sum(se_item.amount) as amount
from `tabStock Entry` se, `tabStock Entry Detail` se_item
where se.name = se_item.parent and se.docstatus = 1 and ifnull(se_item.t_warehouse, '') = ''
and ifnull(se.project, '') != '' group by se.project""", as_dict=1)
se_item_map = {}
for item in se_items:
se_item_map.setdefault(item.project, item.amount)
return se_item_map
def get_delivered_items_cost():
dn_items = frappe.db.sql("""select dn.project, sum(dn_item.base_net_amount) as amount
from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item
where dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != ''
group by dn.project""", as_dict=1)
si_items = frappe.db.sql("""select si.project, sum(si_item.base_net_amount) as amount
from `tabSales Invoice` si, `tabSales Invoice Item` si_item
where si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1
and si.is_pos = 1 and ifnull(si.project, '') != ''
group by si.project""", as_dict=1)
dn_item_map = {}
for item in dn_items:
dn_item_map.setdefault(item.project, item.amount)
for item in si_items:
dn_item_map.setdefault(item.project, item.amount)
return dn_item_map
| gpl-3.0 |
clubcapra/Ibex | src/capra_ai/scripts/igvc_basic_north.py | 1 | 1052 | #! /usr/bin/env python
from state_ai import StateAi
import rospy
from math import pi
from std_msgs.msg import Bool
class IGVCBasicNorth(StateAi):
def __init__(self):
super(IGVCBasicNorth, self).__init__("igvc_basic_north")
def on_start(self):
#self.generate_circle(2.0, pi/4, 2 * pi - pi/4, pi/270.0, 120)
pass
def on_goal_changed(self, goal_msg):
rospy.loginfo("Targeting goal with priority: {}".format(goal_msg.priority))
if goal_msg.priority == 105: ## First GOAL
pass
if goal_msg.priority == 404: ## MiddlePoint
self.clear_octomap(self.start_pos, 5, 5)
if goal_msg.priority == 103: ## Last GOAL
pass
if goal_msg.priority == 402:
self.generate_bar(8, -1.5, -1)
def on_last_goal_reached(self, msg):
rospy.loginfo("WE FINISHED THE BASIC COURSE !!! CONGRATS EVERYONE !!!")
pass
if __name__ == "__main__":
try:
a = IGVCBasicNorth()
except rospy.ROSInterruptException:
pass | gpl-3.0 |
batxes/4c2vhic | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres22408.py | 4 | 88247 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((2741.55, -327.326, 2795.68), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((2635.52, -708.606, 2992.33), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((2541.68, -317.222, 3056.47), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2753.53, -421.408, 3384.17), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((3062.32, -423.876, 3762.62), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((3009.04, 119.367, 3419.17), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2968.55, 695.52, 3259.31), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2913.65, 1050.01, 3778.63), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2979.14, 1364.88, 4413.29), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((3026.4, 1414.54, 4001.2), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((3051.88, 1171.6, 3448.43), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((3030.88, 913.082, 2821.97), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((3173.84, 558.064, 2529.87), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((3331.13, 240.428, 2598.69), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((3533.35, -29.3789, 2397.38), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((3583.23, -440.423, 2058.68), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((3547.97, -849.289, 1565.82), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((3373.05, -981.874, 1012.02), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((3077.47, -1143.6, 472.392), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((2784.73, -1601.17, -16.1303), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((2652.86, -942.844, 70.2615), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2771.95, -502.308, -113.207), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((2813.09, -141.296, -445.61), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((2610.53, 181.077, -477.077), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((2303.23, 269.335, -508.128), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((2226.37, 45.6785, -287.536), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((2489.64, 139.297, 29.4646), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((2474.67, 309.045, 596.606), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2462.57, 550.748, 1094.68), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2669.28, 689.534, 1468.55), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2824.23, 919.558, 1608.01), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((2798.14, 796.182, 1299.15), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((2607.19, 577.036, 1395.03), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((2422.81, 396.399, 1469.51), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((2239.48, 389.226, 1393.29), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((2014.88, 124.104, 1308.08), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2382.45, -230.454, 2251.68), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2369.99, -431.305, 3082.57), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((2153.53, -82.2576, 3597.1), (0.7, 0.7, 0.7), 221.694)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((2239.44, 594.941, 3492.15), (0.7, 0.7, 0.7), 259.341)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((2520.19, 1079.45, 2913.27), (0.7, 0.7, 0.7), 117.89)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((2628.76, 1216.45, 2087.75), (0.7, 0.7, 0.7), 116.071)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((2290.02, 1129.8, 1720.83), (0.7, 0.7, 0.7), 268.224)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((2060.12, 1149.05, 1954.89), (0.7, 0.7, 0.7), 386.918)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2026.33, 1328.4, 2562.9), (0.7, 0.7, 0.7), 121.316)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((1727.44, 1308.31, 2862.9), (0.7, 0.7, 0.7), 138.363)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((1627.79, 909.083, 2271), (1, 0.7, 0), 175.207)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((1330.65, 1020.55, 2919.69), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((876.668, 1075.86, 3491.95), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((955.807, 1422.62, 3079.83), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((1310.35, 1497.02, 2620.58), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((1516.64, 1597.33, 2582.89), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((1212.3, 1786.79, 3096.63), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((1027.78, 1971.44, 3534.04), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((889.374, 2106.02, 3927.64), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((1475.81, 2252.35, 3711.46), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((1666.43, 2250.77, 3237.23), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((1798.7, 2157.57, 2682.61), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((1868.39, 1939.33, 2200.25), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((1583.18, 1820.05, 2103.88), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((1577.01, 1777.02, 2235.57), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((1581.25, 2027.75, 2651.89), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((1389.66, 2489.65, 3292.78), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((1051.32, 3102.8, 4043.29), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((1544.31, 2733.61, 3811.03), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((1764.28, 2406.35, 3619.57), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((1625.84, 2437.7, 3345.5), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((1579.87, 2243.19, 3088.63), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((1378.6, 1963.01, 2898.42), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((1079.55, 1601.98, 2784.68), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((636.978, 1292.42, 2695.27), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((287.774, 1409.24, 2757.28), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((396.95, 2094.53, 3080.13), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((639.812, 2782.38, 3485.19), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((1014.06, 3219.86, 3844.25), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((1380.82, 3338.8, 3998.45), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((1046.63, 3619.82, 3709.06), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((614.949, 3472.15, 3569.53), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((485.953, 3498.96, 3739.87), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((469.933, 3847.22, 3836.31), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((886.004, 4118.01, 3304.47), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((1359.83, 3751.74, 2872.72), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((1481.84, 3252.26, 2574.88), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((1470.73, 3098.34, 2459.25), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((1168.59, 3883.83, 2880.49), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((1279.59, 3550.72, 3231.46), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((1496.95, 3073.77, 3349.07), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((1717.54, 2804.96, 3508.05), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((1665.43, 3276.68, 3838.83), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((1280.5, 3242.37, 3681.28), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((1066.7, 2892.62, 3585.36), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((1300.04, 2719.56, 3415.09), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((1656.4, 2705.44, 3570.43), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((1987.68, 2523.02, 3800.62), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((1993, 2261.19, 4351.74), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((1616.64, 2122.79, 4726.33), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((1170.24, 2175.32, 4250.69), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((1230.7, 2411.54, 3793.71), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((1508.99, 2593.7, 3913.91), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((1269.36, 2821.02, 4335.36), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((1169.42, 2529.58, 3379.37), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((1322.96, 2267.73, 2363.11), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((1713.69, 2290.53, 2028.12), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((1712.1, 2572.06, 2301.56), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((1613.57, 2715.52, 2648.03), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((1464.95, 2755.05, 3018.66), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((1343.45, 2575.21, 3368.75), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((1596.45, 2511.97, 3666.9), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((1844.16, 2497.68, 3895.53), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((2030.74, 2603.94, 3723.49), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((2092.89, 2750.61, 3511.04), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((2285.24, 2984.6, 3648.96), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((2429.43, 2891.63, 3272.3), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((2576.95, 2783.7, 2862.55), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2299.56, 2677.99, 2562.02), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((1886.4, 2891.84, 2280.86), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((1541.76, 3035.28, 1876.78), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((1214.23, 3658.04, 1726.81), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((846.04, 4085.66, 1785.25), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((1287.98, 3850.74, 1923.63), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((1462.32, 3548.85, 2123.45), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((1572.87, 3245.23, 2484.13), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((1433.54, 3214.93, 3022.63), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((1583.43, 2952.46, 3396.19), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((1811.4, 2770.88, 3791.12), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((2193.24, 2458.46, 4082.79), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((2429.59, 2199.64, 4649.2), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((2443.35, 1944.73, 5199.9), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((2010.84, 2027.01, 5036.63), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((1591.71, 2098.64, 4582.86), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((1558.49, 2438.54, 4392.82), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((1623.02, 2685.74, 4028.43), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((1817.16, 2934.72, 3720.99), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((1941.11, 3349.57, 3703.59), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((2218.1, 3437.14, 3490.06), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((2344.88, 3361.46, 3289.98), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((2137.04, 2938.81, 3261.78), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2182.42, 2445.89, 3035.18), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2404.72, 2055.48, 2785.44), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2321.07, 2027.08, 2682.99), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2021.37, 2331.66, 2754.97), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((1742.15, 2597.42, 2832.32), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((1807.26, 2928.63, 2885.26), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((1781.17, 3245.93, 2906.1), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((1636.24, 3580.87, 2953.13), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((1610.82, 3268.24, 2699.59), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((1722.97, 2994.43, 2625.4), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((1731.62, 2879.87, 2879.8), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((1844.73, 2482.67, 2746.77), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2015.31, 2178.97, 2731.69), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2281.95, 2418.65, 2744.08), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((2542.63, 2769.46, 2977.12), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((2762.9, 2985.04, 3198.66), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((2617.5, 3009.45, 2906.68), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2443.56, 2576.89, 2585.1), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2194.03, 2261.14, 2350.31), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((1876.22, 2166.23, 2497.78), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((1720.51, 2465.28, 2722.84), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((1472.39, 2881.28, 2862.14), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((1338.87, 2935.84, 2474.67), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((1408.5, 2757.04, 2018.6), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((1753.23, 2926.96, 1886.01), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((1665.04, 3485.8, 2161.74), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((1462.34, 4252.89, 2550.88), (0.7, 0.7, 0.7), 79.0164)
if "particle_164 geometry" not in marker_sets:
s=new_marker_set('particle_164 geometry')
marker_sets["particle_164 geometry"]=s
s= marker_sets["particle_164 geometry"]
mark=s.place_marker((1759.34, 4370.01, 2689.14), (0.7, 0.7, 0.7), 77.2821)
if "particle_165 geometry" not in marker_sets:
s=new_marker_set('particle_165 geometry')
marker_sets["particle_165 geometry"]=s
s= marker_sets["particle_165 geometry"]
mark=s.place_marker((1988.54, 4109.78, 2769.16), (0.7, 0.7, 0.7), 188.658)
if "particle_166 geometry" not in marker_sets:
s=new_marker_set('particle_166 geometry')
marker_sets["particle_166 geometry"]=s
s= marker_sets["particle_166 geometry"]
mark=s.place_marker((2303.09, 4117.51, 2710.28), (0.7, 0.7, 0.7), 115.437)
if "particle_167 geometry" not in marker_sets:
s=new_marker_set('particle_167 geometry')
marker_sets["particle_167 geometry"]=s
s= marker_sets["particle_167 geometry"]
mark=s.place_marker((2209.64, 3535.33, 2573.73), (0.7, 0.7, 0.7), 88.4916)
if "particle_168 geometry" not in marker_sets:
s=new_marker_set('particle_168 geometry')
marker_sets["particle_168 geometry"]=s
s= marker_sets["particle_168 geometry"]
mark=s.place_marker((2096.99, 2938.46, 2427.82), (0.7, 0.7, 0.7), 108.88)
if "particle_169 geometry" not in marker_sets:
s=new_marker_set('particle_169 geometry')
marker_sets["particle_169 geometry"]=s
s= marker_sets["particle_169 geometry"]
mark=s.place_marker((1845.49, 2695.74, 2358.05), (0.7, 0.7, 0.7), 172.119)
if "particle_170 geometry" not in marker_sets:
s=new_marker_set('particle_170 geometry')
marker_sets["particle_170 geometry"]=s
s= marker_sets["particle_170 geometry"]
mark=s.place_marker((1709.57, 3154.5, 2466.64), (0.7, 0.7, 0.7), 139.505)
if "particle_171 geometry" not in marker_sets:
s=new_marker_set('particle_171 geometry')
marker_sets["particle_171 geometry"]=s
s= marker_sets["particle_171 geometry"]
mark=s.place_marker((1593.12, 3607.82, 2590.08), (0.7, 0.7, 0.7), 92.7639)
if "particle_172 geometry" not in marker_sets:
s=new_marker_set('particle_172 geometry')
marker_sets["particle_172 geometry"]=s
s= marker_sets["particle_172 geometry"]
mark=s.place_marker((1374.97, 3522.61, 2512.68), (0.7, 0.7, 0.7), 89.8452)
if "particle_173 geometry" not in marker_sets:
s=new_marker_set('particle_173 geometry')
marker_sets["particle_173 geometry"]=s
s= marker_sets["particle_173 geometry"]
mark=s.place_marker((1599.05, 3532.11, 2345.71), (0.7, 0.7, 0.7), 149.446)
if "particle_174 geometry" not in marker_sets:
s=new_marker_set('particle_174 geometry')
marker_sets["particle_174 geometry"]=s
s= marker_sets["particle_174 geometry"]
mark=s.place_marker((1839.23, 3766.32, 2406.83), (0.7, 0.7, 0.7), 126.858)
if "particle_175 geometry" not in marker_sets:
s=new_marker_set('particle_175 geometry')
marker_sets["particle_175 geometry"]=s
s= marker_sets["particle_175 geometry"]
mark=s.place_marker((1710.12, 3947.55, 2659.44), (0.7, 0.7, 0.7), 106.046)
if "particle_176 geometry" not in marker_sets:
s=new_marker_set('particle_176 geometry')
marker_sets["particle_176 geometry"]=s
s= marker_sets["particle_176 geometry"]
mark=s.place_marker((1318.24, 3776.79, 2926.64), (0.7, 0.7, 0.7), 156.298)
if "particle_177 geometry" not in marker_sets:
s=new_marker_set('particle_177 geometry')
marker_sets["particle_177 geometry"]=s
s= marker_sets["particle_177 geometry"]
mark=s.place_marker((956.258, 3535.08, 3348.37), (0.7, 0.7, 0.7), 231.212)
if "particle_178 geometry" not in marker_sets:
s=new_marker_set('particle_178 geometry')
marker_sets["particle_178 geometry"]=s
s= marker_sets["particle_178 geometry"]
mark=s.place_marker((711.147, 3028.52, 3293.96), (0.7, 0.7, 0.7), 88.4916)
if "particle_179 geometry" not in marker_sets:
s=new_marker_set('particle_179 geometry')
marker_sets["particle_179 geometry"]=s
s= marker_sets["particle_179 geometry"]
mark=s.place_marker((818.465, 2687.3, 2977.72), (0.7, 0.7, 0.7), 111.334)
if "particle_180 geometry" not in marker_sets:
s=new_marker_set('particle_180 geometry')
marker_sets["particle_180 geometry"]=s
s= marker_sets["particle_180 geometry"]
mark=s.place_marker((1230.42, 2524.04, 2548.39), (0.7, 0.7, 0.7), 127.619)
if "particle_181 geometry" not in marker_sets:
s=new_marker_set('particle_181 geometry')
marker_sets["particle_181 geometry"]=s
s= marker_sets["particle_181 geometry"]
mark=s.place_marker((1530.68, 2434.22, 2211.53), (0.7, 0.7, 0.7), 230.746)
if "particle_182 geometry" not in marker_sets:
s=new_marker_set('particle_182 geometry')
marker_sets["particle_182 geometry"]=s
s= marker_sets["particle_182 geometry"]
mark=s.place_marker((1462.02, 2765.87, 2407.25), (0.7, 0.7, 0.7), 124.573)
if "particle_183 geometry" not in marker_sets:
s=new_marker_set('particle_183 geometry')
marker_sets["particle_183 geometry"]=s
s= marker_sets["particle_183 geometry"]
mark=s.place_marker((1258.93, 3278.44, 2663.65), (0.7, 0.7, 0.7), 124.489)
if "particle_184 geometry" not in marker_sets:
s=new_marker_set('particle_184 geometry')
marker_sets["particle_184 geometry"]=s
s= marker_sets["particle_184 geometry"]
mark=s.place_marker((1502.98, 3550.95, 2787.43), (0.7, 0.7, 0.7), 196.61)
if "particle_185 geometry" not in marker_sets:
s=new_marker_set('particle_185 geometry')
marker_sets["particle_185 geometry"]=s
s= marker_sets["particle_185 geometry"]
mark=s.place_marker((1614.74, 3380.99, 2789.56), (0.7, 0.7, 0.7), 134.049)
if "particle_186 geometry" not in marker_sets:
s=new_marker_set('particle_186 geometry')
marker_sets["particle_186 geometry"]=s
s= marker_sets["particle_186 geometry"]
mark=s.place_marker((1312.53, 3421.41, 2728.61), (0.7, 0.7, 0.7), 141.493)
if "particle_187 geometry" not in marker_sets:
s=new_marker_set('particle_187 geometry')
marker_sets["particle_187 geometry"]=s
s= marker_sets["particle_187 geometry"]
mark=s.place_marker((971.504, 3691.2, 2725.46), (0.7, 0.7, 0.7), 172.203)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((1600.99, 3615.22, 2696.21), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((1979.38, 3415, 2928.39), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((2095.69, 3285.75, 3276.21), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((2529.16, 3118.2, 3575.06), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((2795.05, 2677.36, 3379.26), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((2951.39, 2370.65, 3600.66), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((3052.67, 2342.01, 4079.13), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((3255.29, 2539.83, 4956.41), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((2715.26, 2716.14, 4985.63), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((2161.25, 2801.95, 4403.76), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((1819.1, 2465.83, 3522.74), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((1731.93, 2334, 2874.81), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((1578.96, 2723.19, 2742.39), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((1646.41, 3167.97, 2712.49), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((1874.69, 3539.8, 2664.36), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((2170.35, 3393.79, 2572.64), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((2007.39, 3370.7, 2815.9), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((1941.99, 3566.74, 2608.47), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((1944.32, 2904.91, 2370.57), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((1822.94, 2394.1, 2409.65), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((1609.62, 2760.4, 3146.6), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((1740.52, 3531.79, 3901.15), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((1743.69, 3905.72, 3378.27), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((1432.79, 4099.28, 2783.14), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((873.754, 3778.78, 2320.67), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((721.756, 3825.45, 1697.84), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((916.647, 4100.36, 1128.44), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((1280.27, 4292.22, 1031.47), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((1344.89, 3686, 1083.45), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((1145.22, 3299.63, 1009.84), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((1129.86, 2933.12, 1004.29), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((884.041, 2895.93, 1224.04), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((686.954, 3320.25, 1559.97), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((839.31, 3632.16, 2025.31), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((1016.31, 3360.94, 2294.73), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((1279.65, 2676.56, 2055.04), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((1325.8, 2456.22, 1919.87), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((1336.36, 2739.85, 1984.62), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((1250.02, 2843.78, 2369.7), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((1669.3, 3025.48, 2266.62), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2089.66, 2826.92, 2176.5), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((2577.51, 2763.59, 2085.23), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((2848.86, 3107.9, 2220.87), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((2944.39, 3345.32, 2431.25), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((2948.74, 3226.99, 2104.49), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((2990.67, 3036.92, 1849.09), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((2610.89, 2922.67, 2100.81), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((2248.7, 3109.92, 2337.43), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((1953.18, 3650.87, 2507.22), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((1601.93, 4058.59, 2427.21), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((1716.74, 4444.65, 2257.34), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((1849.13, 4155.38, 2578.67), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((2317.53, 3793.31, 2199.7), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((2553.21, 3348.15, 2097.32), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((2666.12, 3509.63, 2339.16), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((2490.91, 3148.2, 2405.43), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((2480.94, 2977.52, 2083.48), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((2351.64, 2995.61, 1791.84), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((2376.02, 2758.91, 1741.39), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((2683.07, 2474.69, 1973.49), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((2672.05, 2802.16, 2130.33), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((2572.51, 3217.25, 2075.18), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((2387.03, 3247.21, 1767.48), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((2127.7, 3032.6, 1599.17), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((1742.75, 2972.58, 1421.19), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((1392.15, 3088.31, 1300.23), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((1718.89, 3167.89, 1340.9), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((1905.03, 2837.64, 1626.12), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((2215.25, 2590.23, 1807.28), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((2000.89, 2451.62, 1699.83), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((1980.93, 2411.52, 1806.35), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((1803.91, 2801.24, 1985.16), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((1759.31, 2936.32, 2218.67), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2101.28, 2648.14, 2190.72), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((2407.4, 2184.96, 1989.89), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((2648.1, 1776.94, 1884.71), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((2349.39, 1569.55, 1909.22), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((2109.11, 1727.2, 1986.03), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((2299.59, 1607.88, 1849.35), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((2282.74, 1584.73, 1772.01), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((2372.79, 1811.6, 1613.76), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((2555.75, 2293.2, 1377.48), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((2309.24, 2203.9, 1164.5), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((2243.52, 1815.84, 1180.39), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((2376.27, 2652.35, 1062.06), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((2348.71, 3597.63, 841.451), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((2195.52, 3676.56, 1114.88), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((2270.97, 3498.65, 1477.92), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((2355.02, 3351.67, 1747.29), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((2322.97, 2742.56, 1839.41), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((2286.68, 2131.17, 1819.69), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((2365.78, 2098.15, 1827.52), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((2618.98, 2443.32, 2002.75), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((3048.45, 2025.92, 1996.98), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((3290.74, 1659.48, 1647.09), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((3421.67, 1605.96, 1556.05), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((3567.4, 1692.5, 1622.05), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((3789.68, 2124.09, 1537.07), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((4284.62, 2333, 1361.14), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((4241.61, 2133.35, 1673.72), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((4071.99, 1752.75, 2061.96), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((4468.28, 1644.84, 2537.11), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((5158.43, 1707.88, 2992.52), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((5305.49, 2070.3, 2799.74), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((4406.25, 1833.39, 1735.12), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((4630.74, 1537.67, 2812.46), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((4706.01, 1237.08, 3184.68), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((4808.28, 1231.69, 2877.49), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((5227.01, 1284.68, 2702.89), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((4808.97, 1455.23, 2280.03), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((4071.41, 1472.28, 1930.31), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((3834.06, 1622.42, 1608.06), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((3870.9, 1477.77, 1470.88), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((4022.97, 1755.91, 1184.86), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((4124.22, 2124.09, 1140.97), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((4181.68, 2484.29, 1254.79), (0.7, 0.7, 0.7), 209.047)
if "particle_304 geometry" not in marker_sets:
s=new_marker_set('particle_304 geometry')
marker_sets["particle_304 geometry"]=s
s= marker_sets["particle_304 geometry"]
mark=s.place_marker((4229.81, 2477.23, 845.986), (0.7, 0.7, 0.7), 126.985)
if "particle_305 geometry" not in marker_sets:
s=new_marker_set('particle_305 geometry')
marker_sets["particle_305 geometry"]=s
s= marker_sets["particle_305 geometry"]
mark=s.place_marker((4580.11, 2470.31, 580.054), (0.7, 0.7, 0.7), 122.205)
if "particle_306 geometry" not in marker_sets:
s=new_marker_set('particle_306 geometry')
marker_sets["particle_306 geometry"]=s
s= marker_sets["particle_306 geometry"]
mark=s.place_marker((4797.46, 2254.26, 550.085), (0.7, 0.7, 0.7), 107.95)
if "particle_307 geometry" not in marker_sets:
s=new_marker_set('particle_307 geometry')
marker_sets["particle_307 geometry"]=s
s= marker_sets["particle_307 geometry"]
mark=s.place_marker((4314.89, 1987.98, 796.109), (0.7, 0.7, 0.7), 182.567)
if "particle_308 geometry" not in marker_sets:
s=new_marker_set('particle_308 geometry')
marker_sets["particle_308 geometry"]=s
s= marker_sets["particle_308 geometry"]
mark=s.place_marker((3894.36, 1803.08, 1227.49), (0.7, 0.7, 0.7), 185.274)
if "particle_309 geometry" not in marker_sets:
s=new_marker_set('particle_309 geometry')
marker_sets["particle_309 geometry"]=s
s= marker_sets["particle_309 geometry"]
mark=s.place_marker((3686.7, 1923.63, 1593.65), (0.7, 0.7, 0.7), 413.567)
if "particle_310 geometry" not in marker_sets:
s=new_marker_set('particle_310 geometry')
marker_sets["particle_310 geometry"]=s
s= marker_sets["particle_310 geometry"]
mark=s.place_marker((3511.6, 1739.84, 1620.74), (0.7, 0.7, 0.7), 240.01)
if "particle_311 geometry" not in marker_sets:
s=new_marker_set('particle_311 geometry')
marker_sets["particle_311 geometry"]=s
s= marker_sets["particle_311 geometry"]
mark=s.place_marker((3529.83, 1794.17, 1612.06), (0.7, 0.7, 0.7), 238.995)
if "particle_312 geometry" not in marker_sets:
s=new_marker_set('particle_312 geometry')
marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((3637.04, 1716.27, 1431.83), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
s=new_marker_set('particle_313 geometry')
marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((3598.33, 1835.65, 845.316), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
s=new_marker_set('particle_314 geometry')
marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((3783.14, 1410.78, 813.574), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
s=new_marker_set('particle_315 geometry')
marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((3705.6, 1414.86, 1080.75), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
s=new_marker_set('particle_316 geometry')
marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((3531.5, 1797.64, 1173.68), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
s=new_marker_set('particle_317 geometry')
marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((3714.58, 2479.07, 1241.28), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
s=new_marker_set('particle_318 geometry')
marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((4175.68, 2931.47, 1618.34), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
s=new_marker_set('particle_319 geometry')
marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((4221.08, 2813.66, 2132.32), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
s=new_marker_set('particle_320 geometry')
marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((4026.08, 2564.39, 2516.67), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
s=new_marker_set('particle_321 geometry')
marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((3990.14, 2349.02, 2927.53), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
s=new_marker_set('particle_322 geometry')
marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((4116.21, 2158.41, 3280.12), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
s=new_marker_set('particle_323 geometry')
marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((4453.56, 2047.54, 3664.74), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
s=new_marker_set('particle_324 geometry')
marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((4383.9, 2210.94, 3045.95), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
s=new_marker_set('particle_325 geometry')
marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((4156.32, 2315.87, 2593.87), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
s=new_marker_set('particle_326 geometry')
marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((4057.1, 2772.29, 2751.64), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
s=new_marker_set('particle_327 geometry')
marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((4112.88, 3220, 2822.84), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
s=new_marker_set('particle_328 geometry')
marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((3728.31, 2984.82, 2862.59), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
s=new_marker_set('particle_329 geometry')
marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((3545.48, 2716.98, 3003.47), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
s=new_marker_set('particle_330 geometry')
marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((3761.85, 2587.02, 3220.9), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
s=new_marker_set('particle_331 geometry')
marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((4128.63, 2521.18, 3453.55), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
s=new_marker_set('particle_332 geometry')
marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((4541.37, 2464.77, 3684.1), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
s=new_marker_set('particle_333 geometry')
marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((4366.87, 2510.48, 3307.43), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
s=new_marker_set('particle_334 geometry')
marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((4150.95, 2443.39, 2734.26), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
s=new_marker_set('particle_335 geometry')
marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((3703.28, 2365.92, 2265.36), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
s=new_marker_set('particle_336 geometry')
marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((3554.78, 2440.35, 1800.84), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
s=new_marker_set('particle_337 geometry')
marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((3773.25, 2698.01, 1374.04), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
s=new_marker_set('particle_338 geometry')
marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((4102.69, 2703.51, 1015.64), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
s=new_marker_set('particle_339 geometry')
marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((4004.78, 2341.04, 681.696), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
s=new_marker_set('particle_340 geometry')
marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((4275.49, 2016.37, 616.45), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
s=new_marker_set('particle_341 geometry')
marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((4456.24, 2336.22, 459.961), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
s=new_marker_set('particle_342 geometry')
marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((4086.01, 2486.03, 475.835), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
s=new_marker_set('particle_343 geometry')
marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((3899.99, 2643.86, 808.879), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
s=new_marker_set('particle_344 geometry')
marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((4096.28, 2787.7, 1280.34), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
s=new_marker_set('particle_345 geometry')
marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((4530.89, 3211.78, 1448.26), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
s=new_marker_set('particle_346 geometry')
marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((4603.81, 3265.01, 1973.68), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
s=new_marker_set('particle_347 geometry')
marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((4276.76, 3052.95, 2484.62), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
s=new_marker_set('particle_348 geometry')
marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((4332.38, 2622.66, 2732.09), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
s=new_marker_set('particle_349 geometry')
marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((4432.95, 2456.81, 3041.05), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
s=new_marker_set('particle_350 geometry')
marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((4188.86, 2666.5, 3183.48), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
s=new_marker_set('particle_351 geometry')
marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((4028.61, 2877.28, 2807.05), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
s=new_marker_set('particle_352 geometry')
marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((4007.66, 2866.08, 2267.34), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
s=new_marker_set('particle_353 geometry')
marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((4200.86, 2884.79, 1633.83), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
s=new_marker_set('particle_354 geometry')
marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((4414.91, 2843.91, 1168.97), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
s=new_marker_set('particle_355 geometry')
marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((4253.57, 2805.12, 872.176), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
s=new_marker_set('particle_356 geometry')
marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((3802.04, 2393.54, 950.575), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
s=new_marker_set('particle_357 geometry')
marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((3163.65, 1993.15, 831.422), (0.7, 0.7, 0.7), 154.945)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
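# --- Editorial sketch (not part of the original script): every particle above
# repeats the same four-line pattern -- look up or create the marker set named
# "particle_N geometry", then place one marker in it. Assuming only the
# new_marker_set()/place_marker() helpers and the marker_sets dict already used
# in this script, the repeated blocks could be generated from a table of
# (name, xyz, rgb, radius) tuples, as below. The function is illustrative and
# is not called anywhere.
def place_particle_markers(particles):
  # particles: iterable of (name, (x, y, z), (r, g, b), radius) tuples
  for name, xyz, rgb, radius in particles:
    if name not in marker_sets:
      marker_sets[name] = new_marker_set(name)
    marker_sets[name].place_marker(xyz, rgb, radius)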
| gpl-3.0 |
green-span/green-mail | old/imapproxyserver.py | 1 | 2623 |
#|##############################################################################
#|Copyright (c) 2009, The Green-Span Project. All rights reserved. This code is
#|Open Source Free Software - redistribution and use in source and binary forms,
#|with or without modification, are permitted under the Two Clause BSD License.
#|##############################################################################
#|File Created: 2009-04-01
#|Author(s): Sean Hastings,
#|##############################################################################
VERBOSE = True
from globals import ALLVERBOSE
"""
ImapProxyServer Object
Version 0.0.1
Sets up framework for logging into proxy server
spawns new CredentialsChecker
spawns new protocol factory
listens for connections
Version 0.0.0 - New File
"""
from twisted.internet import reactor, protocol
from twisted.cred import portal
from twisted.mail import imap4
from credentialschecker import CredentialsChecker
from imapproxyuserrealm import ImapProxyUserRealm
class ImapProxyServer(object):
"""Serves up protocol for incomming user imap-client connections"""
def __init__(self,valid_logins):
"gets valid email-addy:password combos from central object"
#shared caching for connections to same IMAP account
self.connected = {}
#Start up server
cred_check = CredentialsChecker(valid_logins)
the_portal = portal.Portal(ImapProxyUserRealm(self))
the_portal.registerChecker(cred_check)
factory = ImapProxyServerProtocolFactory()
factory.portal = the_portal
reactor.listenTCP(143, factory) #TODO - convert to listenSSL
class ImapProxyServerProtocol(imap4.IMAP4Server):
"Extension of Imap4Server class to expose functionality"
def lineReceived(self, line):
if ALLVERBOSE or VERBOSE: print "C>P S:", line
imap4.IMAP4Server.lineReceived(self,line)
def sendLine(self,line):
if ALLVERBOSE or VERBOSE: print "C<P S", line
imap4.IMAP4Server.sendLine(self,line)
def connectionLost(self, reason):
if ALLVERBOSE or VERBOSE: print "ImapProxyServerProtocol.connectionLost - %s", reason
imap4.IMAP4Server.connectionLost(self,reason)
class ImapProxyServerProtocolFactory(protocol.Factory):
protocol = ImapProxyServerProtocol
portal = None
def buildProtocol(self, address):
p = self.protocol()
p.portal = self.portal
p.factory = self
return p
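# --- Editorial usage sketch (not part of the original file). The proxy is
# driven by the Twisted reactor; valid_logins is assumed to be a mapping of
# e-mail addresses to passwords supplied by the "central object" mentioned in
# the docstrings. The address and password below are placeholders.
if __name__ == '__main__':
    demo_logins = {'user@example.com': 'secret'}  # hypothetical credentials
    ImapProxyServer(demo_logins)
    reactor.run()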
| bsd-2-clause |
altsen/diandiyun-platform | lms/djangoapps/linkedin/management/commands/linkedin_mailusers.py | 9 | 10824 | """
Send emails to users inviting them to add their course certificates to their
LinkedIn profiles.
"""
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPException
import json
import logging
import urllib
from boto.exception import AWSConnectionError
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand
from django.db import transaction
from django.template import Context
from django.template.loader import get_template
from django.core.urlresolvers import reverse
from optparse import make_option
from edxmako.shortcuts import render_to_string
from certificates.models import GeneratedCertificate
from courseware.courses import get_course_by_id, course_image_url
from ...models import LinkedIn
# The following is blatantly cribbed from bulk_email/tasks.py
# Errors that an individual email is failing to be sent, and should just
# be treated as a fail.
SINGLE_EMAIL_FAILURE_ERRORS = (
SESAddressBlacklistedError, # Recipient's email address has been temporarily blacklisted.
SESDomainEndsWithDotError, # Recipient's email address' domain ends with a period/dot.
SESIllegalAddressError, # Raised when an illegal address is encountered.
SESLocalAddressCharacterError, # An address contained a control or whitespace character.
)
# Exceptions that, if caught, should cause the task to be re-tried.
# These errors will be caught a limited number of times before the task fails.
LIMITED_RETRY_ERRORS = (
SMTPConnectError,
SMTPServerDisconnected,
AWSConnectionError,
)
# Errors that indicate that a mailing task should be retried without limit.
# An example is if email is being sent too quickly, but may succeed if sent
# more slowly. When caught by a task, it triggers an exponential backoff and retry.
# Retries happen continuously until the email is sent.
# Note that the SMTPDataErrors here are only those within the 4xx range.
# Those not in this range (i.e. in the 5xx range) are treated as hard failures
# and thus like SINGLE_EMAIL_FAILURE_ERRORS.
INFINITE_RETRY_ERRORS = (
SESMaxSendingRateExceededError, # Your account's requests/second limit has been exceeded.
SMTPDataError,
)
# Errors that are known to indicate an inability to send any more emails,
# and should therefore not be retried. For example, exceeding a quota for emails.
# Also, any SMTP errors that are not explicitly enumerated above.
BULK_EMAIL_FAILURE_ERRORS = (
SESAddressNotVerifiedError, # Raised when a "Reply-To" address has not been validated in SES yet.
SESIdentityNotVerifiedError, # Raised when an identity has not been verified in SES yet.
SESDomainNotConfirmedError, # Raised when domain ownership is not confirmed for DKIM.
SESDailyQuotaExceededError, # 24-hour allotment of outbound email has been exceeded.
SMTPException,
)
MAX_ATTEMPTS = 10
log = logging.getLogger("linkedin")
class Command(BaseCommand):
"""
Django command for inviting users to add their course certificates to their
LinkedIn profiles.
"""
args = ''
help = ('Sends emails to edX users that are on LinkedIn who have completed '
'course certificates, inviting them to add their certificates to '
'their LinkedIn profiles')
option_list = BaseCommand.option_list + (
make_option(
'--mock',
action='store_true',
dest='mock_run',
default=False,
help="Run without sending the final e-mails."),)
def __init__(self):
super(Command, self).__init__()
@transaction.commit_manually
def handle(self, *args, **options):
whitelist = settings.LINKEDIN_API['EMAIL_WHITELIST']
mock_run = options.get('mock_run', False)
accounts = LinkedIn.objects.filter(has_linkedin_account=True)
for account in accounts:
user = account.user
if whitelist and user.email not in whitelist:
# Whitelist only certain addresses for testing purposes
continue
try:
emailed = json.loads(account.emailed_courses)
except Exception:
log.exception("LinkedIn: Could not parse emailed_courses for {}".format(user.username))
continue
certificates = GeneratedCertificate.objects.filter(user=user)
certificates = certificates.filter(status='downloadable')
certificates = [cert for cert in certificates if cert.course_id not in emailed]
# Shouldn't happen, since we're only picking users who have
# certificates, but just in case...
if not certificates:
log.info("LinkedIn: No certificates for user {}".format(user.username))
continue
# Basic sanity checks passed, now try to send the emails
try:
success = False
success = self.send_grandfather_email(user, certificates, mock_run)
log.info("LinkedIn: Sent email for user {}".format(user.username))
if not mock_run:
emailed.extend([cert.course_id for cert in certificates])
if success and not mock_run:
account.emailed_courses = json.dumps(emailed)
account.save()
transaction.commit()
except BULK_EMAIL_FAILURE_ERRORS:
log.exception("LinkedIn: No further email sending will work, aborting")
transaction.commit()
return -1
except Exception:
log.exception("LinkedIn: User {} couldn't be processed".format(user.username))
transaction.commit()
def certificate_url(self, certificate):
"""
Generates a certificate URL based on LinkedIn's documentation. The
documentation is from a Word document: DAT_DOCUMENTATION_v3.12.docx
"""
course = get_course_by_id(certificate.course_id)
tracking_code = '-'.join([
'eml',
'prof', # the 'product'--no idea what that's supposed to mean
'edX', # Partner's name
course.number, # Certificate's name
'gf'])
query = [
('pfCertificationName', course.display_name_with_default),
('pfAuthorityId', settings.LINKEDIN_API['COMPANY_ID']),
('pfCertificationUrl', certificate.download_url),
('pfLicenseNo', certificate.course_id),
('pfCertStartDate', course.start.strftime('%Y%m')),
('_mSplash', '1'),
('trk', tracking_code),
('startTask', 'CERTIFICATION_NAME'),
('force', 'true')]
return 'http://www.linkedin.com/profile/guided?' + urllib.urlencode(query)
def send_grandfather_email(self, user, certificates, mock_run=False):
"""
Send the 'grandfathered' email informing historical students that they
may now post their certificates on their LinkedIn profiles.
"""
courses_list = []
for cert in certificates:
course = get_course_by_id(cert.course_id)
course_url = 'https://{}{}'.format(
settings.SITE_NAME,
reverse('course_root', kwargs={'course_id': cert.course_id})
)
course_title = course.display_name_with_default
course_img_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))
course_end_date = course.end.strftime('%b %Y')
course_org = course.org
courses_list.append({
'course_url': course_url,
'course_org': course_org,
'course_title': course_title,
'course_image_url': course_img_url,
'course_end_date': course_end_date,
'linkedin_add_url': self.certificate_url(cert),
})
context = {'courses_list': courses_list, 'num_courses': len(courses_list)}
body = render_to_string('linkedin/linkedin_email.html', context)
subject = u'{}, Add your Achievements to your LinkedIn Profile'.format(user.profile.name)
if mock_run:
return True
else:
return self.send_email(user, subject, body)
def send_email(self, user, subject, body, num_attempts=MAX_ATTEMPTS):
"""
Send an email. Return True if it succeeded, False if it didn't.
"""
fromaddr = u'[email protected]'
toaddr = u'{} <{}>'.format(user.profile.name, user.email)
msg = EmailMessage(subject, body, fromaddr, (toaddr,))
msg.content_subtype = "html"
i = 1
while i <= num_attempts:
try:
msg.send()
return True # Happy path!
except SINGLE_EMAIL_FAILURE_ERRORS:
# Something unrecoverable is wrong about the email acct we're sending to
log.exception(
u"LinkedIn: Email send failed for user {}, email {}"
.format(user.username, user.email)
)
return False
except LIMITED_RETRY_ERRORS:
# Something went wrong (probably an intermittent connection error),
# but maybe if we beat our heads against the wall enough times,
# we can crack our way through. Thwack! Thwack! Thwack!
# Give up after num_attempts though (for loop exits), let's not
# get carried away.
log.exception(
u"LinkedIn: Email send for user {}, email {}, encountered error, attempt #{}"
.format(user.username, user.email, i)
)
i += 1
continue
except INFINITE_RETRY_ERRORS:
# Dude, it will *totally* work if I just... sleep... a little...
# Things like max send rate exceeded. The smart thing would be
# to do exponential backoff. The lazy thing to do would be just
# sleep some arbitrary amount and trust that it'll probably work.
# GUESS WHAT WE'RE DOING BOYS AND GIRLS!?!
log.exception("LinkedIn: temporary error encountered, retrying")
time.sleep(1)
# If we hit here, we went through all our attempts without success
return False
| agpl-3.0 |
jupyter/nbgrader | nbgrader/tests/nbextensions/test_validate_assignment.py | 3 | 7158 | import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException, NoAlertPresentException
from .conftest import _make_nbserver, _make_browser, _close_nbserver, _close_browser
@pytest.fixture(scope="module")
def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
server = _make_nbserver("", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache)
def fin():
_close_nbserver(server)
request.addfinalizer(fin)
return server
@pytest.fixture(scope="module")
def browser(request, tempdir, nbserver):
browser = _make_browser(tempdir)
def fin():
_close_browser(browser)
request.addfinalizer(fin)
return browser
def _wait(browser):
return WebDriverWait(browser, 30)
def _load_notebook(browser, port, notebook, retries=5):
# go to the correct page
url = "http://localhost:{}/notebooks/{}.ipynb".format(port, notebook)
browser.get(url)
alert = ''
for _ in range(5):
if alert is None:
break
try:
alert = browser.switch_to.alert
except NoAlertPresentException:
alert = None
else:
print("Warning: dismissing unexpected alert ({})".format(alert.text))
alert.accept()
def page_loaded(browser):
return browser.execute_script(
"""
return (typeof Jupyter !== "undefined" &&
Jupyter.page !== undefined &&
Jupyter.notebook !== undefined &&
$("#notebook_name").text() === "{}");
""".format(notebook))
# wait for the page to load
try:
_wait(browser).until(page_loaded)
except TimeoutException:
if retries > 0:
print("Retrying page load...")
# page timeout, but sometimes this happens, so try refreshing?
_load_notebook(browser, port, retries=retries - 1, notebook=notebook)
else:
print("Failed to load the page too many times")
raise
def _wait_for_validate_button(browser):
def validate_exists(browser):
try:
browser.find_element_by_css_selector("button.validate")
except NoSuchElementException:
return False
return True
_wait(browser).until(validate_exists)
def _wait_for_modal(browser):
_wait(browser).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".modal-dialog")))
def _dismiss_modal(browser):
button = browser.find_element_by_css_selector(".modal-footer .btn-primary")
button.click()
def modal_gone(browser):
try:
browser.find_element_by_css_selector(".modal-dialog")
except NoSuchElementException:
return True
return False
_wait(browser).until(modal_gone)
@pytest.mark.nbextensions
def test_validate_ok(browser, port):
_load_notebook(browser, port, "submitted-changed")
_wait_for_validate_button(browser)
# click the "validate" button
browser.find_element_by_css_selector("button.validate").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it succeeded
browser.find_element_by_css_selector(".modal-dialog .validation-success")
# close the modal dialog
_dismiss_modal(browser)
@pytest.mark.nbextensions
def test_validate_failure(browser, port):
_load_notebook(browser, port, "submitted-unchanged")
_wait_for_validate_button(browser)
# click the "validate" button
browser.find_element_by_css_selector("button.validate").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it failed
browser.find_element_by_css_selector(".modal-dialog .validation-failed")
# close the modal dialog
_dismiss_modal(browser)
@pytest.mark.nbextensions
def test_validate_grade_cell_changed(browser, port):
_load_notebook(browser, port, "submitted-grade-cell-changed")
_wait_for_validate_button(browser)
# click the "validate" button
browser.find_element_by_css_selector("button.validate").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it failed
browser.find_element_by_css_selector(".modal-dialog .validation-changed")
# close the modal dialog
_dismiss_modal(browser)
@pytest.mark.nbextensions
def test_validate_locked_cell_changed(browser, port):
_load_notebook(browser, port, "submitted-locked-cell-changed")
_wait_for_validate_button(browser)
# click the "validate" button
browser.find_element_by_css_selector("button.validate").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it failed
browser.find_element_by_css_selector(".modal-dialog .validation-changed")
# close the modal dialog
_dismiss_modal(browser)
@pytest.mark.nbextensions
def test_validate_open_relative_file(browser, port):
_load_notebook(browser, port, "open_relative_file")
_wait_for_validate_button(browser)
# click the "validate" button
browser.find_element_by_css_selector("button.validate").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it succeeded
browser.find_element_by_css_selector(".modal-dialog .validation-success")
# close the modal dialog
_dismiss_modal(browser)
@pytest.mark.nbextensions
def test_validate_grade_cell_type_changed(browser, port):
_load_notebook(browser, port, "submitted-grade-cell-type-changed")
_wait_for_validate_button(browser)
# click the "validate" button
browser.find_element_by_css_selector("button.validate").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it failed
browser.find_element_by_css_selector(".modal-dialog .validation-type-changed")
# close the modal dialog
_dismiss_modal(browser)
@pytest.mark.nbextensions
def test_validate_answer_cell_type_changed(browser, port):
_load_notebook(browser, port, "submitted-answer-cell-type-changed")
_wait_for_validate_button(browser)
# click the "validate" button
browser.find_element_by_css_selector("button.validate").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it failed
browser.find_element_by_css_selector(".modal-dialog .validation-type-changed")
# close the modal dialog
_dismiss_modal(browser)
################################################################################
####### DO NOT ADD TESTS BELOW THIS LINE #######################################
################################################################################
@pytest.mark.nbextensions
def test_final(browser, port):
"""This is a final test to be run so that the browser doesn't hang, see
https://github.com/mozilla/geckodriver/issues/1151
"""
_load_notebook(browser, port, "blank")
| bsd-3-clause |
nkvelkov/Pyganizer | source/tests/test_task_scheduler.py | 1 | 2614 | import unittest
from task_scheduler import TaskScheduler
from task import Task
import arrow
class TaskSchedulerTestCase(unittest.TestCase):
def setUp(self):
self.task_scheduler = TaskScheduler()
self.tz = 'local'
self.datetime = arrow.Arrow(2015, 5, 9, 11, 11, 11, tzinfo=self.tz)
self.task = Task(self.datetime, "name", "message", 100, 1, 1, self.tz)
class TestTaskScheduler(TaskSchedulerTestCase):
def test_add_task(self):
now = arrow.now()
task = self.task_scheduler.append_task(now, "name", "message", 100, 1, self.tz)
self.assertTrue(task in self.task_scheduler.todos[task.datetime])
def test_add_multiple_active_tasks(self):
test_task = self.task
test_task.datetime = arrow.now()
self.task_scheduler.add_multiple_active_tasks([test_task, self.task])
self.assertTrue(test_task in self.task_scheduler.active_tasks and
self.task in self.task_scheduler.active_tasks)
def test_add_multiple_pending_tasks(self):
test_task = self.task
test_task.datetime = arrow.now()
self.task_scheduler.add_multiple_pending_tasks([test_task, self.task])
test_task_in = test_task in self.task_scheduler.todos[test_task.datetime]
task_in = self.task in self.task_scheduler.todos[self.task.datetime]
self.assertTrue(test_task_in and task_in)
def test_activate_task(self):
self.task_scheduler.insert_task(self.task)
self.task_scheduler._activate_task(self.task)
in_active_tasks = self.task in self.task_scheduler.active_tasks
removed = self.task.datetime not in self.task_scheduler.todos.keys()
self.assertTrue(in_active_tasks and removed)
def test_add_task_progress_by_id(self):
self.task_scheduler.insert_task(self.task)
self.task_scheduler.add_task_progress_by_id(self.task.tid, 110)
is_finished = self.task.datetime in self.task_scheduler.todos
self.assertTrue(is_finished)
def test_find_pending_task(self):
self.task_scheduler.insert_task(self.task)
self.assertTrue(self.task.datetime in self.task_scheduler.todos.keys())
def test_add_task_progress_by_id_(self):
self.task_scheduler.insert_task(self.task)
self.task_scheduler._activate_task(self.task)
self.task_scheduler.add_task_progress_by_id(self.task.tid, 10)
result_task = self.task_scheduler.find_active_task(self.task.tid)
self.assertEqual(result_task.completeness, 90)
if __name__ == '__main__':
unittest.main() | gpl-2.0 |
Glasgow2015/team-10 | env/lib/python2.7/site-packages/PIL/ImageFilter.py | 87 | 6618 | #
# The Python Imaging Library.
# $Id$
#
# standard filters
#
# History:
# 1995-11-27 fl Created
# 2002-06-08 fl Added rank and mode filters
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2002 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import functools
class Filter(object):
pass
class Kernel(Filter):
"""
Create a convolution kernel. The current version only
supports 3x3 and 5x5 integer and floating point kernels.
In the current version, kernels can only be applied to
"L" and "RGB" images.
:param size: Kernel size, given as (width, height). In the current
version, this must be (3,3) or (5,5).
:param kernel: A sequence containing kernel weights.
:param scale: Scale factor. If given, the result for each pixel is
divided by this value. the default is the sum of the
kernel weights.
:param offset: Offset. If given, this value is added to the result,
after it has been divided by the scale factor.
"""
def __init__(self, size, kernel, scale=None, offset=0):
if scale is None:
# default scale is sum of kernel
scale = functools.reduce(lambda a, b: a+b, kernel)
if size[0] * size[1] != len(kernel):
raise ValueError("not enough coefficients in kernel")
self.filterargs = size, scale, offset, kernel
def filter(self, image):
if image.mode == "P":
raise ValueError("cannot filter palette images")
return image.filter(*self.filterargs)
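def _kernel_usage_example():
    # Hedged usage sketch (added for illustration; not part of the original
    # module). "input.png" is a placeholder path. The scale argument is left
    # at its default, the sum of the nine weights (9 here), so this applies a
    # simple 3x3 box blur to an "L" or "RGB" image.
    from PIL import Image
    im = Image.open("input.png").convert("RGB")
    return im.filter(Kernel((3, 3), [1, 1, 1, 1, 1, 1, 1, 1, 1]))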
class BuiltinFilter(Kernel):
def __init__(self):
pass
class RankFilter(Filter):
"""
Create a rank filter. The rank filter sorts all pixels in
a window of the given size, and returns the **rank**'th value.
:param size: The kernel size, in pixels.
:param rank: What pixel value to pick. Use 0 for a min filter,
``size * size / 2`` for a median filter, ``size * size - 1``
for a max filter, etc.
"""
name = "Rank"
def __init__(self, size, rank):
self.size = size
self.rank = rank
def filter(self, image):
if image.mode == "P":
raise ValueError("cannot filter palette images")
image = image.expand(self.size//2, self.size//2)
return image.rankfilter(self.size, self.rank)
class MedianFilter(RankFilter):
"""
Create a median filter. Picks the median pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Median"
def __init__(self, size=3):
self.size = size
self.rank = size*size//2
class MinFilter(RankFilter):
"""
Create a min filter. Picks the lowest pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Min"
def __init__(self, size=3):
self.size = size
self.rank = 0
class MaxFilter(RankFilter):
"""
Create a max filter. Picks the largest pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Max"
def __init__(self, size=3):
self.size = size
self.rank = size*size-1
class ModeFilter(Filter):
"""
Create a mode filter. Picks the most frequent pixel value in a box with the
given size. Pixel values that occur only once or twice are ignored; if no
pixel value occurs more than twice, the original pixel value is preserved.
:param size: The kernel size, in pixels.
"""
name = "Mode"
def __init__(self, size=3):
self.size = size
def filter(self, image):
return image.modefilter(self.size)
class GaussianBlur(Filter):
"""Gaussian blur filter.
:param radius: Blur radius.
"""
name = "GaussianBlur"
def __init__(self, radius=2):
self.radius = radius
def filter(self, image):
return image.gaussian_blur(self.radius)
class UnsharpMask(Filter):
"""Unsharp mask filter.
See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
the parameters.
:param radius: Blur Radius
:param percent: Unsharp strength, in percent
:param threshold: Threshold controls the minimum brightness change that
will be sharpened
.. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
"""
name = "UnsharpMask"
def __init__(self, radius=2, percent=150, threshold=3):
self.radius = radius
self.percent = percent
self.threshold = threshold
def filter(self, image):
return image.unsharp_mask(self.radius, self.percent, self.threshold)
class BLUR(BuiltinFilter):
name = "Blur"
filterargs = (5, 5), 16, 0, (
1, 1, 1, 1, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 1, 1, 1, 1
)
class CONTOUR(BuiltinFilter):
name = "Contour"
filterargs = (3, 3), 1, 255, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1
)
class DETAIL(BuiltinFilter):
name = "Detail"
filterargs = (3, 3), 6, 0, (
0, -1, 0,
-1, 10, -1,
0, -1, 0
)
class EDGE_ENHANCE(BuiltinFilter):
name = "Edge-enhance"
filterargs = (3, 3), 2, 0, (
-1, -1, -1,
-1, 10, -1,
-1, -1, -1
)
class EDGE_ENHANCE_MORE(BuiltinFilter):
name = "Edge-enhance More"
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 9, -1,
-1, -1, -1
)
class EMBOSS(BuiltinFilter):
name = "Emboss"
filterargs = (3, 3), 1, 128, (
-1, 0, 0,
0, 1, 0,
0, 0, 0
)
class FIND_EDGES(BuiltinFilter):
name = "Find Edges"
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1
)
class SMOOTH(BuiltinFilter):
name = "Smooth"
filterargs = (3, 3), 13, 0, (
1, 1, 1,
1, 5, 1,
1, 1, 1
)
class SMOOTH_MORE(BuiltinFilter):
name = "Smooth More"
filterargs = (5, 5), 100, 0, (
1, 1, 1, 1, 1,
1, 5, 5, 5, 1,
1, 5, 44, 5, 1,
1, 5, 5, 5, 1,
1, 1, 1, 1, 1
)
class SHARPEN(BuiltinFilter):
name = "Sharpen"
filterargs = (3, 3), 16, 0, (
-2, -2, -2,
-2, 32, -2,
-2, -2, -2
)
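def _builtin_filter_example():
    # Hedged usage sketch (added for illustration; not part of the original
    # module). Shows the predefined filters above next to the parameterised
    # GaussianBlur/UnsharpMask classes; "photo.jpg" is a placeholder path.
    from PIL import Image
    im = Image.open("photo.jpg").convert("RGB")
    sharpened = im.filter(SHARPEN)
    blurred = im.filter(GaussianBlur(radius=4))
    masked = im.filter(UnsharpMask(radius=2, percent=150, threshold=3))
    return sharpened, blurred, masked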
| apache-2.0 |
xisisu/RT-Xen | tools/python/logging/logging-0.4.9.2/test/log_test5.py | 42 | 1795 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests SMTPHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.handlers
MAILHOST = 'beta'
FROM = '[email protected]'
TO = ['arkadi_renko']
SUBJECT = 'Test Logging email from Python logging module (non-buffering)'
def main():
log = logging.getLogger("")
log.setLevel(logging.DEBUG)
hdlr = logging.handlers.SMTPHandler(MAILHOST, FROM, TO, SUBJECT)
hdlr.setFormatter(logging.Formatter("%(asctime)s %(levelname)-5s %(message)s"))
log.addHandler(hdlr)
log.info("Test email contents")
log.removeHandler(hdlr)
if __name__ == "__main__":
main() | gpl-2.0 |
simongoffin/website_version | addons/purchase_requisition/wizard/purchase_requisition_partner.py | 373 | 2320 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_requisition_partner(osv.osv_memory):
_name = "purchase.requisition.partner"
_description = "Purchase Requisition Partner"
_columns = {
'partner_id': fields.many2one('res.partner', 'Supplier', required=True,domain=[('supplier', '=', True)]),
}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(purchase_requisition_partner, self).view_init(cr, uid, fields_list, context=context)
record_id = context and context.get('active_id', False) or False
tender = self.pool.get('purchase.requisition').browse(cr, uid, record_id, context=context)
if not tender.line_ids:
raise osv.except_osv(_('Error!'), _('Define product(s) you want to include in the call for bids.'))
return res
def create_order(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.requisition').make_purchase_order(cr, uid, active_ids, data.partner_id.id, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/files/apps/headphones/lib/unidecode/x1d4.py | 248 | 3839 | data = (
'A', # 0x00
'B', # 0x01
'C', # 0x02
'D', # 0x03
'E', # 0x04
'F', # 0x05
'G', # 0x06
'H', # 0x07
'I', # 0x08
'J', # 0x09
'K', # 0x0a
'L', # 0x0b
'M', # 0x0c
'N', # 0x0d
'O', # 0x0e
'P', # 0x0f
'Q', # 0x10
'R', # 0x11
'S', # 0x12
'T', # 0x13
'U', # 0x14
'V', # 0x15
'W', # 0x16
'X', # 0x17
'Y', # 0x18
'Z', # 0x19
'a', # 0x1a
'b', # 0x1b
'c', # 0x1c
'd', # 0x1d
'e', # 0x1e
'f', # 0x1f
'g', # 0x20
'h', # 0x21
'i', # 0x22
'j', # 0x23
'k', # 0x24
'l', # 0x25
'm', # 0x26
'n', # 0x27
'o', # 0x28
'p', # 0x29
'q', # 0x2a
'r', # 0x2b
's', # 0x2c
't', # 0x2d
'u', # 0x2e
'v', # 0x2f
'w', # 0x30
'x', # 0x31
'y', # 0x32
'z', # 0x33
'A', # 0x34
'B', # 0x35
'C', # 0x36
'D', # 0x37
'E', # 0x38
'F', # 0x39
'G', # 0x3a
'H', # 0x3b
'I', # 0x3c
'J', # 0x3d
'K', # 0x3e
'L', # 0x3f
'M', # 0x40
'N', # 0x41
'O', # 0x42
'P', # 0x43
'Q', # 0x44
'R', # 0x45
'S', # 0x46
'T', # 0x47
'U', # 0x48
'V', # 0x49
'W', # 0x4a
'X', # 0x4b
'Y', # 0x4c
'Z', # 0x4d
'a', # 0x4e
'b', # 0x4f
'c', # 0x50
'd', # 0x51
'e', # 0x52
'f', # 0x53
'g', # 0x54
'', # 0x55
'i', # 0x56
'j', # 0x57
'k', # 0x58
'l', # 0x59
'm', # 0x5a
'n', # 0x5b
'o', # 0x5c
'p', # 0x5d
'q', # 0x5e
'r', # 0x5f
's', # 0x60
't', # 0x61
'u', # 0x62
'v', # 0x63
'w', # 0x64
'x', # 0x65
'y', # 0x66
'z', # 0x67
'A', # 0x68
'B', # 0x69
'C', # 0x6a
'D', # 0x6b
'E', # 0x6c
'F', # 0x6d
'G', # 0x6e
'H', # 0x6f
'I', # 0x70
'J', # 0x71
'K', # 0x72
'L', # 0x73
'M', # 0x74
'N', # 0x75
'O', # 0x76
'P', # 0x77
'Q', # 0x78
'R', # 0x79
'S', # 0x7a
'T', # 0x7b
'U', # 0x7c
'V', # 0x7d
'W', # 0x7e
'X', # 0x7f
'Y', # 0x80
'Z', # 0x81
'a', # 0x82
'b', # 0x83
'c', # 0x84
'd', # 0x85
'e', # 0x86
'f', # 0x87
'g', # 0x88
'h', # 0x89
'i', # 0x8a
'j', # 0x8b
'k', # 0x8c
'l', # 0x8d
'm', # 0x8e
'n', # 0x8f
'o', # 0x90
'p', # 0x91
'q', # 0x92
'r', # 0x93
's', # 0x94
't', # 0x95
'u', # 0x96
'v', # 0x97
'w', # 0x98
'x', # 0x99
'y', # 0x9a
'z', # 0x9b
'A', # 0x9c
'', # 0x9d
'C', # 0x9e
'D', # 0x9f
'', # 0xa0
'', # 0xa1
'G', # 0xa2
'', # 0xa3
'', # 0xa4
'J', # 0xa5
'K', # 0xa6
'', # 0xa7
'', # 0xa8
'N', # 0xa9
'O', # 0xaa
'P', # 0xab
'Q', # 0xac
'', # 0xad
'S', # 0xae
'T', # 0xaf
'U', # 0xb0
'V', # 0xb1
'W', # 0xb2
'X', # 0xb3
'Y', # 0xb4
'Z', # 0xb5
'a', # 0xb6
'b', # 0xb7
'c', # 0xb8
'd', # 0xb9
'', # 0xba
'f', # 0xbb
'', # 0xbc
'h', # 0xbd
'i', # 0xbe
'j', # 0xbf
'k', # 0xc0
'l', # 0xc1
'm', # 0xc2
'n', # 0xc3
'', # 0xc4
'p', # 0xc5
'q', # 0xc6
'r', # 0xc7
's', # 0xc8
't', # 0xc9
'u', # 0xca
'v', # 0xcb
'w', # 0xcc
'x', # 0xcd
'y', # 0xce
'z', # 0xcf
'A', # 0xd0
'B', # 0xd1
'C', # 0xd2
'D', # 0xd3
'E', # 0xd4
'F', # 0xd5
'G', # 0xd6
'H', # 0xd7
'I', # 0xd8
'J', # 0xd9
'K', # 0xda
'L', # 0xdb
'M', # 0xdc
'N', # 0xdd
'O', # 0xde
'P', # 0xdf
'Q', # 0xe0
'R', # 0xe1
'S', # 0xe2
'T', # 0xe3
'U', # 0xe4
'V', # 0xe5
'W', # 0xe6
'X', # 0xe7
'Y', # 0xe8
'Z', # 0xe9
'a', # 0xea
'b', # 0xeb
'c', # 0xec
'd', # 0xed
'e', # 0xee
'f', # 0xef
'g', # 0xf0
'h', # 0xf1
'i', # 0xf2
'j', # 0xf3
'k', # 0xf4
'l', # 0xf5
'm', # 0xf6
'n', # 0xf7
'o', # 0xf8
'p', # 0xf9
'q', # 0xfa
'r', # 0xfb
's', # 0xfc
't', # 0xfd
'u', # 0xfe
'v', # 0xff
)
| gpl-2.0 |
panchenji/Ryu_modified | ryu/controller/tunnels.py | 50 | 6952 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import ryu.exception as ryu_exc
from ryu.base import app_manager
from ryu.controller import event
class RemoteDPIDAlreadyExist(ryu_exc.RyuException):
message = ('port (%(dpid)s, %(port)s) has already '
'remote dpid %(remote_dpid)s')
class TunnelKeyAlreadyExist(ryu_exc.RyuException):
message = 'tunnel key %(tunnel_key)s already exists'
class TunnelKeyNotFound(ryu_exc.RyuException):
message = 'no tunnel key for network %(network_id)s'
class EventTunnelKeyBase(event.EventBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyBase, self).__init__()
self.network_id = network_id
self.tunnel_key = tunnel_key
class EventTunnelKeyAdd(EventTunnelKeyBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyAdd, self).__init__(network_id, tunnel_key)
class EventTunnelKeyDel(EventTunnelKeyBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyDel, self).__init__(network_id, tunnel_key)
class EventTunnelPort(event.EventBase):
def __init__(self, dpid, port_no, remote_dpid, add_del):
super(EventTunnelPort, self).__init__()
self.dpid = dpid
self.port_no = port_no
self.remote_dpid = remote_dpid
self.add_del = add_del
class TunnelKeys(dict):
"""network id(uuid) <-> tunnel key(32bit unsigned int)"""
def __init__(self, f):
super(TunnelKeys, self).__init__()
self.send_event = f
def get_key(self, network_id):
try:
return self[network_id]
except KeyError:
raise TunnelKeyNotFound(network_id=network_id)
def _set_key(self, network_id, tunnel_key):
self[network_id] = tunnel_key
self.send_event(EventTunnelKeyAdd(network_id, tunnel_key))
def register_key(self, network_id, tunnel_key):
if network_id in self:
raise ryu_exc.NetworkAlreadyExist(network_id=network_id)
if tunnel_key in self.values():
raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
self._set_key(network_id, tunnel_key)
def update_key(self, network_id, tunnel_key):
if network_id not in self and tunnel_key in self.values():
            raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
key = self.get(network_id)
if key is None:
self._set_key(network_id, tunnel_key)
return
if key != tunnel_key:
raise ryu_exc.NetworkAlreadyExist(network_id=network_id)
def delete_key(self, network_id):
try:
tunnel_key = self[network_id]
self.send_event(EventTunnelKeyDel(network_id, tunnel_key))
del self[network_id]
except KeyError:
raise ryu_exc.NetworkNotFound(network_id=network_id)
class DPIDs(object):
"""dpid -> port_no -> remote_dpid"""
def __init__(self, f):
super(DPIDs, self).__init__()
self.dpids = collections.defaultdict(dict)
self.send_event = f
def list_ports(self, dpid):
return self.dpids[dpid]
def _add_remote_dpid(self, dpid, port_no, remote_dpid):
self.dpids[dpid][port_no] = remote_dpid
self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, True))
def add_remote_dpid(self, dpid, port_no, remote_dpid):
if port_no in self.dpids[dpid]:
raise ryu_exc.PortAlreadyExist(dpid=dpid, port=port_no,
network_id=None)
self._add_remote_dpid(dpid, port_no, remote_dpid)
def update_remote_dpid(self, dpid, port_no, remote_dpid):
remote_dpid_ = self.dpids[dpid].get(port_no)
if remote_dpid_ is None:
self._add_remote_dpid(dpid, port_no, remote_dpid)
elif remote_dpid_ != remote_dpid:
            # RemoteDPIDAlreadyExist is defined at the top of this module
            raise RemoteDPIDAlreadyExist(dpid=dpid, port=port_no,
                                         remote_dpid=remote_dpid)
def get_remote_dpid(self, dpid, port_no):
try:
return self.dpids[dpid][port_no]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
def delete_port(self, dpid, port_no):
try:
remote_dpid = self.dpids[dpid][port_no]
self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, False))
del self.dpids[dpid][port_no]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
def get_port(self, dpid, remote_dpid):
try:
dp = self.dpids[dpid]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
res = [port_no for (port_no, remote_dpid_) in dp.items()
if remote_dpid_ == remote_dpid]
assert len(res) <= 1
if len(res) == 0:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
return res[0]
class Tunnels(app_manager.RyuApp):
def __init__(self):
super(Tunnels, self).__init__()
self.name = 'tunnels'
self.tunnel_keys = TunnelKeys(self.send_event_to_observers)
self.dpids = DPIDs(self.send_event_to_observers)
def get_key(self, network_id):
return self.tunnel_keys.get_key(network_id)
def register_key(self, network_id, tunnel_key):
self.tunnel_keys.register_key(network_id, tunnel_key)
def update_key(self, network_id, tunnel_key):
self.tunnel_keys.update_key(network_id, tunnel_key)
def delete_key(self, network_id):
self.tunnel_keys.delete_key(network_id)
def list_ports(self, dpid):
return self.dpids.list_ports(dpid).keys()
def register_port(self, dpid, port_no, remote_dpid):
self.dpids.add_remote_dpid(dpid, port_no, remote_dpid)
def update_port(self, dpid, port_no, remote_dpid):
self.dpids.update_remote_dpid(dpid, port_no, remote_dpid)
def get_remote_dpid(self, dpid, port_no):
return self.dpids.get_remote_dpid(dpid, port_no)
def delete_port(self, dpid, port_no):
self.dpids.delete_port(dpid, port_no)
#
# methods for gre tunnel
#
def get_port(self, dpid, remote_dpid):
return self.dpids.get_port(dpid, remote_dpid)
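def _tunnels_usage_example():
    # Hedged usage sketch (added for illustration; not part of the original
    # module). In a real deployment Tunnels runs as a RyuApp looked up via
    # app_manager; the direct calls below only illustrate the key/port
    # bookkeeping, and all identifiers are made-up example values.
    tunnels = Tunnels()
    tunnels.register_key("11111111-2222-3333-4444-555555555555", 42)
    tunnels.register_port(dpid=1, port_no=3, remote_dpid=2)
    assert tunnels.get_key("11111111-2222-3333-4444-555555555555") == 42
    assert tunnels.get_remote_dpid(1, 3) == 2
    assert tunnels.get_port(dpid=1, remote_dpid=2) == 3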
| apache-2.0 |
jeffninghan/tracker | OCR_test/ocr_test.py | 1 | 1285 | import numpy as np
import cv2
from matplotlib import pyplot as plt
# test algorithm to recognize digits using kNN
# source: http://docs.opencv.org/trunk/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.html
img = cv2.imread('../data/digits.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Now we split the image to 5000 cells, each 20x20 size
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
# Make it into a Numpy array. It size will be (50,100,20,20)
x = np.array(cells)
# Now we prepare train_data and test_data.
train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k,250)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=5
print "Training using kNN..."
knn = cv2.KNearest()
knn.train(train,train_labels)
ret,result,neighbours,dist = knn.find_nearest(test,k=5)
# Now we check the accuracy of classification
# For that, compare the result with test_labels and check which are wrong
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print "Accuracy:", accuracy | mit |
be-cloud-be/horizon-addons | web/web_search_alphabetic/__init__.py | 2 | 1129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011-2014 Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
darkleons/BE | addons/crm/res_partner.py | 47 | 4790 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
""" Inherits partner and adds CRM information in the partner form """
_inherit = 'res.partner'
def _opportunity_meeting_phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict(map(lambda x: (x,{'opportunity_count': 0, 'meeting_count': 0}), ids))
# the user may not have access rights for opportunities or meetings
try:
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = {
'opportunity_count': len(partner.opportunity_ids),
'meeting_count': len(partner.meeting_ids),
}
except:
pass
for partner in self.browse(cr, uid, ids, context):
res[partner.id]['phonecall_count'] = len(partner.phonecall_ids)
return res
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'opportunity_ids': fields.one2many('crm.lead', 'partner_id',\
'Leads and Opportunities', domain=[('probability', 'not in', ['0', '100'])]),
'meeting_ids': fields.many2many('calendar.event', 'calendar_event_res_partner_rel','res_partner_id', 'calendar_event_id',
'Meetings'),
'phonecall_ids': fields.one2many('crm.phonecall', 'partner_id',\
'Phonecalls'),
'opportunity_count': fields.function(_opportunity_meeting_phonecall_count, string="Opportunity", type='integer', multi='opp_meet'),
'meeting_count': fields.function(_opportunity_meeting_phonecall_count, string="# Meetings", type='integer', multi='opp_meet'),
'phonecall_count': fields.function(_opportunity_meeting_phonecall_count, string="Phonecalls", type="integer", multi='opp_meet'),
}
def redirect_partner_form(self, cr, uid, partner_id, context=None):
search_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'view_res_partner_filter')
value = {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'context': context,
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False
}
return value
def make_opportunity(self, cr, uid, ids, opportunity_summary, planned_revenue=0.0, probability=0.0, partner_id=None, context=None):
categ_obj = self.pool.get('crm.case.categ')
categ_ids = categ_obj.search(cr, uid, [('object_id.model','=','crm.lead')])
lead_obj = self.pool.get('crm.lead')
opportunity_ids = {}
for partner in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner.id
opportunity_id = lead_obj.create(cr, uid, {
'name' : opportunity_summary,
'planned_revenue' : planned_revenue,
'probability' : probability,
'partner_id' : partner_id,
'categ_ids' : categ_ids and categ_ids[0:1] or [],
'type': 'opportunity'
}, context=context)
opportunity_ids[partner_id] = opportunity_id
return opportunity_ids
def schedule_meeting(self, cr, uid, ids, context=None):
partner_ids = list(ids)
partner_ids.append(self.pool.get('res.users').browse(cr, uid, uid).partner_id.id)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'search_default_partner_ids': list(ids),
'default_partner_ids': partner_ids,
}
return res
| agpl-3.0 |
viewdy/phantomjs2 | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/watchlist.py | 134 | 2280 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for the watchlist file."""
from webkitpy.common.watchlist.watchlistparser import WatchListParser
class WatchListChecker(object):
"""Processes the watch list for checking style."""
def __init__(self, file_path, handle_style_error):
self._handle_style_error = handle_style_error
self._handle_style_error.turn_off_line_filtering()
def check(self, lines):
def log_to_style_error(message):
# Always report line 0 since we don't have anything better.
self._handle_style_error(0,
'watchlist/general', 5,
message)
WatchListParser(log_error=log_to_style_error).parse('\n'.join(lines))
| bsd-3-clause |
polojacky/ehfpi | ehf/filebrowser/management/commands/fb_version_remove.py | 13 | 5220 | # coding: utf-8
# PYTHON IMPORTS
import os
import re
# DJANGO IMPORTS
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils.six.moves import input
# FILEBROWSER IMPORTS
from filebrowser.settings import EXTENSION_LIST, EXCLUDE, DIRECTORY, VERSIONS, EXTENSIONS
class Command(BaseCommand):
args = '<media_path>'
help = "Remove Image-Versions within FILEBROWSER_DIRECTORY/MEDIA_ROOT."
def handle(self, *args, **options):
media_path = ""
if len(args):
media_path = args[0]
path = os.path.join(settings.MEDIA_ROOT, media_path)
if not os.path.isdir(path):
raise CommandError('<media_path> must be a directory in MEDIA_ROOT. "%s" is no directory.' % path)
self.stdout.write("\n%s\n" % self.help)
self.stdout.write("in this case: %s\n" % path)
# get suffix or prefix
default_prefix_or_suffix = "s"
while 1:
self.stdout.write('\nOlder versions of the FileBrowser used to prefix the filename with the version name.\n')
self.stdout.write('Current version of the FileBrowser adds the version name as suffix.\n')
prefix_or_suffix = input('"p" for prefix or "s" for suffix (leave blank for "%s"): ' % default_prefix_or_suffix)
if default_prefix_or_suffix and prefix_or_suffix == '':
prefix_or_suffix = default_prefix_or_suffix
if prefix_or_suffix != "s" and prefix_or_suffix != "p":
                self.stderr.write('Error: "p" and "s" are the only valid inputs.\n')
prefix_or_suffix = None
continue
break
# get version name
while 1:
version_name = input('\nversion name as defined with VERSIONS: ')
if version_name == "":
self.stderr.write('Error: You have to enter a version name.\n')
version_name = None
continue
else:
break
# get list of all matching files
files = self.get_files(path, version_name, (prefix_or_suffix == "p"))
# output (short version) of files to be deleted
if len(files) > 15:
self.stdout.write('\nFirst/Last 5 files to remove:\n')
for current_file in files[:5]:
self.stdout.write('%s\n' % current_file)
self.stdout.write('...\n')
self.stdout.write('...\n')
for current_file in files[len(files)-5:]:
self.stdout.write('%s\n' % current_file)
else:
self.stdout.write('\nFiles to remove:\n')
for current_file in files:
self.stdout.write('%s\n' % current_file)
# no files...done
if len(files) == 0:
self.stdout.write('0 files removed.\n\n')
return
else:
self.stdout.write('%d file(s) will be removed.\n\n' % len(files))
# ask to make sure
do_remove = ""
        self.stdout.write('Are you sure you want to delete these files?\n')
do_remove = input('"y" for Yes or "n" for No (leave blank for "n"): ')
# if "yes" we delete. any different case we finish without removing anything
if do_remove == "y":
for current_file in files:
os.remove(current_file)
self.stdout.write('%d file(s) removed.\n\n' % len(files))
else:
self.stdout.write('No files removed.\n\n')
return
    # get files matching:
    # path: search recursively in this path (os.walk)
# version_name: string is pre/suffix of filename
# search_for_prefix: if true we match against the start of the filename (default is the end)
def get_files(self, path, version_name, search_for_prefix):
file_list = []
# Precompile regular expressions
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
        # walk through the filebrowser directory
# for all/new files (except file versions itself and excludes)
for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
for filename in filenames:
filtered = False
# no "hidden" files (stating with ".")
if filename.startswith('.'):
continue
# check the exclude list
for re_prefix in filter_re:
if re_prefix.search(filename):
filtered = True
if filtered:
continue
(filename_noext, extension) = os.path.splitext(filename)
# images only
if extension in EXTENSIONS["Image"]:
# if image matches with version_name we add it to the file_list
if search_for_prefix:
if filename_noext.startswith(version_name + "_"):
file_list.append(os.path.join(dirpath, filename))
elif filename_noext.endswith("_" + version_name):
file_list.append(os.path.join(dirpath, filename))
return file_list
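    # Hedged usage note (added for illustration; not part of the original
    # command). Following Django's management-command convention the command
    # name is this file's name, so a typical interactive run would look
    # roughly like this (paths and answers are example values):
    #
    #     python manage.py fb_version_remove uploads/
    #     # then answer the prompts: "s" (suffix), the version name, and "y"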
| apache-2.0 |
wildtetris/python-social-auth | social/tests/backends/test_stocktwits.py | 91 | 1658 | import json
from social.tests.backends.oauth import OAuth2Test
class StocktwitsOAuth2Test(OAuth2Test):
backend_path = 'social.backends.stocktwits.StocktwitsOAuth2'
user_data_url = 'https://api.stocktwits.com/api/2/account/verify.json'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'response': {
'status': 200
},
'user': {
'username': 'foobar',
'name': 'Foo Bar',
'classification': [],
'avatar_url': 'http://avatars.stocktwits.net/images/'
'default_avatar_thumb.jpg',
'avatar_url_ssl': 'https://s3.amazonaws.com/st-avatars/images/'
'default_avatar_thumb.jpg',
'id': 101010,
'identity': 'User'
}
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class StocktwitsOAuth2UsernameAlternativeTest(StocktwitsOAuth2Test):
user_data_body = json.dumps({
'response': {
'status': 200
},
'user': {
'username': 'foobar',
'name': 'Foobar',
'classification': [],
'avatar_url': 'http://avatars.stocktwits.net/images/'
'default_avatar_thumb.jpg',
'avatar_url_ssl': 'https://s3.amazonaws.com/st-avatars/images/'
'default_avatar_thumb.jpg',
'id': 101010,
'identity': 'User'
}
})
| bsd-3-clause |
young-geng/leet_code | problems/200_number-of-islands/main.py | 1 | 2271 | # https://leetcode.com/problems/number-of-islands/
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0: return 0
cols,rows = len(grid), len(grid[0])
def dfs(i, j):
"""
helper function to start filling every cell reachable from (i,j)
with '0's. A variation of Flood Fill algorithm for connected
components.
"""
if i >= 0 and j >= 0 and i < cols and j < rows:
# if not visited, mark it '0' and run dfs
if grid[i][j] == '1':
grid[i][j] = '0'
# vertical move
dfs(i-1, j)
dfs(i+1, j)
# horizontal move
dfs(i, j-1)
dfs(i, j+1)
cc_count = 0
for i in xrange(cols):
for j in xrange(rows):
if grid[i][j] == '1':
dfs(i, j)
cc_count += 1
return cc_count
# BFS Solution for practice
# Runtime: O(cN) => O(N)
import Queue
class BFSSolution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0: return 0
cols, rows = len(grid), len(grid[0])
cc_count = 0
for i in xrange(cols):
for j in xrange(rows):
if grid[i][j] == '1':
self.bfs(grid, i, j)
cc_count += 1
return cc_count
def bfs(self, grid, i, j):
cols, rows = len(grid), len(grid[0])
q = Queue.Queue()
q.put([i,j])
while not q.empty():
x, y = q.get()
# if the point is not visited, a.k.a '1'
if grid[x][y] == '1':
grid[x][y] = '0'
self.pushToQueue(q, cols, rows, x-1, y)
self.pushToQueue(q, cols, rows, x+1, y)
self.pushToQueue(q, cols, rows, x, y-1)
self.pushToQueue(q, cols, rows, x, y+1)
def pushToQueue(self, q, cols, rows, i, j):
if i >= 0 and i < cols and j >= 0 and j < rows:
q.put([i,j])
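if __name__ == '__main__':
    # Hedged usage sketch (added for illustration; not part of the original
    # solutions). The grid is a list of rows of '1'/'0' strings and contains
    # three islands; both solutions mutate the grid in place, so each call
    # gets its own copy.
    example_grid = [['1', '1', '0', '0'],
                    ['0', '1', '0', '1'],
                    ['0', '0', '0', '1'],
                    ['1', '0', '0', '0']]
    print Solution().numIslands([row[:] for row in example_grid])     # expected: 3
    print BFSSolution().numIslands([row[:] for row in example_grid])  # expected: 3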
| mit |
onponomarev/ganeti | lib/build/sphinx_ext.py | 3 | 18231 | #
#
# Copyright (C) 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Sphinx extension for building opcode documentation.
"""
# pylint: disable=C0413
# C0413: Wrong import position
import re
from cStringIO import StringIO
import docutils.statemachine
import docutils.nodes
import docutils.utils
import docutils.parsers.rst
import sphinx.errors
import sphinx.util.compat
import sphinx.roles
import sphinx.addnodes
s_compat = sphinx.util.compat
try:
# Access to a protected member of a client class
# pylint: disable=W0212
orig_manpage_role = docutils.parsers.rst.roles._roles["manpage"]
except (AttributeError, ValueError, KeyError), err:
# Normally the "manpage" role is registered by sphinx/roles.py
raise Exception("Can't find reST role named 'manpage': %s" % err)
from ganeti import _constants
from ganeti import constants
from ganeti import compat
from ganeti import errors
from ganeti import utils
from ganeti import opcodes
from ganeti import opcodes_base
from ganeti import ht
from ganeti import rapi
from ganeti import luxi
from ganeti import objects
from ganeti import http
from ganeti import pathutils
import ganeti.rapi.rlib2 # pylint: disable=W0611
import ganeti.rapi.connector # pylint: disable=W0611
#: Regular expression for man page names
_MAN_RE = re.compile(r"^(?P<name>[-\w_]+)\((?P<section>\d+)\)$")
_TAB_WIDTH = 2
RAPI_URI_ENCODE_RE = re.compile("[^_a-z0-9]+", re.I)
class ReSTError(Exception):
"""Custom class for generating errors in Sphinx.
"""
def _GetCommonParamNames():
"""Builds a list of parameters common to all opcodes.
"""
names = set(map(compat.fst, opcodes.OpCode.OP_PARAMS))
# The "depends" attribute should be listed
names.remove(opcodes_base.DEPEND_ATTR)
return names
COMMON_PARAM_NAMES = _GetCommonParamNames()
#: Namespace for evaluating expressions
EVAL_NS = dict(compat=compat, constants=constants, utils=utils, errors=errors,
rlib2=rapi.rlib2, luxi=luxi, rapi=rapi, objects=objects,
http=http, pathutils=pathutils)
# Constants documentation for man pages
CV_ECODES_DOC = "ecodes"
# We don't care about the leak of variables _, name and doc here.
# pylint: disable=W0621
CV_ECODES_DOC_LIST = [(name, doc) for (_, name, doc) in constants.CV_ALL_ECODES]
DOCUMENTED_CONSTANTS = {
CV_ECODES_DOC: CV_ECODES_DOC_LIST,
}
class OpcodeError(sphinx.errors.SphinxError):
category = "Opcode error"
def _SplitOption(text):
"""Split simple option list.
@type text: string
@param text: Options, e.g. "foo, bar, baz"
"""
return [i.strip(",").strip() for i in text.split()]
def _ParseAlias(text):
"""Parse simple assignment option.
@type text: string
@param text: Assignments, e.g. "foo=bar, hello=world"
@rtype: dict
"""
result = {}
for part in _SplitOption(text):
if "=" not in part:
raise OpcodeError("Invalid option format, missing equal sign")
(name, value) = part.split("=", 1)
result[name.strip()] = value.strip()
return result
def _BuildOpcodeParams(op_id, include, exclude, alias):
"""Build opcode parameter documentation.
@type op_id: string
@param op_id: Opcode ID
"""
op_cls = opcodes.OP_MAPPING[op_id]
params_with_alias = \
utils.NiceSort([(alias.get(name, name), name, default, test, doc)
for (name, default, test, doc) in op_cls.GetAllParams()],
key=compat.fst)
for (rapi_name, name, default, test, doc) in params_with_alias:
# Hide common parameters if not explicitly included
if (name in COMMON_PARAM_NAMES and
(not include or name not in include)):
continue
if exclude is not None and name in exclude:
continue
if include is not None and name not in include:
continue
has_default = default is not None or default is not ht.NoDefault
has_test = test is not None
buf = StringIO()
buf.write("``%s``" % (rapi_name,))
if has_default or has_test:
buf.write(" (")
if has_default:
if default == "":
buf.write("defaults to the empty string")
else:
buf.write("defaults to ``%s``" % (default,))
if has_test:
buf.write(", ")
if has_test:
buf.write("must be ``%s``" % (test,))
buf.write(")")
yield buf.getvalue()
# Add text
for line in doc.splitlines():
yield " %s" % line
def _BuildOpcodeResult(op_id):
"""Build opcode result documentation.
@type op_id: string
@param op_id: Opcode ID
"""
op_cls = opcodes.OP_MAPPING[op_id]
result_fn = getattr(op_cls, "OP_RESULT", None)
if not result_fn:
raise OpcodeError("Opcode '%s' has no result description" % op_id)
return "``%s``" % result_fn
class OpcodeParams(s_compat.Directive):
"""Custom directive for opcode parameters.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = dict(include=_SplitOption, exclude=_SplitOption,
alias=_ParseAlias)
def run(self):
op_id = self.arguments[0]
include = self.options.get("include", None)
exclude = self.options.get("exclude", None)
alias = self.options.get("alias", {})
path = op_id
include_text = "\n\n".join(_BuildOpcodeParams(op_id,
include,
exclude,
alias))
# Inject into state machine
include_lines = docutils.statemachine.string2lines(include_text, _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, path)
return []
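# Usage sketch for the directive above (assumption: a reST source file in the
# Ganeti documentation; the opcode ID and option values are illustrative only):
#
#   .. opcode_params:: OP_INSTANCE_CREATE
#      :exclude: debug_level
#      :alias: pnode=primary_node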
class OpcodeResult(s_compat.Directive):
"""Custom directive for opcode result.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def run(self):
op_id = self.arguments[0]
path = op_id
include_text = _BuildOpcodeResult(op_id)
# Inject into state machine
include_lines = docutils.statemachine.string2lines(include_text, _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, path)
return []
def PythonEvalRole(role, rawtext, text, lineno, inliner,
options={}, content=[]):
"""Custom role to evaluate Python expressions.
The expression's result is included as a literal.
"""
# pylint: disable=W0102,W0613
# W0102: Dangerous default value as argument
# W0613: Unused argument
code = docutils.utils.unescape(text, restore_backslashes=True)
try:
result = eval(code, EVAL_NS) # pylint: disable=W0123
except Exception, err: # pylint: disable=W0703
msg = inliner.reporter.error("Failed to evaluate %r: %s" % (code, err),
line=lineno)
return ([inliner.problematic(rawtext, rawtext, msg)], [msg])
node = docutils.nodes.literal("", unicode(result), **options)
return ([node], [])
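# Usage sketch for the role above (assumption: a reST source line; the
# expression is illustrative). The expression is evaluated in EVAL_NS and its
# result is inlined as a literal:
#
#   The current configuration version is :pyeval:`constants.CONFIG_VERSION`.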
class PythonAssert(s_compat.Directive):
"""Custom directive for writing assertions.
The content must be a valid Python expression. If its result does not
evaluate to C{True}, the assertion fails.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
def run(self):
# Handle combinations of Sphinx and docutils not providing the wanted method
if hasattr(self, "assert_has_content"):
self.assert_has_content()
else:
assert self.content
code = "\n".join(self.content)
try:
result = eval(code, EVAL_NS) # pylint: disable=W0123
except Exception, err:
raise self.error("Failed to evaluate %r: %s" % (code, err))
if not result:
raise self.error("Assertion failed: %s" % (code, ))
return []
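# Usage sketch for the directive above (assumption: a reST source file; the
# expression is illustrative). A failing assertion aborts the documentation
# build:
#
#   .. pyassert::
#
#      constants.CONFIG_VERSION > 0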
def BuildQueryFields(fields):
"""Build query fields documentation.
@type fields: dict (field name as key, field details as value)
"""
defs = [(fdef.name, fdef.doc)
for (_, (fdef, _, _, _)) in utils.NiceSort(fields.items(),
key=compat.fst)]
return BuildValuesDoc(defs)
def BuildValuesDoc(values):
"""Builds documentation for a list of values
@type values: list of tuples in the form (value, documentation)
"""
for name, doc in values:
assert len(doc.splitlines()) == 1
yield "``%s``" % (name,)
yield " %s" % (doc,)
def _ManPageNodeClass(*args, **kwargs):
"""Generates a pending XRef like a ":doc:`...`" reference.
"""
# Type for sphinx/environment.py:BuildEnvironment.resolve_references
kwargs["reftype"] = "doc"
# Force custom title
kwargs["refexplicit"] = True
return sphinx.addnodes.pending_xref(*args, **kwargs)
class _ManPageXRefRole(sphinx.roles.XRefRole):
def __init__(self):
"""Initializes this class.
"""
sphinx.roles.XRefRole.__init__(self, nodeclass=_ManPageNodeClass,
warn_dangling=True)
assert not hasattr(self, "converted"), \
"Sphinx base class gained an attribute named 'converted'"
self.converted = None
def process_link(self, env, refnode, has_explicit_title, title, target):
"""Specialization for man page links.
"""
if has_explicit_title:
raise ReSTError("Setting explicit title is not allowed for man pages")
# Check format and extract name and section
m = _MAN_RE.match(title)
if not m:
raise ReSTError("Man page reference '%s' does not match regular"
" expression '%s'" % (title, _MAN_RE.pattern))
name = m.group("name")
section = int(m.group("section"))
wanted_section = _constants.MAN_PAGES.get(name, None)
if not (wanted_section is None or wanted_section == section):
raise ReSTError("Referenced man page '%s' has section number %s, but the"
" reference uses section %s" %
(name, wanted_section, section))
self.converted = bool(wanted_section is not None and
env.app.config.enable_manpages)
if self.converted:
# Create link to known man page
return (title, "man-%s" % name)
else:
# No changes
return (title, target)
def _ManPageRole(typ, rawtext, text, lineno, inliner, # pylint: disable=W0102
options={}, content=[]):
"""Custom role for man page references.
Converts man pages to links if enabled during the build.
"""
xref = _ManPageXRefRole()
assert ht.TNone(xref.converted)
# Check if it's a known man page
try:
result = xref(typ, rawtext, text, lineno, inliner,
options=options, content=content)
except ReSTError, err:
msg = inliner.reporter.error(str(err), line=lineno)
return ([inliner.problematic(rawtext, rawtext, msg)], [msg])
assert ht.TBool(xref.converted)
# Return if the conversion was successful (i.e. the man page was known and
# conversion was enabled)
if xref.converted:
return result
# Fallback if man page links are disabled or an unknown page is referenced
return orig_manpage_role(typ, rawtext, text, lineno, inliner,
options=options, content=content)
def _EncodeRapiResourceLink(method, uri):
"""Encodes a RAPI resource URI for use as a link target.
"""
parts = [RAPI_URI_ENCODE_RE.sub("-", uri.lower()).strip("-")]
if method is not None:
parts.append(method.lower())
return "rapi-res-%s" % "+".join(filter(None, parts))
def _MakeRapiResourceLink(method, uri):
"""Generates link target name for RAPI resource.
"""
if uri in ["/", "/2"]:
# Don't link these
return None
elif uri == "/version":
return _EncodeRapiResourceLink(method, uri)
elif uri.startswith("/2/"):
return _EncodeRapiResourceLink(method, uri[len("/2/"):])
else:
raise ReSTError("Unhandled URI '%s'" % uri)
def _GetHandlerMethods(handler):
"""Returns list of HTTP methods supported by handler class.
@type handler: L{rapi.baserlib.ResourceBase}
@param handler: Handler class
@rtype: list of strings
"""
return sorted(m_attrs.method for m_attrs in rapi.baserlib.OPCODE_ATTRS
# Only if handler supports method
if hasattr(handler, m_attrs.method) or
hasattr(handler, m_attrs.opcode))
def _DescribeHandlerAccess(handler, method):
"""Returns textual description of required RAPI permissions.
@type handler: L{rapi.baserlib.ResourceBase}
@param handler: Handler class
@type method: string
@param method: HTTP method (e.g. L{http.HTTP_GET})
@rtype: string
"""
access = rapi.baserlib.GetHandlerAccess(handler, method)
if access:
return utils.CommaJoin(sorted(access))
else:
return "*(none)*"
class _RapiHandlersForDocsHelper(object):
@classmethod
def Build(cls):
"""Returns dictionary of resource handlers.
"""
resources = \
rapi.connector.GetHandlers("[node_name]", "[instance_name]",
"[group_name]", "[network_name]", "[job_id]",
"[disk_index]", "[filter_uuid]",
"[resource]",
translate=cls._TranslateResourceUri)
return resources
@classmethod
def _TranslateResourceUri(cls, *args):
"""Translates a resource URI for use in documentation.
@see: L{rapi.connector.GetHandlers}
"""
return "".join(map(cls._UriPatternToString, args))
@staticmethod
def _UriPatternToString(value):
"""Converts L{rapi.connector.UriPattern} to strings.
"""
if isinstance(value, rapi.connector.UriPattern):
return value.content
else:
return value
_RAPI_RESOURCES_FOR_DOCS = _RapiHandlersForDocsHelper.Build()
def _BuildRapiAccessTable(res):
"""Build a table with access permissions needed for all RAPI resources.
"""
for (uri, handler) in utils.NiceSort(res.items(), key=compat.fst):
reslink = _MakeRapiResourceLink(None, uri)
if not reslink:
# No link was generated
continue
yield ":ref:`%s <%s>`" % (uri, reslink)
for method in _GetHandlerMethods(handler):
yield (" | :ref:`%s <%s>`: %s" %
(method, _MakeRapiResourceLink(method, uri),
_DescribeHandlerAccess(handler, method)))
class RapiAccessTable(s_compat.Directive):
"""Custom directive to generate table of all RAPI resources.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
include_text = "\n".join(_BuildRapiAccessTable(_RAPI_RESOURCES_FOR_DOCS))
# Inject into state machine
include_lines = docutils.statemachine.string2lines(include_text, _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, self.__class__.__name__)
return []
class RapiResourceDetails(s_compat.Directive):
"""Custom directive for RAPI resource details.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def run(self):
uri = self.arguments[0]
try:
handler = _RAPI_RESOURCES_FOR_DOCS[uri]
except KeyError:
raise self.error("Unknown resource URI '%s'" % uri)
lines = [
".. list-table::",
" :widths: 1 4",
" :header-rows: 1",
"",
" * - Method",
" - :ref:`Required permissions <rapi-users>`",
]
for method in _GetHandlerMethods(handler):
lines.extend([
" * - :ref:`%s <%s>`" % (method, _MakeRapiResourceLink(method, uri)),
" - %s" % _DescribeHandlerAccess(handler, method),
])
# Inject into state machine
include_lines = \
docutils.statemachine.string2lines("\n".join(lines), _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, self.__class__.__name__)
return []
def setup(app):
"""Sphinx extension callback.
"""
# TODO: Implement Sphinx directive for query fields
app.add_directive("opcode_params", OpcodeParams)
app.add_directive("opcode_result", OpcodeResult)
app.add_directive("pyassert", PythonAssert)
app.add_role("pyeval", PythonEvalRole)
app.add_directive("rapi_access_table", RapiAccessTable)
app.add_directive("rapi_resource_details", RapiResourceDetails)
app.add_config_value("enable_manpages", False, True)
app.add_role("manpage", _ManPageRole)
| bsd-2-clause |
anryko/ansible | lib/ansible/modules/network/netvisor/pn_port_config.py | 38 | 12285 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_port_config
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: 2.8
short_description: CLI command to modify port-config
description:
- This module can be used to modify a port configuration.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(update) to modify the port-config.
required: True
type: str
choices: ['update']
pn_intf:
description:
- physical interface.
required: False
type: str
pn_crc_check_enable:
description:
- CRC check on ingress and rewrite on egress.
required: False
type: bool
pn_dscp_map:
description:
- DSCP map name to enable on port.
required: False
type: str
pn_autoneg:
description:
- physical port autonegotiation.
required: False
type: bool
pn_speed:
description:
- physical port speed.
required: False
choices: ['disable', '10m', '100m', '1g',
'2.5g', '10g', '25g', '40g', '50g', '100g']
pn_port:
description:
- physical port.
required: False
type: str
pn_vxlan_termination:
description:
- physical port vxlan termination setting.
required: False
type: bool
pn_pause:
description:
- physical port pause.
required: False
type: bool
pn_loopback:
description:
- physical port loopback.
required: False
type: bool
pn_loop_vlans:
description:
- looping vlans.
required: False
type: str
pn_routing:
description:
- routing.
required: False
type: bool
pn_edge_switch:
description:
- physical port edge switch.
required: False
type: bool
pn_enable:
description:
- physical port enable.
required: False
type: bool
pn_description:
description:
- physical port description.
required: False
type: str
pn_host_enable:
description:
- Host facing port control setting.
required: False
type: bool
pn_allowed_tpid:
description:
- Allowed TPID in addition to 0x8100 on Vlan header.
required: False
type: str
choices: ['vlan', 'q-in-q', 'q-in-q-old']
pn_mirror_only:
description:
- physical port mirror only.
required: False
type: bool
pn_reflect:
description:
- physical port reflection.
required: False
type: bool
pn_jumbo:
description:
- jumbo frames on physical port.
required: False
type: bool
pn_egress_rate_limit:
description:
- max egress port data rate limit.
required: False
type: str
pn_eth_mode:
description:
- physical Ethernet mode.
required: False
choices: ['1000base-x', 'sgmii', 'disabled', 'GMII']
pn_fabric_guard:
description:
- Fabric guard configuration.
required: False
type: bool
pn_local_switching:
description:
- no-local-switching port cannot bridge traffic to
another no-local-switching port.
required: False
type: bool
pn_lacp_priority:
description:
- LACP priority from 1 to 65535.
required: False
type: str
pn_send_port:
description:
- send port.
required: False
type: str
pn_port_mac_address:
description:
- physical port MAC Address.
required: False
type: str
pn_defer_bringup:
description:
- defer port bringup.
required: False
type: bool
"""
EXAMPLES = """
- name: port config modify
pn_port_config:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_dscp_map: "foo"
- name: port config modify
pn_port_config:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_host_enable: true
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the port-config command.
returned: always
type: list
stderr:
description: set of error responses from the port-config command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the dscp-map-show name command.
    If a dscp-map with the given name exists, return True, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_dscp_map']
cli += ' dscp-map-show name %s format name no-show-headers' % name
out = run_commands(module, cli)[1]
out = out.split()
return True if name in out[-1] else False
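# Rough sketch of the idempotency check above (values are illustrative): with
# pn_dscp_map=foo the command that gets run is roughly
#
#   <pn_cli prefix> dscp-map-show name foo format name no-show-headers
#
# and main() below refuses to run port-config-modify until that dscp-map exists.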
def main():
""" This section is for arguments parsing """
state_map = dict(
update='port-config-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=['update']),
pn_intf=dict(required=False, type='str'),
pn_crc_check_enable=dict(required=False, type='bool'),
pn_dscp_map=dict(required=False, type='str'),
pn_autoneg=dict(required=False, type='bool'),
pn_speed=dict(required=False, type='str',
choices=['disable', '10m', '100m',
'1g', '2.5g', '10g', '25g',
'40g', '50g', '100g']),
pn_port=dict(required=False, type='str'),
pn_vxlan_termination=dict(required=False, type='bool'),
pn_pause=dict(required=False, type='bool'),
pn_loopback=dict(required=False, type='bool'),
pn_loop_vlans=dict(required=False, type='str'),
pn_routing=dict(required=False, type='bool'),
pn_edge_switch=dict(required=False, type='bool'),
pn_enable=dict(required=False, type='bool'),
pn_description=dict(required=False, type='str'),
pn_host_enable=dict(required=False, type='bool'),
pn_allowed_tpid=dict(required=False, type='str',
choices=['vlan', 'q-in-q', 'q-in-q-old']),
pn_mirror_only=dict(required=False, type='bool'),
pn_reflect=dict(required=False, type='bool'),
pn_jumbo=dict(required=False, type='bool'),
pn_egress_rate_limit=dict(required=False, type='str'),
pn_eth_mode=dict(required=False, type='str',
choices=['1000base-x', 'sgmii',
'disabled', 'GMII']),
pn_fabric_guard=dict(required=False, type='bool'),
pn_local_switching=dict(required=False, type='bool'),
pn_lacp_priority=dict(required=False, type='str'),
pn_send_port=dict(required=False, type='str'),
pn_port_mac_address=dict(required=False, type='str'),
pn_defer_bringup=dict(required=False, type='bool'),
),
required_if=(
['state', 'update', ['pn_port']],
),
required_one_of=[['pn_intf', 'pn_crc_check_enable', 'pn_dscp_map',
'pn_speed', 'pn_autoneg',
'pn_vxlan_termination', 'pn_pause',
'pn_fec', 'pn_loopback', 'pn_loop_vlans',
'pn_routing', 'pn_edge_switch',
'pn_enable', 'pn_description',
'pn_host_enable', 'pn_allowed_tpid',
'pn_mirror_only', 'pn_reflect',
'pn_jumbo', 'pn_egress_rate_limit',
'pn_eth_mode', 'pn_fabric_guard',
'pn_local_switching', 'pn_lacp_priority',
'pn_send_port', 'pn_port_mac_address',
'pn_defer_bringup']],
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
intf = module.params['pn_intf']
crc_check_enable = module.params['pn_crc_check_enable']
dscp_map = module.params['pn_dscp_map']
autoneg = module.params['pn_autoneg']
speed = module.params['pn_speed']
port = module.params['pn_port']
vxlan_termination = module.params['pn_vxlan_termination']
pause = module.params['pn_pause']
loopback = module.params['pn_loopback']
loop_vlans = module.params['pn_loop_vlans']
routing = module.params['pn_routing']
edge_switch = module.params['pn_edge_switch']
enable = module.params['pn_enable']
description = module.params['pn_description']
host_enable = module.params['pn_host_enable']
allowed_tpid = module.params['pn_allowed_tpid']
mirror_only = module.params['pn_mirror_only']
reflect = module.params['pn_reflect']
jumbo = module.params['pn_jumbo']
egress_rate_limit = module.params['pn_egress_rate_limit']
eth_mode = module.params['pn_eth_mode']
fabric_guard = module.params['pn_fabric_guard']
local_switching = module.params['pn_local_switching']
lacp_priority = module.params['pn_lacp_priority']
send_port = module.params['pn_send_port']
port_mac_address = module.params['pn_port_mac_address']
defer_bringup = module.params['pn_defer_bringup']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
if dscp_map:
NAME_EXISTS = check_cli(module, cli)
if command == 'port-config-modify':
cli += ' %s ' % command
if dscp_map:
if NAME_EXISTS is False:
module.fail_json(
failed=True,
msg='Create dscp map with name %s before updating' % dscp_map
)
cli += ' dscp-map ' + dscp_map
if intf:
cli += ' intf ' + intf
if speed:
cli += ' speed ' + speed
if port:
cli += ' port ' + port
if allowed_tpid:
cli += ' allowed-tpid ' + allowed_tpid
if egress_rate_limit:
cli += ' egress-rate-limit ' + egress_rate_limit
if eth_mode:
cli += ' eth-mode ' + eth_mode
if lacp_priority:
cli += ' lacp-priority ' + lacp_priority
if send_port:
cli += ' send-port ' + send_port
if port_mac_address:
cli += ' port-mac-address ' + port_mac_address
cli += booleanArgs(crc_check_enable, 'crc-check-enable', 'crc-check-disable')
cli += booleanArgs(autoneg, 'autoneg', 'no-autoneg')
cli += booleanArgs(vxlan_termination, 'vxlan-termination', 'no-vxlan-termination')
cli += booleanArgs(pause, 'pause', 'no-pause')
cli += booleanArgs(loopback, 'loopback', 'no-loopback')
cli += booleanArgs(routing, 'routing', 'no-routing')
cli += booleanArgs(edge_switch, 'edge-switch', 'no-edge-switch')
cli += booleanArgs(enable, 'enable', 'disable')
cli += booleanArgs(host_enable, 'host-enable', 'host-disable')
cli += booleanArgs(mirror_only, 'mirror-only', 'no-mirror-receive-only')
cli += booleanArgs(reflect, 'reflect', 'no-reflect')
cli += booleanArgs(jumbo, 'jumbo', 'no-jumbo')
cli += booleanArgs(fabric_guard, 'fabric-guard', 'no-fabric-guard')
cli += booleanArgs(local_switching, 'local-switching', 'no-local-switching')
cli += booleanArgs(defer_bringup, 'defer-bringup', 'no-defer-bringup')
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| gpl-3.0 |
UOMx/edx-platform | lms/djangoapps/instructor/views/instructor_dashboard.py | 3 | 30702 | """
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
    # NOTE: For now, if we only have proctored exams enabled, then only platform Staff
# (user.is_staff) will be able to view the special exams tab. This may
# change in the future
can_see_special_exams = (
((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse( # pylint: disable=invalid-name
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': get_default_time_display(course.start),
'end_date': get_default_time_display(course.end) or _('No end date set'),
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
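# Illustrative reading of the helper above (numbers are made up): with
# MAX_ENROLLMENT_INSTR_BUTTONS = 200, a course with 150 enrollments is "small"
# (True) and the single-click grade/report buttons stay enabled, while a course
# with 5000 enrollments returns False and the dashboard sets 'disable_buttons'
# in instructor_dashboard_2 above.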
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
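# Illustrative output (URL and course key are made up): with
# ANALYTICS_DASHBOARD_URL = "https://insights.example.com" and course key
# "org/course/run", the helper returns
#
#   <a href="https://insights.example.com/courses/org/course/run" target="_blank">...</a>
#
# where "..." is settings.ANALYTICS_DASHBOARD_NAME.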
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': unicode(course.id),
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
| agpl-3.0 |
agentxan/plugin.video.emby | resources/lib/mutagen/easyid3.py | 38 | 15099 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Easier access to ID3 tags.
EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear
more like Vorbis or APEv2 tags.
"""
import mutagen.id3
from ._compat import iteritems, text_type, PY2
from mutagen import Metadata
from mutagen._util import DictMixin, dict_match
from mutagen.id3 import ID3, error, delete, ID3FileType
__all__ = ['EasyID3', 'Open', 'delete']
class EasyID3KeyError(KeyError, ValueError, error):
"""Raised when trying to get/set an invalid key.
Subclasses both KeyError and ValueError for API compatibility,
catching KeyError is preferred.
"""
class EasyID3(DictMixin, Metadata):
"""A file with an ID3 tag.
Like Vorbis comments, EasyID3 keys are case-insensitive ASCII
strings. Only a subset of ID3 frames are supported by default. Use
EasyID3.RegisterKey and its wrappers to support more.
You can also set the GetFallback, SetFallback, and DeleteFallback
to generic key getter/setter/deleter functions, which are called
if no specific handler is registered for a key. Additionally,
ListFallback can be used to supply an arbitrary list of extra
keys. These can be set on EasyID3 or on individual instances after
creation.
To use an EasyID3 class with mutagen.mp3.MP3::
from mutagen.mp3 import EasyMP3 as MP3
MP3(filename)
Because many of the attributes are constructed on the fly, things
like the following will not work::
ezid3["performer"].append("Joe")
Instead, you must do::
values = ezid3["performer"]
values.append("Joe")
ezid3["performer"] = values
"""
Set = {}
Get = {}
Delete = {}
List = {}
# For compatibility.
valid_keys = Get
GetFallback = None
SetFallback = None
DeleteFallback = None
ListFallback = None
@classmethod
def RegisterKey(cls, key,
getter=None, setter=None, deleter=None, lister=None):
"""Register a new key mapping.
A key mapping is four functions, a getter, setter, deleter,
and lister. The key may be either a string or a glob pattern.
        The getter, deleter, and lister receive an ID3 instance and
the requested key name. The setter also receives the desired
value, which will be a list of strings.
The getter, setter, and deleter are used to implement __getitem__,
__setitem__, and __delitem__.
The lister is used to implement keys(). It should return a
list of keys that are actually in the ID3 instance, provided
by its associated getter.
"""
key = key.lower()
if getter is not None:
cls.Get[key] = getter
if setter is not None:
cls.Set[key] = setter
if deleter is not None:
cls.Delete[key] = deleter
if lister is not None:
cls.List[key] = lister
@classmethod
def RegisterTextKey(cls, key, frameid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of ID3 frame name to EasyID3 key, then you can use this
function::
EasyID3.RegisterTextKey("title", "TIT2")
"""
def getter(id3, key):
return list(id3[frameid])
def setter(id3, key, value):
try:
frame = id3[frameid]
except KeyError:
id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value))
else:
frame.encoding = 3
frame.text = value
def deleter(id3, key):
del(id3[frameid])
cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterTXXXKey(cls, key, desc):
"""Register a user-defined text frame key.
Some ID3 tags are stored in TXXX frames, which allow a
freeform 'description' which acts as a subkey,
e.g. TXXX:BARCODE.::
EasyID3.RegisterTXXXKey('barcode', 'BARCODE').
"""
frameid = "TXXX:" + desc
def getter(id3, key):
return list(id3[frameid])
def setter(id3, key, value):
try:
frame = id3[frameid]
except KeyError:
enc = 0
# Store 8859-1 if we can, per MusicBrainz spec.
for v in value:
if v and max(v) > u'\x7f':
enc = 3
break
id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))
else:
frame.text = value
def deleter(id3, key):
del(id3[frameid])
cls.RegisterKey(key, getter, setter, deleter)
def __init__(self, filename=None):
self.__id3 = ID3()
if filename is not None:
self.load(filename)
load = property(lambda s: s.__id3.load,
lambda s, v: setattr(s.__id3, 'load', v))
def save(self, *args, **kwargs):
# ignore v2_version until we support 2.3 here
kwargs.pop("v2_version", None)
self.__id3.save(*args, **kwargs)
delete = property(lambda s: s.__id3.delete,
lambda s, v: setattr(s.__id3, 'delete', v))
filename = property(lambda s: s.__id3.filename,
lambda s, fn: setattr(s.__id3, 'filename', fn))
size = property(lambda s: s.__id3.size,
lambda s, fn: setattr(s.__id3, 'size', s))
def __getitem__(self, key):
key = key.lower()
func = dict_match(self.Get, key, self.GetFallback)
if func is not None:
return func(self.__id3, key)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def __setitem__(self, key, value):
key = key.lower()
if PY2:
if isinstance(value, basestring):
value = [value]
else:
if isinstance(value, text_type):
value = [value]
func = dict_match(self.Set, key, self.SetFallback)
if func is not None:
return func(self.__id3, key, value)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def __delitem__(self, key):
key = key.lower()
func = dict_match(self.Delete, key, self.DeleteFallback)
if func is not None:
return func(self.__id3, key)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def keys(self):
keys = []
for key in self.Get.keys():
if key in self.List:
keys.extend(self.List[key](self.__id3, key))
elif key in self:
keys.append(key)
if self.ListFallback is not None:
keys.extend(self.ListFallback(self.__id3, ""))
return keys
def pprint(self):
"""Print tag key=value pairs."""
strings = []
for key in sorted(self.keys()):
values = self[key]
for value in values:
strings.append("%s=%s" % (key, value))
return "\n".join(strings)
Open = EasyID3
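# Minimal usage sketch ("song.mp3" is a hypothetical path; kept in a comment so
# importing this module stays side-effect free):
#
#   from mutagen.easyid3 import EasyID3
#   audio = EasyID3("song.mp3")
#   audio["title"] = u"An Example Title"
#   audio["artist"] = [u"First Artist", u"Second Artist"]
#   audio.save()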
def genre_get(id3, key):
return id3["TCON"].genres
def genre_set(id3, key, value):
try:
frame = id3["TCON"]
except KeyError:
id3.add(mutagen.id3.TCON(encoding=3, text=value))
else:
frame.encoding = 3
frame.genres = value
def genre_delete(id3, key):
del(id3["TCON"])
def date_get(id3, key):
return [stamp.text for stamp in id3["TDRC"].text]
def date_set(id3, key, value):
id3.add(mutagen.id3.TDRC(encoding=3, text=value))
def date_delete(id3, key):
del(id3["TDRC"])
def original_date_get(id3, key):
return [stamp.text for stamp in id3["TDOR"].text]
def original_date_set(id3, key, value):
id3.add(mutagen.id3.TDOR(encoding=3, text=value))
def original_date_delete(id3, key):
del(id3["TDOR"])
def performer_get(id3, key):
people = []
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
raise KeyError(key)
for role, person in mcl.people:
if role == wanted_role:
people.append(person)
if people:
return people
else:
raise KeyError(key)
def performer_set(id3, key, value):
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
mcl = mutagen.id3.TMCL(encoding=3, people=[])
id3.add(mcl)
mcl.encoding = 3
people = [p for p in mcl.people if p[0] != wanted_role]
for v in value:
people.append((wanted_role, v))
mcl.people = people
def performer_delete(id3, key):
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
raise KeyError(key)
people = [p for p in mcl.people if p[0] != wanted_role]
if people == mcl.people:
raise KeyError(key)
elif people:
mcl.people = people
else:
del(id3["TMCL"])
def performer_list(id3, key):
try:
mcl = id3["TMCL"]
except KeyError:
return []
else:
return list(set("performer:" + p[0] for p in mcl.people))
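# The four functions above back the dynamic "performer:<role>" keys (values
# below are illustrative):
#
#   audio["performer:guitar"] = [u"Someone"]  # stored as a (guitar, Someone) pair in TMCL
#   audio.keys()                              # now includes "performer:guitar"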
def musicbrainz_trackid_get(id3, key):
return [id3["UFID:http://musicbrainz.org"].data.decode('ascii')]
def musicbrainz_trackid_set(id3, key, value):
if len(value) != 1:
raise ValueError("only one track ID may be set per song")
value = value[0].encode('ascii')
try:
frame = id3["UFID:http://musicbrainz.org"]
except KeyError:
frame = mutagen.id3.UFID(owner="http://musicbrainz.org", data=value)
id3.add(frame)
else:
frame.data = value
def musicbrainz_trackid_delete(id3, key):
del(id3["UFID:http://musicbrainz.org"])
def website_get(id3, key):
urls = [frame.url for frame in id3.getall("WOAR")]
if urls:
return urls
else:
raise EasyID3KeyError(key)
def website_set(id3, key, value):
id3.delall("WOAR")
for v in value:
id3.add(mutagen.id3.WOAR(url=v))
def website_delete(id3, key):
id3.delall("WOAR")
def gain_get(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
raise EasyID3KeyError(key)
else:
return [u"%+f dB" % frame.gain]
def gain_set(id3, key, value):
if len(value) != 1:
raise ValueError(
"there must be exactly one gain value, not %r.", value)
gain = float(value[0].split()[0])
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
id3.add(frame)
frame.gain = gain
def gain_delete(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
pass
else:
if frame.peak:
frame.gain = 0.0
else:
del(id3["RVA2:" + key[11:-5]])
def peak_get(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
raise EasyID3KeyError(key)
else:
return [u"%f" % frame.peak]
def peak_set(id3, key, value):
if len(value) != 1:
        raise ValueError(
            "there must be exactly one peak value, not %r." % (value,))
peak = float(value[0])
if peak >= 2 or peak < 0:
raise ValueError("peak must be => 0 and < 2.")
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
id3.add(frame)
frame.peak = peak
def peak_delete(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
pass
else:
if frame.gain:
frame.peak = 0.0
else:
del(id3["RVA2:" + key[11:-5]])
def peakgain_list(id3, key):
keys = []
for frame in id3.getall("RVA2"):
keys.append("replaygain_%s_gain" % frame.desc)
keys.append("replaygain_%s_peak" % frame.desc)
return keys
for frameid, key in iteritems({
"TALB": "album",
"TBPM": "bpm",
"TCMP": "compilation", # iTunes extension
"TCOM": "composer",
"TCOP": "copyright",
"TENC": "encodedby",
"TEXT": "lyricist",
"TLEN": "length",
"TMED": "media",
"TMOO": "mood",
"TIT2": "title",
"TIT3": "version",
"TPE1": "artist",
"TPE2": "performer",
"TPE3": "conductor",
"TPE4": "arranger",
"TPOS": "discnumber",
"TPUB": "organization",
"TRCK": "tracknumber",
"TOLY": "author",
"TSO2": "albumartistsort", # iTunes extension
"TSOA": "albumsort",
"TSOC": "composersort", # iTunes extension
"TSOP": "artistsort",
"TSOT": "titlesort",
"TSRC": "isrc",
"TSST": "discsubtitle",
"TLAN": "language",
}):
EasyID3.RegisterTextKey(key, frameid)
EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete)
EasyID3.RegisterKey("date", date_get, date_set, date_delete)
EasyID3.RegisterKey("originaldate", original_date_get, original_date_set,
original_date_delete)
EasyID3.RegisterKey(
"performer:*", performer_get, performer_set, performer_delete,
performer_list)
EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get,
musicbrainz_trackid_set, musicbrainz_trackid_delete)
EasyID3.RegisterKey("website", website_get, website_set, website_delete)
EasyID3.RegisterKey(
"replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list)
EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete)
# At various times, information for this came from
# http://musicbrainz.org/docs/specs/metadata_tags.html
# http://bugs.musicbrainz.org/ticket/1383
# http://musicbrainz.org/doc/MusicBrainzTag
for desc, key in iteritems({
u"MusicBrainz Artist Id": "musicbrainz_artistid",
u"MusicBrainz Album Id": "musicbrainz_albumid",
u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
u"MusicBrainz TRM Id": "musicbrainz_trmid",
u"MusicIP PUID": "musicip_puid",
u"MusicMagic Fingerprint": "musicip_fingerprint",
u"MusicBrainz Album Status": "musicbrainz_albumstatus",
u"MusicBrainz Album Type": "musicbrainz_albumtype",
u"MusicBrainz Album Release Country": "releasecountry",
u"MusicBrainz Disc Id": "musicbrainz_discid",
u"ASIN": "asin",
u"ALBUMARTISTSORT": "albumartistsort",
u"BARCODE": "barcode",
u"CATALOGNUMBER": "catalognumber",
u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid",
u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid",
u"MusicBrainz Work Id": "musicbrainz_workid",
u"Acoustid Fingerprint": "acoustid_fingerprint",
u"Acoustid Id": "acoustid_id",
}):
EasyID3.RegisterTXXXKey(key, desc)
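# A minimal usage sketch, assuming an MP3 file at a hypothetical path
# "example.mp3"; the tag values are illustrative only:
#
#     from mutagen.easyid3 import EasyID3
#
#     audio = EasyID3("example.mp3")
#     audio["title"] = u"An example title"           # stored as a TIT2 frame
#     audio["performer:guitar"] = [u"A. Guitarist"]  # stored in the TMCL frame
#     audio.save()
#     print(audio.pprint())                          # key=value, one per line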
class EasyID3FileType(ID3FileType):
"""Like ID3FileType, but uses EasyID3 for tags."""
ID3 = EasyID3
| gpl-2.0 |
AICP/external_chromium_org | tools/deep_memory_profiler/subcommands/upload.py | 123 | 2545 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import tempfile
import zipfile
from lib.subcommand import SubCommand
from lib.symbol import SymbolDataSources
LOGGER = logging.getLogger('dmprof')
class UploadCommand(SubCommand):
def __init__(self):
super(UploadCommand, self).__init__(
'Usage: %prog upload [--gsutil path/to/gsutil] '
'<first-dump> <destination-gs-path>')
self._parser.add_option('--gsutil', default='gsutil',
help='path to GSUTIL', metavar='GSUTIL')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 2)
dump_path = args[1]
gs_path = args[2]
dump_files = SubCommand._find_all_dumps(dump_path)
bucket_files = SubCommand._find_all_buckets(dump_path)
prefix = SubCommand._find_prefix(dump_path)
symbol_data_sources = SymbolDataSources(prefix)
symbol_data_sources.prepare()
symbol_path = symbol_data_sources.path()
handle_zip, filename_zip = tempfile.mkstemp('.zip', 'dmprof')
os.close(handle_zip)
try:
file_zip = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
for filename in dump_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
for filename in bucket_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
symbol_basename = os.path.basename(os.path.abspath(symbol_path))
for filename in os.listdir(symbol_path):
if not filename.startswith('.'):
file_zip.write(os.path.join(symbol_path, filename),
os.path.join(symbol_basename, os.path.basename(
os.path.abspath(filename))))
file_zip.close()
returncode = UploadCommand._run_gsutil(
options.gsutil, 'cp', '-a', 'public-read', filename_zip, gs_path)
finally:
os.remove(filename_zip)
return returncode
@staticmethod
def _run_gsutil(gsutil, *args):
"""Run gsutil as a subprocess.
Args:
*args: Arguments to pass to gsutil. The first argument should be an
operation such as ls, cp or cat.
Returns:
The return code from the process.
"""
command = [gsutil] + list(args)
LOGGER.info("Running: %s", command)
try:
return subprocess.call(command)
except OSError, e:
      LOGGER.error('Failed to run gsutil: %s', e)
| bsd-3-clause |
Endika/OpenUpgrade | addons/sale_journal/__openerp__.py | 262 | 2637 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
The sales journal module allows you to categorise your sales and deliveries (picking lists) between different journals.
========================================================================================================================
This module is very helpful for bigger companies that work by departments.
You can use journals for different purposes, some examples:
------------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
Journals have a responsible and evolves between different status:
-----------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, to validate or invoice packing.
It also supports batch invoicing methods that can be configured by partners and sales orders, examples:
-------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journals are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/sphinxext/plot_directive.py | 1 | 28321 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
        ``:context: close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
matplotlib.use('Agg')
except UserWarning:
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
else:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
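# For example, unescape_doctest(">>> plt.plot([1, 2])\nsome output") returns
# "plt.plot([1, 2])\n# some output\n": doctest prompts are stripped and
# non-code output lines are turned into comments.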
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
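# For example, split_code_at_show() applied to a script containing two
# plt.show() calls yields up to three code pieces (the trailing piece only if
# code follows the last show); each piece is later rendered as its own set of
# figures.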
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
    sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in'
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
        # Sphinx < 1.3 passes plot_formats as a comma-separated string; split
        # it to mimic Sphinx 1.3 and later, which always returns a list.
plot_formats = plot_formats.split(',')
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
if ':' in fmt:
                suffix, dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir) # no problem here for me, but just use built-ins
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and not nofigs,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| mit |
vpelletier/neoppod | neo/storage/app.py | 1 | 13647 | #
# Copyright (C) 2006-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from collections import deque
from neo.lib import logging
from neo.lib.app import BaseApplication
from neo.lib.protocol import uuid_str, \
CellStates, ClusterStates, NodeTypes, Packets
from neo.lib.node import NodeManager
from neo.lib.connection import ListeningConnection
from neo.lib.exception import StoppedOperation, PrimaryFailure
from neo.lib.pt import PartitionTable
from neo.lib.util import dump
from neo.lib.bootstrap import BootstrapManager
from .checker import Checker
from .database import buildDatabaseManager
from .exception import AlreadyPendingError
from .handlers import identification, initialization
from .handlers import master, hidden
from .replicator import Replicator
from .transactions import TransactionManager
from neo.lib.debug import register as registerLiveDebugger
class Application(BaseApplication):
"""The storage node application."""
def __init__(self, config):
super(Application, self).__init__(
config.getSSL(), config.getDynamicMasterList())
# set the cluster name
self.name = config.getCluster()
self.tm = TransactionManager(self)
self.dm = buildDatabaseManager(config.getAdapter(),
(config.getDatabase(), config.getEngine(), config.getWait()),
)
# load master nodes
for master_address in config.getMasters():
self.nm.createMaster(address=master_address)
# set the bind address
self.server = config.getBind()
logging.debug('IP address is %s, port is %d', *self.server)
# The partition table is initialized after getting the number of
# partitions.
self.pt = None
self.checker = Checker(self)
self.replicator = Replicator(self)
self.listening_conn = None
self.master_conn = None
self.master_node = None
# operation related data
self.event_queue = None
self.event_queue_dict = None
self.operational = False
        # ready is True when operational and all information has been received
self.ready = False
self.dm.setup(reset=config.getReset())
self.loadConfiguration()
# force node uuid from command line argument, for testing purpose only
if config.getUUID() is not None:
self.uuid = config.getUUID()
registerLiveDebugger(on_log=self.log)
def close(self):
self.listening_conn = None
self.dm.close()
super(Application, self).close()
def _poll(self):
self.em.poll(1)
def log(self):
self.em.log()
self.logQueuedEvents()
self.nm.log()
self.tm.log()
if self.pt is not None:
self.pt.log()
def loadConfiguration(self):
"""Load persistent configuration data from the database.
If data is not present, generate it."""
dm = self.dm
# check cluster name
name = dm.getName()
if name is None:
dm.setName(self.name)
elif name != self.name:
raise RuntimeError('name %r does not match with the database: %r'
% (self.name, name))
# load configuration
self.uuid = dm.getUUID()
num_partitions = dm.getNumPartitions()
num_replicas = dm.getNumReplicas()
ptid = dm.getPTID()
# check partition table configuration
if num_partitions is not None and num_replicas is not None:
if num_partitions <= 0:
raise RuntimeError, 'partitions must be more than zero'
# create a partition table
self.pt = PartitionTable(num_partitions, num_replicas)
logging.info('Configuration loaded:')
logging.info('UUID : %s', uuid_str(self.uuid))
logging.info('PTID : %s', dump(ptid))
logging.info('Name : %s', self.name)
logging.info('Partitions: %s', num_partitions)
logging.info('Replicas : %s', num_replicas)
def loadPartitionTable(self):
"""Load a partition table from the database."""
ptid = self.dm.getPTID()
cell_list = self.dm.getPartitionTable()
new_cell_list = []
for offset, uuid, state in cell_list:
# convert from int to Enum
state = CellStates[state]
# register unknown nodes
if self.nm.getByUUID(uuid) is None:
self.nm.createStorage(uuid=uuid)
new_cell_list.append((offset, uuid, state))
# load the partition table in manager
self.pt.clear()
self.pt.update(ptid, new_cell_list, self.nm)
def run(self):
try:
self._run()
except Exception:
logging.exception('Pre-mortem data:')
self.log()
logging.flush()
raise
def _run(self):
"""Make sure that the status is sane and start a loop."""
if len(self.name) == 0:
raise RuntimeError, 'cluster name must be non-empty'
# Make a listening port
handler = identification.IdentificationHandler(self)
self.listening_conn = ListeningConnection(self, handler, self.server)
self.server = self.listening_conn.getAddress()
# Connect to a primary master node, verify data, and
# start the operation. This cycle will be executed permanently,
# until the user explicitly requests a shutdown.
while True:
self.cluster_state = None
self.ready = False
self.operational = False
if self.master_node is None:
# look for the primary master
self.connectToPrimary()
# check my state
node = self.nm.getByUUID(self.uuid)
if node is not None and node.isHidden():
self.wait()
# drop any client node
for conn in self.em.getConnectionList():
if conn not in (self.listening_conn, self.master_conn):
conn.close()
# create/clear event queue
self.event_queue = deque()
self.event_queue_dict = {}
try:
self.initialize()
self.doOperation()
raise RuntimeError, 'should not reach here'
except StoppedOperation, msg:
logging.error('operation stopped: %s', msg)
except PrimaryFailure, msg:
logging.error('primary master is down: %s', msg)
finally:
self.checker = Checker(self)
def connectToPrimary(self):
"""Find a primary master node, and connect to it.
        If a primary master node is not elected or ready, retry the
        connection periodically.
Note that I do not accept any connection from non-master nodes
at this stage."""
pt = self.pt
# First of all, make sure that I have no connection.
for conn in self.em.getConnectionList():
if not conn.isListening():
conn.close()
# search, find, connect and identify to the primary master
bootstrap = BootstrapManager(self, self.name,
NodeTypes.STORAGE, self.uuid, self.server)
data = bootstrap.getPrimaryConnection()
(node, conn, uuid, num_partitions, num_replicas) = data
self.master_node = node
self.master_conn = conn
logging.info('I am %s', uuid_str(uuid))
self.uuid = uuid
self.dm.setUUID(uuid)
# Reload a partition table from the database. This is necessary
# when a previous primary master died while sending a partition
# table, because the table might be incomplete.
if pt is not None:
self.loadPartitionTable()
if num_partitions != pt.getPartitions():
raise RuntimeError('the number of partitions is inconsistent')
if pt is None or pt.getReplicas() != num_replicas:
# changing number of replicas is not an issue
self.dm.setNumPartitions(num_partitions)
self.dm.setNumReplicas(num_replicas)
self.pt = PartitionTable(num_partitions, num_replicas)
self.loadPartitionTable()
def initialize(self):
logging.debug('initializing...')
_poll = self._poll
self.master_conn.setHandler(initialization.InitializationHandler(self))
while not self.operational:
_poll()
self.ready = True
self.replicator.populate()
self.master_conn.notify(Packets.NotifyReady())
def doOperation(self):
"""Handle everything, including replications and transactions."""
logging.info('doing operation')
poll = self._poll
_poll = self.em._poll
isIdle = self.em.isIdle
handler = master.MasterOperationHandler(self)
self.master_conn.setHandler(handler)
# Forget all unfinished data.
self.dm.dropUnfinishedData()
self.tm.reset()
self.task_queue = task_queue = deque()
try:
self.dm.doOperation(self)
while True:
while task_queue:
try:
while isIdle():
if task_queue[-1].next():
_poll(0)
task_queue.rotate()
break
except StopIteration:
task_queue.pop()
poll()
finally:
del self.task_queue
# XXX: Although no handled exception should happen between
# replicator.populate() and the beginning of this 'try'
# clause, the replicator should be reset in a safer place.
self.replicator = Replicator(self)
# Abort any replication, whether we are feeding or out-of-date.
for node in self.nm.getStorageList(only_identified=True):
node.getConnection().close()
def changeClusterState(self, state):
self.cluster_state = state
if state == ClusterStates.STOPPING_BACKUP:
self.replicator.stop()
def wait(self):
# change handler
logging.info("waiting in hidden state")
_poll = self._poll
handler = hidden.HiddenHandler(self)
for conn in self.em.getConnectionList():
conn.setHandler(handler)
node = self.nm.getByUUID(self.uuid)
while True:
_poll()
if not node.isHidden():
break
def queueEvent(self, some_callable, conn=None, args=(), key=None,
raise_on_duplicate=True):
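        """Queue some_callable for later execution by executeQueuedEvents.
        If key is not None it is also used to detect duplicates: queueing a
        second event with the same key raises AlreadyPendingError, unless
        raise_on_duplicate is False.
        """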
event_queue_dict = self.event_queue_dict
n = event_queue_dict.get(key)
if n and raise_on_duplicate:
raise AlreadyPendingError()
msg_id = None if conn is None else conn.getPeerId()
self.event_queue.append((key, some_callable, msg_id, conn, args))
if key is not None:
event_queue_dict[key] = n + 1 if n else 1
def executeQueuedEvents(self):
p = self.event_queue.popleft
event_queue_dict = self.event_queue_dict
for _ in xrange(len(self.event_queue)):
key, some_callable, msg_id, conn, args = p()
if key is not None:
n = event_queue_dict[key] - 1
if n:
event_queue_dict[key] = n
else:
del event_queue_dict[key]
if conn is None:
some_callable(*args)
elif not conn.isClosed():
orig_msg_id = conn.getPeerId()
try:
conn.setPeerId(msg_id)
some_callable(conn, *args)
finally:
conn.setPeerId(orig_msg_id)
def logQueuedEvents(self):
if self.event_queue is None:
return
logging.info("Pending events:")
for key, event, _msg_id, _conn, args in self.event_queue:
logging.info(' %r:%r: %r:%r %r %r', key, event.__name__,
_msg_id, _conn, args)
def newTask(self, iterator):
try:
iterator.next()
except StopIteration:
return
self.task_queue.appendleft(iterator)
def closeClient(self, connection):
if connection is not self.replicator.getCurrentConnection() and \
connection not in self.checker.conn_dict:
connection.closeClient()
def shutdown(self, erase=False):
"""Close all connections and exit"""
for c in self.em.getConnectionList():
try:
c.close()
except PrimaryFailure:
pass
# clear database to avoid polluting the cluster at restart
if erase:
self.dm.erase()
logging.info("Application has been asked to shut down")
sys.exit()
| gpl-2.0 |
sq5bpf/osmo-tetra-sq5bpf | src/demod/python-3.6/usrp1-tetra_demod.py | 9 | 5194 | #!/usr/bin/env python
import sys
import math
from gnuradio import gr, gru, audio, eng_notation, blks2, optfir
from gnuradio import usrp
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
import cqpsk
except:
from tetra_demod import cqpsk
# applies frequency translation, resampling (interpolation/decimation) and cqpsk demodulation
class my_top_block(gr.top_block):
def __init__(self, options):
gr.top_block.__init__(self)
fusb_block_size = gr.prefs().get_long('fusb', 'block_size', 4096)
fusb_nblocks = gr.prefs().get_long('fusb', 'nblocks', 16)
self._u = usrp.source_c(decim_rate=options.decim, fusb_block_size=fusb_block_size, fusb_nblocks=fusb_nblocks)
# master clock
if options.fpga_freq is not None:
self._u.set_fpga_master_clock_freq(long(options.fpga_freq))
        # default subdev if user didn't pick one
if options.rx_subdev_spec is None:
            if self._u.db(0, 0).dbid() >= 0:
                options.rx_subdev_spec = (0, 0)
            elif self._u.db(1, 0).dbid() >= 0:
options.rx_subdev_spec = (1, 0)
else:
options.rx_subdev_spec = (0, 0)
# configure usrp mux
self._u.set_mux(usrp.determine_rx_mux_value(self._u, options.rx_subdev_spec))
# determine the daughterboard subdevice
self.subdev = usrp.selected_subdev(self._u, options.rx_subdev_spec)
# select antenna
if options.antenna is not None:
print "Selecting antenna %s" % (options.antenna,)
self.subdev.select_rx_antenna(options.antenna)
# set initial values
if options.gain is None:
# if no gain was specified, use the mid-point in dB
g = self.subdev.gain_range()
options.gain = float(g[0]+g[1])/2
r = self._u.tune(0, self.subdev, options.freq)
self.subdev.set_gain(options.gain)
#sample_rate = options.fpga_clock/options.decim
sample_rate = self._u.adc_freq() / self._u.decim_rate()
symbol_rate = 18000
sps = 2
# output rate will be 36,000
ntaps = 11 * sps
new_sample_rate = symbol_rate * sps
channel_taps = gr.firdes.low_pass(1.0, sample_rate, options.low_pass, options.low_pass * 0.1, gr.firdes.WIN_HANN)
FILTER = gr.freq_xlating_fir_filter_ccf(1, channel_taps, options.calibration, sample_rate)
sys.stderr.write("sample rate: %d\n" %(sample_rate))
DEMOD = cqpsk.cqpsk_demod( samples_per_symbol = sps,
excess_bw=0.35,
costas_alpha=0.03,
gain_mu=0.05,
mu=0.05,
omega_relative_limit=0.05,
log=options.log,
verbose=options.verbose)
OUT = gr.file_sink(gr.sizeof_float, options.output_file)
r = float(sample_rate) / float(new_sample_rate)
INTERPOLATOR = gr.fractional_interpolator_cc(0, r)
self.connect(self._u, FILTER, INTERPOLATOR, DEMOD, OUT)
def get_options():
parser = OptionParser(option_class=eng_option)
# usrp related settings
parser.add_option("-d", "--decim", type="int", default=250,
help="Set USRP decimation rate to DECIM [default=%default]")
parser.add_option("-f", "--freq", type="eng_float", default=None,
help="set frequency to FREQ", metavar="FREQ")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=None,
help="Select USRP Rx side A or B (default=first one with a daughterboard)")
parser.add_option("-A", "--antenna", default=None,
help="select Rx Antenna")
parser.add_option("-F", "--fpga-freq", type="eng_float", default=None,
help="set USRP reference clock frequency to FPGA_FREQ", metavar="FPGA_FREQ")
# demodulator related settings
parser.add_option("-c", "--calibration", type="int", default=0, help="freq offset")
parser.add_option("-l", "--log", action="store_true", default=False, help="dump debug .dat files")
parser.add_option("-L", "--low-pass", type="eng_float", default=25e3, help="low pass cut-off", metavar="Hz")
parser.add_option("-o", "--output-file", type="string", default="out.float", help="specify the bit output file")
parser.add_option("-v", "--verbose", action="store_true", default=False, help="dump demodulation data")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
if options.freq is None:
parser.print_help()
        sys.stderr.write('You must specify the frequency with -f FREQ\n')
raise SystemExit, 1
return (options)
if __name__ == "__main__":
(options) = get_options()
tb = my_top_block(options)
try:
tb.run()
except KeyboardInterrupt:
tb.stop()
| agpl-3.0 |
sontek/rethinkdb | external/v8_3.30.33.16/tools/push-to-trunk/chromium_roll.py | 40 | 4767 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
from common_includes import *
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
# Update v8 remote tracking branches.
self.GitFetchOrigin()
class DetectLastPush(Step):
MESSAGE = "Detect commit ID of last push to trunk."
def RunStep(self):
self["last_push"] = self._options.last_push or self.FindLastTrunkPush(
branch="origin/candidates", include_patches=True)
self["push_title"] = self.GitLog(n=1, format="%s",
git_hash=self["last_push"])
class SwitchChromium(Step):
MESSAGE = "Switch to Chromium checkout."
def RunStep(self):
self["v8_path"] = os.getcwd()
cwd = self._options.chromium
os.chdir(cwd)
self.InitialEnvironmentChecks(cwd)
# Check for a clean workdir.
if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Assert that the DEPS file is there.
if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
MESSAGE = "Update the checkout and create a new branch."
def RunStep(self):
self.GitCheckout("master", cwd=self._options.chromium)
self.Command("gclient", "sync --nohooks", cwd=self._options.chromium)
self.GitPull(cwd=self._options.chromium)
# Update v8 remotes.
self.GitFetchOrigin()
self.GitCreateBranch("v8-roll-%s" % self["last_push"],
cwd=self._options.chromium)
class UploadCL(Step):
MESSAGE = "Create and upload CL."
def RunStep(self):
# Patch DEPS file.
if self.Command(
"roll-dep", "v8 %s" % self["last_push"],
cwd=self._options.chromium) is None:
self.Die("Failed to create deps for %s" % self["last_push"])
commit_title = "Update V8 to %s." % self["push_title"].lower()
sheriff = ""
if self["sheriff"]:
sheriff = ("\n\nPlease reply to the V8 sheriff %s in case of problems."
% self["sheriff"])
self.GitCommit("%s%s\n\nTBR=%s" %
(commit_title, sheriff, self._options.reviewer),
author=self._options.author,
cwd=self._options.chromium)
if not self._options.dry_run:
self.GitUpload(author=self._options.author,
force=True,
cq=self._options.use_commit_queue,
cwd=self._options.chromium)
print "CL uploaded."
else:
self.GitCheckout("master", cwd=self._options.chromium)
self.GitDeleteBranch("v8-roll-%s" % self["last_push"],
cwd=self._options.chromium)
print "Dry run - don't upload."
# TODO(machenbach): Make this obsolete. We are only in the chromium checkout
# for the initial .git check.
class SwitchV8(Step):
MESSAGE = "Returning to V8 checkout."
def RunStep(self):
os.chdir(self["v8_path"])
class CleanUp(Step):
MESSAGE = "Done!"
def RunStep(self):
print("Congratulations, you have successfully rolled %s into "
"Chromium. Please don't forget to update the v8rel spreadsheet."
% self["last_push"])
# Clean up all temporary files.
Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
class ChromiumRoll(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("-l", "--last-push",
help="The git commit ID of the last push to trunk.")
parser.add_argument("--use-commit-queue",
help="Check the CQ bit on upload.",
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
print "A reviewer (-r) and an author (-a) are required."
return False
options.requires_editor = False
options.force = True
options.manual = False
return True
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
}
def _Steps(self):
return [
Preparation,
DetectLastPush,
DetermineV8Sheriff,
SwitchChromium,
UpdateChromiumCheckout,
UploadCL,
SwitchV8,
CleanUp,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(ChromiumRoll().Run())
| agpl-3.0 |
JonathanBennett/marathon-metric-forwarder | node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | 47697 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
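# For example, EncodeRspFileList(['cl.exe', '/c', 'foo bar.cc']) returns
# 'cl.exe "/c" "foo bar.cc"'.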
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
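# For example, _GenericRetrieve({'a': {'b': 1}}, 0, ['a', 'b']) returns 1,
# while _GenericRetrieve({'a': {}}, 0, ['a', 'b']) returns the default 0.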
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There are two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
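# The generated command has the shape (hypothetical paths):
#   call "<cygwin_dir>\setup_env.bat" && set CYGWIN=nontsec && bash -c "cd <path> ; '<arg1>' '<arg2>'"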
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
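# Illustrative example (hypothetical values):
#   ExpandMacros('$(OutDir)\\$(TargetName).pdb',
#                {'$(OutDir)\\': 'out\\', '$(TargetName)': 'foo'}) -> 'out\\foo.pdb'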
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
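# Illustrative (hypothetical) input line: 'Path=C:\tools;C:\Windows' is stored
# under the upper-cased key 'PATH', with this python's directory prepended below.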
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
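# Illustrative example (hypothetical value): {'TMP': 'C:\\t'} is formatted as
# 'TMP=C:\\t' followed by a NUL, then a final NUL terminating the block.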
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
meet your requirements (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to gyp to suppress file
generation and use custom environment files that you have prepared yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| mit |
wileeam/airflow | airflow/migrations/versions/cf5dc11e79ad_drop_user_and_chart.py | 7 | 3768 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""drop_user_and_chart
Revision ID: cf5dc11e79ad
Revises: 41f5f12752f8
Create Date: 2019-01-24 15:30:35.834740
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'cf5dc11e79ad'
down_revision = '41f5f12752f8'
branch_labels = None
depends_on = None
def upgrade():
# We previously had a KnownEvent table, but we deleted the table without
# a down migration to remove it (so we didn't delete anyone's data if they
# happened to be using the feature).
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
# Dropping the FK constraint is only possible on dialects that support it
# (not sqlite), and only if the known_event table is still present.
if 'known_event' in inspector.get_table_names() and conn.dialect.name != 'sqlite':
op.drop_constraint('known_event_user_id_fkey', 'known_event')
op.drop_table("chart")
op.drop_table("users")
def downgrade():
conn = op.get_bind()
op.create_table(
'users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=250), nullable=True),
sa.Column('email', sa.String(length=500), nullable=True),
sa.Column('password', sa.String(255)),
sa.Column('superuser', sa.Boolean(), default=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table(
'chart',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('conn_id', sa.String(length=250), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('chart_type', sa.String(length=100), nullable=True),
sa.Column('sql_layout', sa.String(length=50), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('y_log_scale', sa.Boolean(), nullable=True),
sa.Column('show_datatable', sa.Boolean(), nullable=True),
sa.Column('show_sql', sa.Boolean(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('default_params', sa.String(length=5000), nullable=True),
sa.Column('x_is_date', sa.Boolean(), nullable=True),
sa.Column('iteration_no', sa.Integer(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
if conn.dialect.name == 'mysql':
conn.execute("SET time_zone = '+00:00'")
op.alter_column(table_name='chart', column_name='last_modified', type_=mysql.TIMESTAMP(fsp=6))
else:
if conn.dialect.name in ('sqlite', 'mssql'):
return
if conn.dialect.name == 'postgresql':
conn.execute("set timezone=UTC")
op.alter_column(table_name='chart', column_name='last_modified', type_=sa.TIMESTAMP(timezone=True))
| apache-2.0 |
etnestad/xcsoar | tools/gdb.py | 25 | 7308 | #
# XCSoar Glide Computer - http://www.xcsoar.org/
# Copyright (C) 2000-2012 The XCSoar Project
# A detailed list of copyright holders can be found in the file "AUTHORS".
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# This is a gdb module that aids debugging XCSoar. To load it, launch
# gdb and type:
#
# source gdb.py
#
import gdb
double_type = gdb.lookup_type('double')
string_type = gdb.lookup_type('char').pointer()
def fixed_value(f):
"""Extracts the floating point value of a 'fixed' instance."""
if f.type.unqualified().strip_typedefs().code == double_type.code:
return float(f.cast(double_type))
else:
return long(f['m_nVal']) / (1. * (1 << 28))
def angle_value(a):
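# 57.2957795131 is approximately 180/pi: the stored radian value becomes degrees.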
return fixed_value(a['value']) * 57.2957795131
class FixedPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return str(fixed_value(self.value))
class AnglePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return str(angle_value(self.value))
class AngleRangePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
start = AnglePrinter(self.value['start']).to_string()
end = AnglePrinter(self.value['end']).to_string()
return 'AngleRange(%s..%s)' % (start, end)
class GeoPointPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
if angle_value(self.value['latitude']) >= 180:
return 'GeoPoint::INVALID'
longitude = AnglePrinter(self.value['longitude']).to_string()
latitude = AnglePrinter(self.value['latitude']).to_string()
return 'GeoPoint(%s %s)' % (longitude, latitude)
class GeoBoundsPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
if angle_value(self.value['latitude']['end']) >= 180:
return 'GeoBounds::INVALID'
west = AnglePrinter(self.value['longitude']['start']).to_string()
east = AnglePrinter(self.value['longitude']['end']).to_string()
south = AnglePrinter(self.value['latitude']['start']).to_string()
north = AnglePrinter(self.value['latitude']['end']).to_string()
return 'GeoBounds([%s .. %s] [%s .. %s])' % (west, east, south, north)
class GeoVectorPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
bearing = AnglePrinter(self.value['bearing']).to_string()
distance = fixed_value(self.value['distance'])
if distance < 0:
return 'GeoVector::INVALID'
return 'GeoVector(%s %s)' % (bearing, distance)
class SpeedVectorPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
bearing = AnglePrinter(self.value['bearing']).to_string()
norm = fixed_value(self.value['norm'])
if norm < 0:
return 'GeoVector::INVALID'
if norm == 0:
return 'GeoVector::ZERO'
return 'SpeedVector(%s %s)' % (bearing, norm)
class ValidityPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'Validity(%u)' % self.value['last']
class StaticStringPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return self.value['data'].cast(string_type)
class BrokenDatePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'Date(%04u/%02u/%02u)' % \
(int(self.value['year']),
int(self.value['month']),
int(self.value['day']))
class BrokenTimePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'Time(%02u:%02u:%02u)' % \
(int(self.value['hour']),
int(self.value['minute']),
int(self.value['second']))
class BrokenDateTimePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'DateTime(%04u/%02u/%02u %02u:%02u:%02u)' % \
(int(self.value['year']),
int(self.value['month']),
int(self.value['day']),
int(self.value['hour']),
int(self.value['minute']),
int(self.value['second']))
class RoughTimePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
value = int(self.value['value'])
if value == 0xffff:
return 'RoughTime::INVALID'
return 'RoughTime(%02u:%02u)' % (value / 60, value % 60)
class RoughTimeSpanPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
start = int(self.value['start']['value'])
end = int(self.value['end']['value'])
if start == 0xffff and end == 0xffff:
return 'RoughTimeSpan::INVALID'
if start == 0xffff:
start = ''
else:
start = '%02u:%02u' % (start / 60, start % 60)
if end == 0xffff:
end = ''
else:
end = '%02u:%02u' % (end / 60, end % 60)
return 'RoughTimeSpan(%s..%s)' % (start, end)
def lookup_function(value):
type = value.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
type = type.unqualified().strip_typedefs()
typename = type.tag
if typename == None:
return None
if typename == 'fixed':
return FixedPrinter(value)
elif typename == 'Angle':
return AnglePrinter(value)
elif typename == 'AngleRange':
return AngleRangePrinter(value)
elif typename == 'GeoPoint':
return GeoPointPrinter(value)
elif typename == 'GeoBounds':
return GeoBoundsPrinter(value)
elif typename == 'GeoVector':
return GeoVectorPrinter(value)
elif typename == 'SpeedVector':
return SpeedVectorPrinter(value)
elif typename == 'Validity':
return ValidityPrinter(value)
elif typename == 'BrokenDate':
return BrokenDatePrinter(value)
elif typename == 'BrokenTime':
return BrokenTimePrinter(value)
elif typename == 'BrokenDateTime':
return BrokenDateTimePrinter(value)
elif typename == 'RoughTime':
return RoughTimePrinter(value)
elif typename == 'RoughTimeSpan':
return RoughTimeSpanPrinter(value)
elif typename[:12] == 'StaticString' or typename[:12] == 'NarrowString':
return StaticStringPrinter(value)
return None
gdb.pretty_printers.append(lookup_function)
| gpl-2.0 |
natetrue/ReplicatorG | skein_engines/skeinforge-31/fabmetheus_utilities/geometry/geometry_tools/path.py | 2 | 6168 | """
Path objects and SVG fabrication carving.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_tools import dictionary
from fabmetheus_utilities.geometry.geometry_tools import vertex
from fabmetheus_utilities.geometry.manipulation_evaluator import matrix
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import svg_writer
from fabmetheus_utilities import xml_simple_reader
from fabmetheus_utilities import xml_simple_writer
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/02/05 $"
__license__ = 'GPL 3.0'
def convertProcessXMLElementRenameByPaths(geometryOutput, xmlElement):
"Convert the xml element to a path xml element, add paths and process."
convertXMLElementRenameByPaths(geometryOutput, xmlElement)
processXMLElement(xmlElement)
def convertXMLElement(geometryOutput, xmlElement):
"Convert the xml element to a path xml element."
vertex.addGeometryList(geometryOutput, xmlElement)
def convertXMLElementRename(geometryOutput, xmlElement):
"Convert the xml element to a path xml element."
xmlElement.className = 'path'
convertXMLElement(geometryOutput, xmlElement)
def convertXMLElementRenameByPaths(geometryOutput, xmlElement):
"Convert the xml element to a path xml element and add paths."
xmlElement.className = 'path'
for geometryOutputChild in geometryOutput:
pathElement = xml_simple_reader.XMLElement()
pathElement.setParentAddToChildren(xmlElement)
convertXMLElementRename( geometryOutputChild, pathElement)
def processXMLElement(xmlElement):
"Process the xml element."
evaluate.processArchivable(Path, xmlElement)
class Path(dictionary.Dictionary):
"A path."
def __init__(self):
"Add empty lists."
dictionary.Dictionary.__init__(self)
self.matrix4X4 = matrix.Matrix()
self.oldChainTetragrid = None
self.transformedPath = None
self.vertexes = []
def addXMLInnerSection(self, depth, output):
"Add the xml section for this object."
if self.matrix4X4 != None:
self.matrix4X4.addXML(depth, output)
xml_simple_writer.addXMLFromVertexes(depth, output, self.vertexes)
def getFabricationExtension(self):
"Get fabrication extension."
return 'svg'
def getFabricationText(self):
"Get fabrication text."
carving = SVGFabricationCarving(self.xmlElement)
carving.setCarveLayerThickness(evaluate.getSheetThickness(self.xmlElement))
carving.processSVGElement(self.xmlElement.getRoot().parser.fileName)
return str(carving)
def getMatrixChainTetragrid(self):
"Get the matrix chain tetragrid."
return self.matrix4X4.getOtherTimesSelf(self.xmlElement.parent.object.getMatrixChainTetragrid()).matrixTetragrid
def getPaths(self):
"Get all paths."
self.transformedPath = None
return dictionary.getAllPaths([self.vertexes], self)
def getTransformedPaths(self):
"Get all transformed paths."
if self.xmlElement == None:
return dictionary.getAllPaths([self.vertexes], self)
chainTetragrid = self.getMatrixChainTetragrid()
if self.oldChainTetragrid != chainTetragrid:
self.oldChainTetragrid = chainTetragrid
self.transformedPath = None
if self.transformedPath == None:
self.transformedPath = matrix.getTransformedVector3s(chainTetragrid, self.vertexes)
return dictionary.getAllTransformedPaths([self.transformedPath], self)
class SVGFabricationCarving:
"An slc carving."
def __init__(self, xmlElement):
"Add empty lists."
self.layerThickness = 1.0
self.rotatedLoopLayers = []
self.xmlElement = xmlElement
def __repr__(self):
"Get the string representation of this carving."
return self.getCarvedSVG()
def addXML(self, depth, output):
"Add xml for this object."
xml_simple_writer.addXMLFromObjects(depth, self.rotatedLoopLayers, output)
def getCarveCornerMaximum(self):
"Get the corner maximum of the vertexes."
return self.cornerMaximum
def getCarveCornerMinimum(self):
"Get the corner minimum of the vertexes."
return self.cornerMinimum
def getCarvedSVG(self):
"Get the carved svg text."
return svg_writer.getSVGByLoopLayers(False, self.rotatedLoopLayers, self)
def getCarveLayerThickness(self):
"Get the layer thickness."
return self.layerThickness
def getCarveRotatedBoundaryLayers(self):
"Get the rotated boundary layers."
return self.rotatedLoopLayers
def getFabmetheusXML(self):
"Return the fabmetheus XML."
return self.xmlElement.getParser().getOriginalRoot()
def getInterpretationSuffix(self):
"Return the suffix for a carving."
return 'svg'
def processSVGElement(self, fileName):
"Parse SVG element and store the layers."
self.fileName = fileName
paths = self.xmlElement.object.getPaths()
if len(paths) < 1:
return
firstPath = paths[0]
if len(firstPath) < 1:
return
rotatedLoopLayer = euclidean.RotatedLoopLayer(firstPath[0].z)
self.rotatedLoopLayers.append(rotatedLoopLayer)
for path in paths:
rotatedLoopLayer.loops.append(euclidean.getComplexPath(path))
self.cornerMaximum = Vector3(-999999999.0, -999999999.0, -999999999.0)
self.cornerMinimum = Vector3(999999999.0, 999999999.0, 999999999.0)
svg_writer.setSVGCarvingCorners(self.rotatedLoopLayers, self)
halfLayerThickness = 0.5 * self.layerThickness
self.cornerMaximum.z += halfLayerThickness
self.cornerMinimum.z -= halfLayerThickness
def setCarveBridgeLayerThickness( self, bridgeLayerThickness ):
"Set the bridge layer thickness. If the infill is not in the direction of the bridge, the bridge layer thickness should be given as None or not set at all."
pass
def setCarveLayerThickness( self, layerThickness ):
"Set the layer thickness."
self.layerThickness = layerThickness
def setCarveImportRadius( self, importRadius ):
"Set the import radius."
pass
def setCarveIsCorrectMesh( self, isCorrectMesh ):
"Set the is correct mesh flag."
pass
| gpl-2.0 |
xzovy/cryptographer.py | cryptochatter-server.py | 1 | 2340 | # chat_server.py
import sys, socket, select
HOST = ''
SOCKET_LIST = []
RECV_BUFFER = 4096
PORT = 443
def chat_server():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(10)
# add server socket object to the list of readable connections
SOCKET_LIST.append(server_socket)
print("Chat server started on port " + str(PORT))
while 1:
# get the list sockets which are ready to be read through select
# 4th arg, time_out = 0 : poll and never block
ready_to_read,ready_to_write,in_error = select.select(SOCKET_LIST,[],[],0)
for sock in ready_to_read:
# a new connection request received
if sock == server_socket:
sockfd, addr = server_socket.accept()
SOCKET_LIST.append(sockfd)
# a message from a client, not a new connection
else:
# process data received from client,
try:
# receiving data from the socket.
data = sock.recv(RECV_BUFFER)
if data:
# there is something in the socket
msg = data.decode('utf-8')
print(msg)
broadcast(server_socket, sock, data)
else:
# remove the socket that's broken
if sock in SOCKET_LIST:
SOCKET_LIST.remove(sock)
# exception
except:
continue
server_socket.close()
# broadcast chat messages to all connected clients
def broadcast (server_socket, sock, message):
for socket in SOCKET_LIST:
# send the message only to peer
if socket != server_socket and socket != sock :
try :
socket.send(message)
except :
# broken socket connection
socket.close()
# broken socket, remove it
if socket in SOCKET_LIST:
SOCKET_LIST.remove(socket)
if __name__ == "__main__":
sys.exit(chat_server())
| gpl-2.0 |
lrr-tum/mmbwmon | vendor/fast-lib/vendor/mosquitto-1.4.12/test/lib/03-publish-c2b-qos2-disconnect.py | 10 | 3012 | #!/usr/bin/env python
# Test whether a client sends a correct PUBLISH to a topic with QoS 2 and responds to a disconnect.
import inspect
import os
import socket
import sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("publish-qos2-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
publish_packet = mosq_test.gen_publish("pub/qos2/test", qos=2, mid=mid, payload="message")
publish_dup_packet = mosq_test.gen_publish("pub/qos2/test", qos=2, mid=mid, payload="message", dup=True)
pubrec_packet = mosq_test.gen_pubrec(mid)
pubrel_packet = mosq_test.gen_pubrel(mid)
pubcomp_packet = mosq_test.gen_pubcomp(mid)
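# Expected QoS 2 handshake: PUBLISH -> PUBREC -> PUBREL -> PUBCOMP; after each
# simulated disconnect below the client must resend (PUBLISH with DUP, then PUBREL).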
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "publish", publish_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "retried publish", publish_dup_packet):
conn.send(pubrec_packet)
if mosq_test.expect_packet(conn, "pubrel", pubrel_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
# Complete connection and message flow.
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "retried pubrel", pubrel_packet):
conn.send(pubcomp_packet)
if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| gpl-3.0 |
j340m3/python-msquaredc | docs/conf.py | 1 | 1313 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'MsquaredC'
year = '2016-2018'
author = 'Jerome Bergmann'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/j340m3/python-msquaredc/issues/%s', '#'),
'pr': ('https://github.com/j340m3/python-msquaredc/pull/%s', 'PR #'),
}
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| bsd-2-clause |