Each record carries the following columns (name: type, with the observed length range, value range, or number of distinct values):

repo_name: string, lengths 5 to 92
path: string, lengths 4 to 221
copies: string, 19 distinct values
size: string, lengths 4 to 6
content: string, lengths 766 to 896k
license: string, 15 distinct values
hash: int64, -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean: float64, 6.51 to 99.9
line_max: int64, 32 to 997
alpha_frac: float64, 0.25 to 0.96
autogenerated: bool, 1 class
ratio: float64, 1.5 to 13.6
config_test: bool, 2 classes
has_no_keywords: bool, 2 classes
few_assignments: bool, 1 class
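For orientation, a minimal sketch of how records with this schema could be loaded and filtered, assuming the rows have been exported to a JSON-lines file; the filename code_files.jsonl and the filter thresholds are illustrative assumptions, not part of the dataset:

import pandas as pd

# Load the exported rows; each JSON line holds one record with the columns above.
df = pd.read_json("code_files.jsonl", lines=True)

# Keep rows that look like ordinary hand-written source files: not autogenerated,
# with a typical fraction of alphabetic characters and a modest mean line length.
mask = (~df["autogenerated"]) & df["alpha_frac"].between(0.3, 0.9) & (df["line_mean"] < 80)
subset = df.loc[mask, ["repo_name", "path", "license", "size"]]
print(subset.head())
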
repo_name: Jasonmk47/OpenWPM
path: automation/Proxy/mitm_commands.py
copies: 1
size: 4714
content:
# This module parses MITM Proxy requests/responses into (command, data pairs) # This should mean that the MITMProxy code should simply pass the messages + its own data to this module from urlparse import urlparse import datetime import mmh3 import json import zlib import os def encode_to_unicode(msg): """ Tries different encodings before setting on utf8 ignoring any errors We can likely inspect the headers for an encoding as well, though it won't always be correct. """ try: msg = unicode(msg, 'utf8') except UnicodeDecodeError: try: msg = unicode(msg, 'ISO-8859-1') except UnicodeDecodeError: msg = unicode(msg, 'utf8', 'ignore') return msg def process_general_mitm_request(db_socket, browser_params, visit_id, msg): """ Logs a HTTP request object """ referrer = msg.request.headers['referer'][0] if len(msg.request.headers['referer']) > 0 else '' data = (browser_params['crawl_id'], encode_to_unicode(msg.request.url), msg.request.method, encode_to_unicode(referrer), json.dumps(msg.request.headers.get_state()), visit_id, str(datetime.datetime.now())) db_socket.send(("INSERT INTO http_requests (crawl_id, url, method, referrer, headers, " "visit_id, time_stamp) VALUES (?,?,?,?,?,?,?)", data)) def process_general_mitm_response(db_socket, ldb_socket, logger, browser_params, visit_id, msg): """ Logs a HTTP response object and, if necessary, """ referrer = msg.request.headers['referer'][0] if len(msg.request.headers['referer']) > 0 else '' location = msg.response.headers['location'][0] if len(msg.response.headers['location']) > 0 else '' content_hash = save_javascript_content(ldb_socket, logger, browser_params, msg) data = (browser_params['crawl_id'], encode_to_unicode(msg.request.url), encode_to_unicode(msg.request.method), encode_to_unicode(referrer), msg.response.code, msg.response.msg, json.dumps(msg.response.headers.get_state()), encode_to_unicode(location), visit_id, str(datetime.datetime.now()), content_hash) db_socket.send(("INSERT INTO http_responses (crawl_id, url, method, referrer, response_status, " "response_status_text, headers, location, visit_id, time_stamp, content_hash) VALUES (?,?,?,?,?,?,?,?,?,?,?)", data)) def save_javascript_content(ldb_socket, logger, browser_params, msg): """ Save javascript files de-duplicated and compressed on disk """ if not browser_params['save_javascript']: return # Check if this response is javascript content is_js = False if (len(msg.response.headers['Content-Type']) > 0 and 'javascript' in msg.response.headers['Content-Type'][0]): is_js = True if not is_js and urlparse(msg.request.url).path.split('.')[-1] == 'js': is_js = True if not is_js: return # Decompress any content with compression # We want files to hash to the same value # Firefox currently only accepts gzip/deflate script = '' content_encoding = msg.response.headers['Content-Encoding'] if (len(content_encoding) == 0 or content_encoding[0].lower() == 'utf-8' or content_encoding[0].lower() == 'identity' or content_encoding[0].lower() == 'none' or content_encoding[0].lower() == 'ansi_x3.4-1968' or content_encoding[0].lower() == 'utf8' or content_encoding[0] == ''): script = msg.response.content elif 'gzip' in content_encoding[0].lower(): try: script = zlib.decompress(msg.response.content, zlib.MAX_WBITS|16) except zlib.error as e: logger.error('BROWSER %i: Received zlib error when trying to decompress gzipped javascript: %s' % (browser_params['crawl_id'],str(e))) return elif 'deflate' in content_encoding[0].lower(): try: script = zlib.decompress(msg.response.content, -zlib.MAX_WBITS) except 
zlib.error as e: logger.error('BROWSER %i: Received zlib error when trying to decompress deflated javascript: %s' % (browser_params['crawl_id'],str(e))) return else: logger.error('BROWSER %i: Received Content-Encoding %s. Not supported by Firefox, skipping archive.' % (browser_params['crawl_id'], str(content_encoding))) return ldb_socket.send(script) # Hash script for deduplication on disk hasher = mmh3.hash128 script_hash = str(hasher(script) >> 64) return script_hash
license: gpl-3.0
hash: 2,147,477,469,874,632,200
line_mean: 39.637931
line_max: 163
alpha_frac: 0.627068
autogenerated: false
ratio: 3.823195
config_test: false
has_no_keywords: false
few_assignments: false

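The OpenWPM record above deduplicates archived JavaScript by hashing each response body with MurmurHash3 and keeping the upper 64 bits. A small sketch of that hashing step; the script body here is a placeholder, not content from the record:

import mmh3

script = b"console.log('hello');"  # placeholder body for illustration
# As in save_javascript_content(): 128-bit MurmurHash3, upper 64 bits,
# rendered as a decimal string and used as the on-disk deduplication key.
script_hash = str(mmh3.hash128(script) >> 64)
print(script_hash)
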
repo_name: Sharecare/cyclops
path: app/httpreq.py
copies: 1
size: 4917
content:
import urllib import urllib2 import urlparse import socket import time import json import sys import logging logger = logging.getLogger(__name__) import pprint pp = pprint.PrettyPrinter(indent=4) # we need to make sure we don't follow redirects so build a new opener class NoRedirection(urllib2.HTTPErrorProcessor): def http_response(self, request, response): return response https_response = http_response # by default, urllib2 only deals with GET and POST # so we subclass it and make it handle other methods class RequestWithMethod(urllib2.Request): def __init__(self, url, method, data=None, headers={}, origin_req_host=None, unverifiable=False): self._method = method # build up a copy of the full request u = urlparse.urlparse(url) self._the_request = "%s %s HTTP/1.1\n" % (method, u.path) for h in headers: self._the_request += "%s: %s\n" % (h, headers[h]) self._the_request += "\n" if data: self._the_request += data urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable) def get_method(self): if self._method: return self._method else: return urllib2.Request.get_method(self) class HTTPReq(): def __init__(self, timeout=10): self.timeout = timeout self.AcceptTypes = {} self.AcceptTypes['json'] = 'application/json' self.AcceptTypes['xml'] = 'application/xml' self.AcceptTypes['text'] = 'text/plain' self.AcceptTypes['csv'] = 'text/csv' def accept2type(self, accept): for k in self.AcceptTypes: try: if self.AcceptTypes[k] == accept: return(k) except: pass return('json') def _query(self, req): start = end = 0 code = -1 rheaders = {} ret = None retheaders = None try: opener = urllib2.build_opener(NoRedirection) except Exception, e: logger.exception(e) sys.exit(0) try: start = time.time() response = opener.open(req, timeout=self.timeout) end = time.time() code = response.code retheaders = response.info() except urllib2.URLError, e: if hasattr(e, 'reason'): logger.exception(e) ret = str(e.reason) else: code = e.code retheaders = e.info() ret = e.read() raise e except IOError, e: if hasattr(e, 'reason'): reason = e.reason elif hasattr(e, 'code'): code = e.code rheaders = e.info() else: logger.exception(e) raise e try: ret = response.read() except: pass try: for r in retheaders.items(): rheaders[r[0].lower()] = r[1] except: pass #return dict(content=ret.decode('ascii', errors='ignore'), status=code, headers=rheaders, speed=(end - start), request=req._the_request) return dict(content=ret, status=code, headers=rheaders, speed=(end - start), request=req._the_request) def get(self, url, data=None, headers={}, type=None): req = None try: if self.AcceptTypes[type]: headers['Accept'] = self.AcceptTypes[type] headers['Content-Type'] = self.AcceptTypes[type] req = RequestWithMethod(url, 'GET', headers=headers) except: req = RequestWithMethod(url, 'GET', headers=headers) return(self._query(req)) def post(self, url, data, headers={}, type=None): req = None try: if self.AcceptTypes[type]: headers['Accept'] = self.AcceptTypes[type] headers['Content-Type'] = self.AcceptTypes[type] req = RequestWithMethod(url, 'POST', data=data, headers=headers) except Exception, e: req = RequestWithMethod(url, 'POST', data=data, headers=headers) #logger.exception(e) return(self._query(req)) def delete(self, url, data=None, headers={}, type=None): req = None try: if self.AcceptTypes[type]: headers['Accept'] = self.AcceptTypes[type] headers['Content-Type'] = self.AcceptTypes[type] req = RequestWithMethod(url, 'DELETE', headers=headers) except: req = RequestWithMethod(url, 'DELETE', headers=headers) 
return(self._query(req)) def put(self, url, data, headers={}, type=None): req = None try: if self.AcceptTypes[type]: headers['Accept'] = self.AcceptTypes[type] headers['Content-Type'] = self.AcceptTypes[type] req = RequestWithMethod(url, 'PUT', data=data, headers=headers) except: req = RequestWithMethod(url, 'PUT', data=data, headers=headers) return(self._query(req))
license: apache-2.0
hash: -4,183,367,405,335,792,600
line_mean: 28.620482
line_max: 142
alpha_frac: 0.580232
autogenerated: false
ratio: 3.847418
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: jiaphuan/models
path: research/astronet/astronet/astro_model/astro_model.py
copies: 1
size: 10261
content:
# Copyright 2018 The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A TensorFlow model for identifying exoplanets in astrophysical light curves. AstroModel is a concrete base class for models that identify exoplanets in astrophysical light curves. This class implements a simple linear model that can be extended by subclasses. The general framework for AstroModel and its subclasses is as follows: * Model inputs: - Zero or more time_series_features (e.g. astrophysical light curves) - Zero or more aux_features (e.g. orbital period, transit duration) * Labels: - An integer feature with 2 or more values (eg. 0 = Not Planet, 1 = Planet) * Model outputs: - The predicted probabilities for each label * Architecture: predictions ^ | logits ^ | (pre_logits_hidden_layers) ^ | pre_logits_concat ^ | (concatenate) ^ ^ | | (time_series_hidden_layers) (aux_hidden_layers) ^ ^ | | time_series_features aux_features Subclasses will typically override the build_time_series_hidden_layers() and/or build_aux_hidden_layers() functions. For example, a subclass could override build_time_series_hidden_layers() to apply convolutional layers to the time series features. In this class, those functions are simple concatenations of the input features. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import operator import tensorflow as tf class AstroModel(object): """A TensorFlow model for classifying astrophysical light curves.""" def __init__(self, features, labels, hparams, mode): """Basic setup. The actual TensorFlow graph is constructed in build(). Args: features: A dictionary containing "time_series_features" and "aux_features", each of which is a dictionary of named input Tensors. All features have dtype float32 and shape [batch_size, length]. labels: An int64 Tensor with shape [batch_size]. May be None if mode is tf.estimator.ModeKeys.PREDICT. hparams: A ConfigDict of hyperparameters for building the model. mode: A tf.estimator.ModeKeys to specify whether the graph should be built for training, evaluation or prediction. Raises: ValueError: If mode is invalid. """ valid_modes = [ tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT ] if mode not in valid_modes: raise ValueError("Expected mode in %s. Got: %s" % (valid_modes, mode)) self.hparams = hparams self.mode = mode # A dictionary of input Tensors. Values have dtype float32 and shape # [batch_size, length]. self.time_series_features = features.get("time_series_features", {}) # A dictionary of input Tensors. Values have dtype float32 and shape # [batch_size, length]. self.aux_features = features.get("aux_features", {}) # An int32 Tensor with shape [batch_size]. May be None if mode is # tf.estimator.ModeKeys.PREDICT. self.labels = labels # Optional Tensor; the weights corresponding to self.labels. self.weights = features.get("weights") # A Python boolean or a scalar boolean Tensor. 
Indicates whether the model # is in training mode for the purpose of graph ops, such as dropout. (Since # this might be a Tensor, its value is defined in build()). self.is_training = None # Global step Tensor. self.global_step = None # A dictionary of float32 Tensors with shape [batch_size, layer_size]; the # outputs of the time series hidden layers. self.time_series_hidden_layers = {} # A dictionary of float32 Tensors with shape [batch_size, layer_size]; the # outputs of the auxiliary hidden layers. self.aux_hidden_layers = {} # A float32 Tensor with shape [batch_size, layer_size]; the concatenation of # outputs from the hidden layers. self.pre_logits_concat = None # A float32 Tensor with shape [batch_size, output_dim]. self.logits = None # A float32 Tensor with shape [batch_size, output_dim]. self.predictions = None # A float32 Tensor with shape [batch_size]; the cross-entropy losses for the # current batch. self.batch_losses = None # Scalar Tensor; the total loss for the trainer to optimize. self.total_loss = None def build_time_series_hidden_layers(self): """Builds hidden layers for the time series features. Inputs: self.time_series_features Outputs: self.time_series_hidden_layers """ # No hidden layers. self.time_series_hidden_layers = self.time_series_features def build_aux_hidden_layers(self): """Builds hidden layers for the auxiliary features. Inputs: self.aux_features Outputs: self.aux_hidden_layers """ # No hidden layers. self.aux_hidden_layers = self.aux_features def build_logits(self): """Builds the model logits. Inputs: self.aux_hidden_layers self.time_series_hidden_layers Outputs: self.pre_logits_concat self.logits Raises: ValueError: If self.time_series_hidden_layers and self.aux_hidden_layers are both empty. """ # Sort the hidden layers by name because the order of dictionary items is # nondeterministic between invocations of Python. time_series_hidden_layers = sorted( self.time_series_hidden_layers.items(), key=operator.itemgetter(0)) aux_hidden_layers = sorted( self.aux_hidden_layers.items(), key=operator.itemgetter(0)) hidden_layers = time_series_hidden_layers + aux_hidden_layers if not hidden_layers: raise ValueError("At least one time series hidden layer or auxiliary " "hidden layer is required.") # Concatenate the hidden layers. if len(hidden_layers) == 1: pre_logits_concat = hidden_layers[0][1] else: pre_logits_concat = tf.concat( [layer[1] for layer in hidden_layers], axis=1, name="pre_logits_concat") net = pre_logits_concat with tf.variable_scope("pre_logits_hidden"): for i in range(self.hparams.num_pre_logits_hidden_layers): net = tf.layers.dense( inputs=net, units=self.hparams.pre_logits_hidden_layer_size, activation=tf.nn.relu, name="fully_connected_%s" % (i + 1)) if self.hparams.pre_logits_dropout_rate > 0: net = tf.layers.dropout( net, self.hparams.pre_logits_dropout_rate, training=self.is_training) # Identify the final pre-logits hidden layer as "pre_logits_hidden/final". tf.identity(net, "final") logits = tf.layers.dense( inputs=net, units=self.hparams.output_dim, name="logits") self.pre_logits_concat = pre_logits_concat self.logits = logits def build_predictions(self): """Builds the output predictions and losses. Inputs: self.logits Outputs: self.predictions """ # Use sigmoid activation function for binary classification, or softmax for # multi-class classification. 
prediction_fn = ( tf.sigmoid if self.hparams.output_dim == 1 else tf.nn.softmax) predictions = prediction_fn(self.logits, name="predictions") self.predictions = predictions def build_losses(self): """Builds the training losses. Inputs: self.logits self.labels Outputs: self.batch_losses self.total_loss """ if self.hparams.output_dim == 1: # Binary classification. batch_losses = tf.nn.sigmoid_cross_entropy_with_logits( labels=tf.to_float(self.labels), logits=tf.squeeze(self.logits, [1])) else: # Multi-class classification. batch_losses = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=self.labels, logits=self.logits) # Compute the weighted mean cross entropy loss and add it to the LOSSES # collection. weights = self.weights if self.weights is not None else 1.0 tf.losses.compute_weighted_loss( losses=batch_losses, weights=weights, reduction=tf.losses.Reduction.MEAN) # Compute the total loss, including any other losses added to the LOSSES # collection (e.g. regularization losses). total_loss = tf.losses.get_total_loss() self.batch_losses = batch_losses self.total_loss = total_loss def build(self): """Creates all ops for training, evaluation or inference.""" self.global_step = tf.train.get_or_create_global_step() if self.mode == tf.estimator.ModeKeys.TRAIN: # This is implemented as a placeholder Tensor, rather than a constant, to # allow its value to be feedable during training (e.g. to disable dropout # when performing in-process validation set evaluation). self.is_training = tf.placeholder_with_default(True, [], "is_training") else: self.is_training = False self.build_time_series_hidden_layers() self.build_aux_hidden_layers() self.build_logits() self.build_predictions() if self.mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]: self.build_losses()
license: apache-2.0
hash: 8,468,219,150,089,580,000
line_mean: 32.753289
line_max: 80
alpha_frac: 0.645941
autogenerated: false
ratio: 4.169443
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: pawelmhm/splash
path: splash/network_manager.py
copies: 1
size: 17016
content:
# -*- coding: utf-8 -*- from __future__ import absolute_import import base64 import itertools import functools from datetime import datetime import traceback from PyQt5.QtCore import QByteArray, QTimer from PyQt5.QtNetwork import ( QNetworkAccessManager, QNetworkProxyQuery, QNetworkRequest, QNetworkReply ) from twisted.python import log from splash.qtutils import qurl2ascii, REQUEST_ERRORS, get_request_webframe from splash.request_middleware import ( AdblockMiddleware, AllowedDomainsMiddleware, AllowedSchemesMiddleware, RequestLoggingMiddleware, AdblockRulesRegistry, ResourceTimeoutMiddleware, ResponseBodyTrackingMiddleware, ) from splash.response_middleware import ContentTypeMiddleware from splash import defaults from splash.utils import to_bytes from splash.cookies import SplashCookieJar class NetworkManagerFactory(object): def __init__(self, filters_path=None, verbosity=None, allowed_schemes=None): verbosity = defaults.VERBOSITY if verbosity is None else verbosity self.verbosity = verbosity self.request_middlewares = [] self.response_middlewares = [] self.adblock_rules = None # Initialize request and response middlewares allowed_schemes = (defaults.ALLOWED_SCHEMES if allowed_schemes is None else allowed_schemes.split(',')) if allowed_schemes: self.request_middlewares.append( AllowedSchemesMiddleware(allowed_schemes, verbosity=verbosity) ) if self.verbosity >= 2: self.request_middlewares.append(RequestLoggingMiddleware()) self.request_middlewares.append(AllowedDomainsMiddleware(verbosity=verbosity)) self.request_middlewares.append(ResourceTimeoutMiddleware()) self.request_middlewares.append(ResponseBodyTrackingMiddleware()) if filters_path is not None: self.adblock_rules = AdblockRulesRegistry(filters_path, verbosity=verbosity) self.request_middlewares.append( AdblockMiddleware(self.adblock_rules, verbosity=verbosity) ) self.response_middlewares.append(ContentTypeMiddleware(self.verbosity)) def __call__(self): manager = SplashQNetworkAccessManager( request_middlewares=self.request_middlewares, response_middlewares=self.response_middlewares, verbosity=self.verbosity, ) manager.setCache(None) return manager class ProxiedQNetworkAccessManager(QNetworkAccessManager): """ QNetworkAccessManager subclass with extra features. It * Enables "splash proxy factories" support. Qt provides similar functionality via setProxyFactory method, but standard QNetworkProxyFactory is not flexible enough. * Sets up extra logging. * Provides a way to get the "source" request (that was made to Splash itself). * Tracks information about requests/responses and stores it in HAR format, including response content. * Allows to set per-request timeouts. 
""" _REQUEST_ID = QNetworkRequest.User + 1 _SHOULD_TRACK = QNetworkRequest.User + 2 def __init__(self, verbosity): super(ProxiedQNetworkAccessManager, self).__init__() self.sslErrors.connect(self._on_ssl_errors) self.finished.connect(self._on_finished) self.verbosity = verbosity self._reply_timeout_timers = {} # requestId => timer self._default_proxy = self.proxy() self.cookiejar = SplashCookieJar(self) self.setCookieJar(self.cookiejar) self._response_bodies = {} # requestId => response content self._request_ids = itertools.count() assert self.proxyFactory() is None, "Standard QNetworkProxyFactory is not supported" def _on_ssl_errors(self, reply, errors): reply.ignoreSslErrors() def _on_finished(self, reply): reply.deleteLater() def createRequest(self, operation, request, outgoingData=None): """ This method is called when a new request is sent; it must return a reply object to work with. """ start_time = datetime.utcnow() # Proxies are managed per-request, so we're restoring a default # before each request. This assumes all requests go through # this method. self._clear_proxy() request, req_id = self._wrap_request(request) self._handle_custom_headers(request) self._handle_request_cookies(request) self._run_webpage_callbacks(request, 'on_request', request, operation, outgoingData) self._handle_custom_proxies(request) self._handle_request_response_tracking(request) har = self._get_har(request) if har is not None: har.store_new_request( req_id=req_id, start_time=start_time, operation=operation, request=request, outgoingData=outgoingData, ) reply = super(ProxiedQNetworkAccessManager, self).createRequest( operation, request, outgoingData ) if hasattr(request, 'timeout'): timeout = request.timeout * 1000 if timeout: self._set_reply_timeout(reply, timeout) if har is not None: har.store_new_reply(req_id, reply) reply.error.connect(self._on_reply_error) reply.finished.connect(self._on_reply_finished) if self._should_track_content(request): self._response_bodies[req_id] = QByteArray() reply.readyRead.connect(self._on_reply_ready_read) reply.metaDataChanged.connect(self._on_reply_headers) reply.downloadProgress.connect(self._on_reply_download_progress) return reply def _set_reply_timeout(self, reply, timeout_ms): request_id = self._get_request_id(reply.request()) # reply is used as a parent for the timer in order to destroy # the timer when reply is destroyed. It segfaults otherwise. 
timer = QTimer(reply) timer.setSingleShot(True) timer_callback = functools.partial(self._on_reply_timeout, reply=reply, timer=timer, request_id=request_id) timer.timeout.connect(timer_callback) self._reply_timeout_timers[request_id] = timer timer.start(timeout_ms) def _on_reply_timeout(self, reply, timer, request_id): self._reply_timeout_timers.pop(request_id) self.log("timed out, aborting: {url}", reply, min_level=1) # FIXME: set proper error code reply.abort() def _cancel_reply_timer(self, reply): request_id = self._get_request_id(reply.request()) timer = self._reply_timeout_timers.pop(request_id, None) if timer and timer.isActive(): timer.stop() def _clear_proxy(self): """ Init default proxy """ self.setProxy(self._default_proxy) def _wrap_request(self, request): req = QNetworkRequest(request) req_id = next(self._request_ids) req.setAttribute(self._REQUEST_ID, req_id) for attr in ['timeout', 'track_response_body']: if hasattr(request, attr): setattr(req, attr, getattr(request, attr)) return req, req_id def _handle_custom_proxies(self, request): proxy = None # proxies set in proxy profiles or `proxy` HTTP argument splash_proxy_factory = self._get_webpage_attribute(request, 'splash_proxy_factory') if splash_proxy_factory: proxy_query = QNetworkProxyQuery(request.url()) proxy = splash_proxy_factory.queryProxy(proxy_query)[0] self.setProxy(proxy) # proxies set in on_request if hasattr(request, 'custom_proxy'): proxy = request.custom_proxy self.setProxy(proxy) # Handle proxy auth. We're setting Proxy-Authorization header # explicitly because Qt loves to cache proxy credentials. if proxy is None: return user, password = proxy.user(), proxy.password() if not user and not password: return auth = b"Basic " + base64.b64encode("{}:{}".format(user, password).encode("utf-8")) request.setRawHeader(b"Proxy-Authorization", auth) def _handle_custom_headers(self, request): if self._get_webpage_attribute(request, "skip_custom_headers"): # XXX: this hack assumes that new requests between # BrowserTab._create_request and this function are not possible, # i.e. we don't give control to the event loop in between. # Unfortunately we can't store this flag on a request itself # because a new QNetworkRequest instance is created by QWebKit. self._set_webpage_attribute(request, "skip_custom_headers", False) return headers = self._get_webpage_attribute(request, "custom_headers") if isinstance(headers, dict): headers = headers.items() for name, value in headers or []: request.setRawHeader(to_bytes(name), to_bytes(value)) def _handle_request_cookies(self, request): self.cookiejar.update_cookie_header(request) def _handle_reply_cookies(self, reply): self.cookiejar.fill_from_reply(reply) def _handle_request_response_tracking(self, request): track = getattr(request, 'track_response_body', False) request.setAttribute(self._SHOULD_TRACK, track) def _should_track_content(self, request): return request.attribute(self._SHOULD_TRACK) def _get_request_id(self, request=None): if request is None: request = self.sender().request() return request.attribute(self._REQUEST_ID) def _get_har(self, request=None): """ Return HarBuilder instance. 
:rtype: splash.har_builder.HarBuilder | None """ if request is None: request = self.sender().request() return self._get_webpage_attribute(request, "har") def _get_webpage_attribute(self, request, attribute): web_frame = get_request_webframe(request) if web_frame: return getattr(web_frame.page(), attribute, None) def _set_webpage_attribute(self, request, attribute, value): web_frame = get_request_webframe(request) if web_frame: return setattr(web_frame.page(), attribute, value) def _on_reply_error(self, error_id): self._response_bodies.pop(self._get_request_id(), None) if error_id != QNetworkReply.OperationCanceledError: error_msg = REQUEST_ERRORS.get(error_id, 'unknown error') self.log('Download error %d: %s ({url})' % (error_id, error_msg), self.sender(), min_level=2) def _on_reply_ready_read(self): reply = self.sender() self._store_response_chunk(reply) def _store_response_chunk(self, reply): req_id = self._get_request_id(reply.request()) if req_id not in self._response_bodies: self.log("Internal problem in _store_response_chunk: " "request %s is not tracked" % req_id, reply, min_level=1) return chunk = reply.peek(reply.bytesAvailable()) self._response_bodies[req_id].append(chunk) def _on_reply_finished(self): reply = self.sender() request = reply.request() self._cancel_reply_timer(reply) har = self._get_har() har_entry, content = None, None if har is not None: req_id = self._get_request_id() # FIXME: what if har is None? When can it be None? # Who removes the content from self._response_bodies dict? content = self._response_bodies.pop(req_id, None) if content is not None: content = bytes(content) # FIXME: content is kept in memory at least twice, # as raw data and as a base64-encoded copy. har.store_reply_finished(req_id, reply, content) har_entry = har.get_entry(req_id) # We're passing HAR entry to the callbacks because reply object # itself doesn't have all information. # Content is passed in order to avoid decoding it from base64. self._run_webpage_callbacks(request, "on_response", reply, har_entry, content) self.log("Finished downloading {url}", reply) def _on_reply_headers(self): """Signal emitted before reading response body, after getting headers """ reply = self.sender() request = reply.request() self._handle_reply_cookies(reply) self._run_webpage_callbacks(request, "on_response_headers", reply) har = self._get_har() if har is not None: har.store_reply_headers_received(self._get_request_id(request), reply) self.log("Headers received for {url}", reply, min_level=3) def _on_reply_download_progress(self, received, total): har = self._get_har() if har is not None: req_id = self._get_request_id() har.store_reply_download_progress(req_id, received, total) if total == -1: total = '?' self.log("Downloaded %d/%s of {url}" % (received, total), self.sender(), min_level=4) def _on_reply_upload_progress(self, sent, total): # FIXME: is it used? har = self._get_har() if har is not None: req_id = self._get_request_id() har.store_request_upload_progress(req_id, sent, total) if total == -1: total = '?' self.log("Uploaded %d/%s of {url}" % (sent, total), self.sender(), min_level=4) def _get_render_options(self, request): return self._get_webpage_attribute(request, 'render_options') def _run_webpage_callbacks(self, request, event_name, *args): callbacks = self._get_webpage_attribute(request, "callbacks") if not callbacks: return for cb in callbacks.get(event_name, []): try: cb(*args) except: # TODO unhandled exceptions in lua callbacks # should we raise errors here? 
# https://github.com/scrapinghub/splash/issues/161 self.log("error in %s callback" % event_name, min_level=1) self.log(traceback.format_exc(), min_level=1, format_msg=False) def log(self, msg, reply=None, min_level=2, format_msg=True): if self.verbosity < min_level: return if not reply: url = '' else: url = qurl2ascii(reply.url()) if not url: return if format_msg: msg = msg.format(url=url) log.msg(msg, system='network-manager') class SplashQNetworkAccessManager(ProxiedQNetworkAccessManager): """ This QNetworkAccessManager provides: * proxy support; * request middleware support; * additional logging. """ def __init__(self, request_middlewares, response_middlewares, verbosity): super(SplashQNetworkAccessManager, self).__init__(verbosity=verbosity) self.request_middlewares = request_middlewares self.response_middlewares = response_middlewares def run_response_middlewares(self): reply = self.sender() reply.metaDataChanged.disconnect(self.run_response_middlewares) render_options = self._get_render_options(reply.request()) if render_options: try: for middleware in self.response_middlewares: middleware.process(reply, render_options) except: self.log("internal error in response middleware", min_level=1) self.log(traceback.format_exc(), min_level=1, format_msg=False) def createRequest(self, operation, request, outgoingData=None): # XXX: This method MUST return a reply, otherwise PyQT segfaults. render_options = self._get_render_options(request) if render_options: try: for middleware in self.request_middlewares: request = middleware.process(request, render_options, operation, outgoingData) except: self.log("internal error in request middleware", min_level=1) self.log(traceback.format_exc(), min_level=1, format_msg=False) reply = super(SplashQNetworkAccessManager, self).createRequest(operation, request, outgoingData) if render_options: reply.metaDataChanged.connect(self.run_response_middlewares) return reply
license: bsd-3-clause
hash: 1,547,243,669,720,539,400
line_mean: 37.497738
line_max: 104
alpha_frac: 0.622297
autogenerated: false
ratio: 4.146199
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: mefly2012/platform
path: src/clean_validate/zyktgg.py
copies: 1
size: 1453
content:
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

from common import public


class zyktgg():
    """开庭公告 (court hearing announcements)"""

    need_check_ziduan = ['main', 'city', 'bbd_dotime', 'title']

    def check_main(self, indexstr, ustr):
        """main 清洗验证 (cleaning check for 'main')"""
        ret = None
        if ustr and len(ustr):
            if not public.has_count_hz(ustr, 1):
                ret = u'不包含中文'  # contains no Chinese characters
        else:
            ret = u'为空'  # empty
        return ret

    def check_city(self, indexstr, ustr):
        """city 清洗验证 (cleaning check for 'city')"""
        ret = None
        if ustr and len(ustr):
            if ustr not in public.PROVINCE:
                ret = u'非法的省名'  # invalid province name
                pass
        else:
            ret = u'为空'  # empty
        return ret

    def check_bbd_dotime(self, indexstr, ustr):
        """do_time 清洗验证 (cleaning check for 'do_time')"""
        ret = None
        if ustr and len(ustr):
            if not public.bbd_dotime_date_format(ustr):
                ret = u"不合法日期"  # invalid date
        return ret

    def check_title(self, indexstr, ustr):
        """title 清洗验证 (cleaning check for 'title')"""
        ret = None
        if ustr and len(ustr):
            if all(not public.is_chinese(c) for c in ustr):
                ret = u'没有中文'  # no Chinese characters
            elif not len(ustr) >= 5:
                ret = u'不够5个字以上'  # fewer than 5 characters
        return ret
license: apache-2.0
hash: 8,685,627,748,616,619,000
line_mean: 23.636364
line_max: 59
alpha_frac: 0.451661
autogenerated: false
ratio: 3.188235
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: libvirt/libvirt-test-API
path: libvirttestapi/repos/domain/save.py
copies: 1
size: 2922
content:
# Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# Save domain as a statefile

import os

import libvirt
from libvirt import libvirtError

from libvirttestapi.src import sharedmod
from libvirttestapi.utils import utils

required_params = ('guestname', 'filepath',)
optional_params = {}


def get_guest_ipaddr(*args):
    """Get guest ip address"""
    (guestname, logger) = args

    mac = utils.get_dom_mac_addr(guestname)
    logger.debug("guest mac address: %s" % mac)

    ipaddr = utils.mac_to_ip(mac, 15)
    logger.debug("guest ip address: %s" % ipaddr)

    if utils.do_ping(ipaddr, 20) == 1:
        logger.info("ping current guest successfully")
        return ipaddr
    else:
        logger.error("Error: can't ping current guest")
        return None


def check_guest_status(*args):
    """Check guest current status"""
    (domobj, logger) = args

    state = domobj.info()[0]
    logger.debug("current guest status: %s" % state)

    if state == libvirt.VIR_DOMAIN_SHUTOFF or \
       state == libvirt.VIR_DOMAIN_SHUTDOWN or \
       state == libvirt.VIR_DOMAIN_BLOCKED:
        return False
    else:
        return True


def check_guest_save(*args):
    """Check save domain result: if saving the domain succeeded,
       guestname.save will exist under the /tmp directory, the guest
       can't be pinged, and its status is paused
    """
    (guestname, domobj, logger) = args

    if not check_guest_status(domobj, logger):
        if not get_guest_ipaddr(guestname, logger):
            return True
        else:
            return False
    else:
        return False


def save(params):
    """Save domain to a disk file"""
    logger = params['logger']
    guestname = params['guestname']
    filepath = params['filepath']

    conn = sharedmod.libvirtobj['conn']
    domobj = conn.lookupByName(guestname)

    # Save domain
    ipaddr = get_guest_ipaddr(guestname, logger)

    if not check_guest_status(domobj, logger):
        logger.error("Error: current guest status is shutoff")
        return 1

    if not ipaddr:
        logger.error("Error: can't get guest ip address")
        return 1

    try:
        domobj.save(filepath)
        if check_guest_save(guestname, domobj, logger):
            logger.info("save %s domain successful" % guestname)
        else:
            logger.error("Error: fail to check save domain")
            return 1
    except libvirtError as e:
        logger.error("API error message: %s, error code is %s" %
                     (e.get_error_message(), e.get_error_code()))
        logger.error("Error: fail to save %s domain" % guestname)
        return 1

    return 0


def save_clean(params):
    """ clean testing environment """
    logger = params['logger']
    filepath = params['filepath']

    if os.path.exists(filepath):
        logger.info("remove dump file from save %s" % filepath)
        os.remove(filepath)
license: gpl-2.0
hash: -1,875,613,081,637,927,700
line_mean: 26.055556
line_max: 67
alpha_frac: 0.629363
autogenerated: false
ratio: 3.824607
config_test: false
has_no_keywords: false
few_assignments: false

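The save.py record above drives libvirt's domain save through project helpers. A minimal sketch of the underlying libvirt-python calls it relies on; the connection URI, guest name, and state-file path are assumptions for illustration:

import libvirt

conn = libvirt.open("qemu:///system")   # hypothetical hypervisor URI
dom = conn.lookupByName("guest01")      # hypothetical guest name
dom.save("/tmp/guest01.save")           # suspend the guest and write its state to disk
conn.restore("/tmp/guest01.save")       # later: recreate the guest from the state file
conn.close()
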
repo_name: noelevans/sandpit
path: fivethiryeight/riddler_casino.py
copies: 1
size: 1180
content:
""" Suppose a casino invents a new game that you must pay $250 to play. The game works like this: The casino draws random numbers between 0 and 1, from a uniform distribution. It adds them together until their sum is greater than 1, at which time it stops drawing new numbers. You get a payout of $100 each time a new number is drawn. For example, suppose the casino draws 0.4 and then 0.7. Since the sum is greater than 1, it will stop after these two draws, and you receive $200. If instead it draws 0.2, 0.3, 0.3, and then 0.6, it will stop after the fourth draw and you will receive $400. Given the $250 entrance fee, should you play the game? Specifically, what is the expected value of your winnings? From: http://fivethirtyeight.com/features/ should-you-pay-250-to-play-this-casino-game """ import numpy as np def trial(): total = 0 spins = 0 while total < 1: total += np.random.random() spins += 1 return spins def main(): n = 10000000 dollar_return = (np.mean([trial() for _ in range(n)])) return_on_stake = 100 * dollar_return print(return_on_stake) if __name__ == '__main__': main()
license: mit
hash: -3,011,533,244,456,876,500
line_mean: 27.780488
line_max: 78
alpha_frac: 0.677119
autogenerated: false
ratio: 3.323944
config_test: false
has_no_keywords: false
few_assignments: false

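The riddler_casino.py record above estimates the answer by simulation. Analytically, the expected number of Uniform(0,1) draws needed for the running sum to exceed 1 is e, so the expected payout is 100 * e, roughly $271.83, above the $250 entry fee; a one-line check of that closed form:

import math

# Expected number of Uniform(0,1) draws until the running sum exceeds 1 is e,
# so the expected payout is 100 * e; the simulation above should converge to this.
print(round(100 * math.e, 2))  # 271.83
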
repo_name: sunlightlabs/sarahs_inbox
path: mail/views.py
copies: 1
size: 8502
content:
from django.shortcuts import render_to_response, get_object_or_404 from django.template import RequestContext from django.core.paginator import Paginator from django.http import HttpResponse, HttpResponseRedirect from urllib import unquote from haystack.query import SearchQuerySet from mail.models import * from django.db.models import Q from django.core.urlresolvers import reverse from django.core.cache import cache import re RESULTS_PER_PAGE = 50 def _search_string(request): return request.GET.get('q', None) def _search_tokens(request): s = _search_string(request) if s is None: return [] # protection! re_sanitize = re.compile(r'[^\w\d\s\'"\,\.\?\$]', re.I) s = re_sanitize.sub('', s) tokens = [] re_quotes = re.compile(r'\"([^\"]+)\"') for m in re_quotes.findall(s): tokens.append(m.replace('"','').strip()) s = s.replace('"%s"' % m, '') for t in s.split(' '): tokens.append(t.strip()) while '' in tokens: tokens.remove('') return tokens def _highlight(text, tokens): regexes = [] sorted_tokens = sorted(tokens, key=lambda x: len(x)) for t in sorted_tokens: regexes.append(re.compile(r'(%s)' % t.replace(' ', r'\s+'), re.I)) for r in regexes: text = r.sub('<span class="highlight">\\1</span>', text) return text def _prepare_ids_from_cookie(request, cookie_name, method=None): if method == 'post': cookie = unquote(request.POST.get(cookie_name, '')).replace(',,', ',') else: cookie = unquote(request.COOKIES.get(cookie_name,'')).replace(',,', ',') print cookie if len(cookie)>1: if cookie[0]==',': cookie = cookie[1:] if cookie[-1]==',': cookie = cookie[:-1] try: id_list = map(lambda x: (x!='') and int(x) or 0, cookie.split(',')) except: id_list = [] return id_list def _annotate_emails(emails, search=[]): r = [] for email in emails: email.text = _highlight(email.text, search) r.append({ 'creator_html': email.creator_html(), 'to_html': email.to_html(), 'cc_html': email.cc_html(), 'obj': email }) return r def index(request, search=[], threads=None): if threads is None: palin = Person.objects.sarah_palin() threads = Thread.objects.exclude(creator__in=palin).order_by('-date') threads_count = threads.count() p = Paginator(threads, RESULTS_PER_PAGE) page_num = 1 try: page_num = int(request.GET.get('page', 1)) except: pass page = p.page(page_num) highlighted_threads = [] for thread in page.object_list: if (threads is not None) and type(threads) is SearchQuerySet: # deal with searchqueryset objects thread = thread.object thread.name = _highlight(thread.name, search) highlighted_threads.append(thread) template_vars = { 'range': "<strong>%d</strong> - <strong>%d</strong> of <strong>%d</strong>" % (page.start_index(), page.end_index(), threads_count), 'num_pages': p.num_pages , 'next': page_num<p.num_pages and min(p.num_pages,page_num+1) or False, 'prev': page_num>1 and max(1, page_num-1) or False, 'first': '1', 'last': p.num_pages, 'current_page': page_num, 'threads': highlighted_threads, 'search': " ".join(search), 'search_orig': (_search_string(request) is not None) and _search_string(request) or '', 'path': request.path, } return render_to_response('index.html', template_vars, context_instance=RequestContext(request)) def sent(request): kagan = Person.objects.elena_kagan() emails = Thread.objects.filter(creator=kagan).order_by('-date') return index(request, threads=emails) def contact_by_id(request, contact_id, suppress_redirect=False): cache_key = 'contact_%d' % int(contact_id) threads = cache.get(cache_key) if threads is None: try: person = Person.objects.get(id=contact_id) except Person.DoesNotExist, e: return 
HttpResponseRedirect(reverse('mail.views.index')) if person.merged_into is not None: return HttpResponseRedirect('/contact/%d/' % person.merged_into.id) threads = [] emails = Email.objects.filter(Q(to=person)|Q(cc=person)) for e in emails: if e.email_thread is not None: threads.append(e.email_thread.id) threads = Thread.objects.filter(id__in=threads).order_by('-date') cache.set(cache_key, threads) return index(request, threads=threads) def contact_by_name(request, contact_name): try: contact = Person.objects.get(slug=contact_name) except Person.DoesNotExist, e: return HttpResponseRedirect(reverse('mail.views.contacts_index')) except Thread.MultipleObjectsReturned, e: return HttpResponseRedirect(reverse('mail.views.contacts_index')) return contact_by_id(request, contact.id, suppress_redirect=True) def contacts_index(request): return index(request) def thread_by_id(request, thread_id, suppress_redirect=False): try: thread = Thread.objects.get(id=thread_id) except Thread.DoesNotExist, e: return HttpResponseRedirect(reverse('mail.views.index')) # if merged thread, redirect if thread.merged_into is not None: return HttpResponseRedirect('/thread/%d/' % thread.merged_into.id) # if access by ID, redirect to descriptive URL if (not suppress_redirect) and (len(thread.slug.strip())>3): return HttpResponseRedirect('/thread/%s/' % thread.slug) search = _search_tokens(request) thread_starred = thread.id in _prepare_ids_from_cookie(request, 'kagan_star') emails = _annotate_emails(Email.objects.filter(email_thread=thread).order_by('creation_date_time'), search) return render_to_response('thread.html', {'thread': thread, 'thread_starred': thread_starred, 'emails': emails }, context_instance=RequestContext(request)) def thread_by_name(request, thread_name): try: thread = Thread.objects.get(slug=thread_name) except Thread.DoesNotExist, e: return HttpResponseRedirect(reverse('mail.views.index')) except Thread.MultipleObjectsReturned, e: return HttpResponseRedirect(reverse('mail.views.index')) return thread_by_id(request, thread.id, suppress_redirect=True) def search(request): tokens = _search_tokens(request) if len(tokens) is None: return HttpResponseRedirect(reverse('mail.views.index')) sqs = SearchQuerySet().models(Thread) for t in tokens: sqs = sqs.filter_or(text_and_recipients=t) sqs = sqs.order_by('-date') if sqs.count()==0: return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request)) return index(request, search=tokens, threads=sqs) def star_record_ajax(request, thread_id, action): try: thread = Thread.objects.get(id=thread_id) except Thread.DoesNotExist, e: return HttpResponse('{ status: \'not_found\'}'); if thread.star_count is None: thread.star_count = 0 if action=='add': thread.star_count += 1 elif action=='remove': thread.star_count -= 1 thread.save() return HttpResponse('{ status: \'success\'}') def starred(request): if not request.POST.get('kagan_star'): return HttpResponseRedirect(reverse('mail.views.index')) starred_ids = _prepare_ids_from_cookie(request, 'kagan_star', method='post') if len(starred_ids)==0: return HttpResponseRedirect(reverse('mail.views.index')) starred = Thread.objects.filter(id__in=starred_ids).order_by('-date') if starred.count()==0: return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request)) else: return index(request, threads=starred) return index(request, threads=starred) def starred_all(request): starred = 
Thread.objects.filter(star_count__gt=0).order_by('-star_count','-date') if starred.count()==0: return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request)) else: return index(request, threads=starred)
license: bsd-3-clause
hash: 8,381,273,969,921,054,000
line_mean: 32.738095
line_max: 159
alpha_frac: 0.631498
autogenerated: false
ratio: 3.683709
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: nbeck90/city-swap
path: cityswap/requests/migrations/0001_initial.py
copies: 1
size: 1445
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-11 16:52
from __future__ import unicode_literals

import datetime
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('profiles', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField(default=b'Type your title here')),
                ('description', models.TextField(default=b'Type your description here')),
                ('origin', models.CharField(choices=[(b'Seattle', b'Seattle'), (b'Portland', b'Portland')], default=b'Seattle', max_length=25)),
                ('destination', models.CharField(choices=[(b'Seattle', b'Seattle'), (b'Portland', b'Portland')], default=b'Seattle', max_length=25)),
                ('date_created', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('courier', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='requests', to='profiles.Profile')),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_from', to='profiles.Profile')),
            ],
        ),
    ]
license: mit
hash: 4,474,503,118,401,711,600
line_mean: 44.15625
line_max: 163
alpha_frac: 0.624913
autogenerated: false
ratio: 3.96978
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: ingmarlehmann/franca-tools
path: franca_parser/franca_parser/franca_ast.py
copies: 1
size: 15547
content:
#------------------------------------------------------------------------------ # franca_parser: franca_ast.py # # AST node classes: AST node classes for Franca IDL (*.fidl). # Builds an AST to be used in other tools. # # This code is *heavlily* inspired by 'pycparser' by Eli Bendersky # (https://github.com/eliben/pycparser/) # # Copyright (C) 2016, Ingmar Lehmann # License: BSD #------------------------------------------------------------------------------ import sys class Node(object): def __init__(self): print ("node constructor") def children(self): pass def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None): """ Pretty print the Node and all its attributes and children (recursively) to a buffer. buf: Open IO buffer into which the Node is printed. offset: Initial offset (amount of leading spaces) attrnames: True if you want to see the attribute names in name=value pairs. False to only see the values. nodenames: True if you want to see the actual node names within their parents. showcoord: Do you want the coordinates of each Node to be displayed. """ lead = ' ' * offset if nodenames and _my_node_name is not None: buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ') else: buf.write(lead + self.__class__.__name__+ ': ') if self.attr_names: if attrnames: nvlist = [(n, getattr(self,n)) for n in self.attr_names] attrstr = ', '.join('%s=%s' % nv for nv in nvlist) else: vlist = [getattr(self, n) for n in self.attr_names] attrstr = ', '.join('%s' % v for v in vlist) buf.write(attrstr) if showcoord: buf.write(' (at %s)' % self.coord) buf.write('\n') for (child_name, child) in self.children(): child.show( buf, offset=offset + 2, attrnames=attrnames, nodenames=nodenames, showcoord=showcoord, _my_node_name=child_name) class ArrayTypeDeclaration(Node): def __init__(self, typename, type, dimension): self.typename = typename self.type = type self.dimension = dimension def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) if self.typename is not None: nodelist.append(("typename", self.typename)) return tuple(nodelist) attr_names = ('dimension',) class Attribute(Node): def __init__(self, typename, name): self.typename = typename self.name = name def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.typename is not None: nodelist.append(("typename", self.typename)) return tuple(nodelist) attr_names = () class BroadcastMethod(Node): def __init__(self, name, comment, out_args, is_selective=False): self.name = name self.comment = comment self.out_args = out_args self.is_selective = is_selective def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.comment is not None: nodelist.append(("comment", self.comment)) if self.out_args is not None: nodelist.append(("out_args", self.out_args)) return tuple(nodelist) attr_names = ('is_selective',) class ComplexTypeDeclarationList(Node): def __init__(self, members): self.members = members def children(self): nodelist = [] for i, child in enumerate(self.members or []): nodelist.append(("members[%d]" % i, child)) return tuple(nodelist) attr_names = () class Constant(Node): def __init__(self, comment): self.value = value def children(self): return tuple() attr_names = ('value',) class Enum(Node): def __init__(self, name, values, comment=None): self.name = name self.values = values self.comment = comment def children(self): nodelist = [] if self.name is 
not None: nodelist.append(("name", self.name)) if self.values is not None: nodelist.append(("values", self.values)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class Enumerator(Node): def __init__(self, name, value=None, comment=None): self.name = name self.value = value self.comment = comment def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.value is not None: nodelist.append(("value", self.value)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class EnumeratorList(Node): def __init__(self, enumerators): self.enumerators = enumerators def children(self): nodelist = [] for i, child in enumerate(self.enumerators or []): nodelist.append(("enumerators[%d]" % i, child)) return tuple(nodelist) attr_names = () class FrancaComment(Node): def __init__(self, comment): self.comment = comment def children(self): return tuple() attr_names = ('comment',) class FrancaDocument(Node): def __init__(self, package_identifier, imports, child_objects): self.package_identifier = package_identifier self.imports = imports self.child_objects = child_objects def children(self): nodelist = [] if self.package_identifier is not None: nodelist.append(("package_identifier", self.package_identifier)) if self.imports is not None: nodelist.append(("imports", self.imports)) if self.child_objects is not None: nodelist.append(("child_objects", self.child_objects)) return tuple(nodelist) attr_names = () class ID(Node): def __init__(self, id): self.id = id def children(self): return tuple() attr_names = ('id',) class ImportIdentifier(Node): def __init__(self, import_identifier): self.import_identifier = import_identifier def children(self): return tuple() attr_names = ('import_identifier',) class ImportStatement(Node): def __init__(self, import_identifier, filename): self.import_identifier = import_identifier self.filename = filename def children(self): nodelist = [] if self.import_identifier is not None: nodelist.append(("import_identifier", self.import_identifier)) if self.filename is not None: nodelist.append(("filename", self.filename)) return tuple(nodelist) attr_names = () class ImportStatementList(Node): def __init__(self, members): self.members = members def children(self): nodelist = [] for i, child in enumerate(self.members or []): nodelist.append(("imports[%d]" % i, child)) return tuple(nodelist) attr_names = () class IntegerConstant(Node): def __init__(self, value): self.value = value def children(self): return tuple() attr_names = ('value',) class Interface(Node): def __init__(self, name, members, comment=None): self.name = name self.members = members self.comment = comment def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.members is not None: nodelist.append(("members", self.members)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class Map(Node): def __init__(self, name, key_type, value_type, comment=None): self.name = name self.key_type = key_type self.value_type = value_type self.comment = comment def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.key_type is not None: nodelist.append(("key_type", self.key_type)) if self.value_type is not None: nodelist.append(("value_type", self.value_type)) if self.comment is not None: nodelist.append(("comment", 
self.comment)) return tuple(nodelist) attr_names = () class Method(Node): def __init__(self, name, comment, body, is_fire_and_forget=False): self.name = name self.comment = comment self.body = body self.is_fire_and_forget = is_fire_and_forget def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.comment is not None: nodelist.append(("comment", self.comment)) if self.body is not None: nodelist.append(("body", self.body)) return tuple(nodelist) attr_names = ('is_fire_and_forget',) class MethodBody(Node): def __init__(self, in_args, out_args): self.in_args = in_args self.out_args = out_args def children(self): nodelist = [] if self.in_args is not None: nodelist.append(("in_args", self.in_args)) if self.out_args is not None: nodelist.append(("out_args", self.out_args)) return tuple(nodelist) attr_names = () class MethodArgument(Node): def __init__(self, type, name, comment=None): self.type = type self.name = name self.comment = comment def children(self): nodelist = [] if self.type is not None: nodelist.append(("type", self.type)) if self.name is not None: nodelist.append(("name", self.name)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class MethodArgumentList(Node): def __init__(self, args): self.args = args def children(self): nodelist = [] for i, child in enumerate(self.args or []): nodelist.append(("args[%d]" % i, child)) return tuple(nodelist) attr_names = () class MethodOutArguments(Node): def __init__(self, args): self.args = args def children(self): nodelist = [] if self.args is not None: nodelist.append(("args", self.args)) return tuple(nodelist) attr_names = () class MethodInArguments(Node): def __init__(self, args): self.args = args def children(self): nodelist = [] if self.args is not None: nodelist.append(("args", self.args)) return tuple(nodelist) attr_names = () class PackageStatement(Node): def __init__(self, package_identifier): self.package_identifier = package_identifier def children(self): nodelist = [] if self.package_identifier is not None: nodelist.append(("package_identifier", self.package_identifier)) return tuple(nodelist) attr_names = () class PackageIdentifier(Node): def __init__(self, package_identifier): self.package_identifier = package_identifier def children(self): return tuple() attr_names = ('package_identifier',) class RootLevelObjectList(Node): def __init__(self, root_level_objects): self.members = root_level_objects def children(self): nodelist = [] for i, child in enumerate(self.members or []): nodelist.append(("root_objects[%d]" % i, child)) return tuple(nodelist) attr_names = () class String(Node): def __init__(self, string): self.string = string def children(self): return tuple() attr_names = ('string',) class Struct(Node): def __init__(self, name, struct_members, comment=None): self.name = name self.struct_members = struct_members self.comment = comment def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.struct_members is not None: nodelist.append(("struct_members", self.struct_members)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class TypeCollection(Node): def __init__(self, name, members, comment=None): self.name = name self.members = members self.comment = comment def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.members is not None: 
nodelist.append(("members", self.members)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class Typedef(Node): def __init__(self, existing_type, new_type): self.existing_type = existing_type self.new_type = new_type def children(self): nodelist = [] if self.existing_type is not None: nodelist.append(("existing_type", self.existing_type)) if self.new_type is not None: nodelist.append(("new_type", self.new_type)) return tuple(nodelist) attr_names = () class Typename(Node): def __init__(self, typename): self.typename = typename def children(self): nodelist = [] if self.typename is not None and isinstance(self.typename, Node): nodelist.append(("typename", self.typename)) return tuple(nodelist) attr_names = ('typename',) class Union(Node): def __init__(self, name, member_list, comment=None): self.name = name self.member_list = member_list self.comment = comment def children(self): nodelist = [] if self.name is not None: nodelist.append(("name", self.name)) if self.member_list is not None: nodelist.append(("member_list", self.member_list)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class Variable(Node): def __init__(self, typename, name, comment): self.typename = typename self.name = name self.comment = comment def children(self): nodelist = [] if self.typename is not None: nodelist.append(("typename", self.typename)) if self.name is not None: nodelist.append(("name", self.name)) if self.comment is not None: nodelist.append(("comment", self.comment)) return tuple(nodelist) attr_names = () class VariableList(Node): def __init__(self, members): self.members = members def children(self): nodelist = [] for i, child in enumerate(self.members or []): nodelist.append(("members[%d]" % i, child)) return tuple(nodelist) attr_names = () class Version(Node): def __init__(self, major, minor): self.major = major self.minor = minor def children(self): nodelist = [] if self.major is not None: nodelist.append(("major", self.major)) if self.minor is not None: nodelist.append(("minor", self.minor)) return tuple(nodelist) attr_names = ()
mpl-2.0
2,878,004,356,479,588,400
29.247082
118
0.589631
false
4.06139
false
false
false
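A minimal usage sketch for the AST node classes above, assuming the Node base class defined earlier in the same file; it only uses classes shown in this excerpt:

def walk(node, depth=0):
    # Print an indented tree of class names by following the children() protocol.
    print("    " * depth + type(node).__name__)
    for _label, child in node.children():
        walk(child, depth + 1)

# MethodArgument wraps a Typename and a String, both defined above.
walk(MethodArgument(type=Typename("UInt32"), name=String("count")))
# MethodArgument
#     Typename
#     String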
nuobit/odoo-addons
connector_oxigesti/components_custom/binder.py
1
8847
# -*- coding: utf-8 -*- # Copyright 2013-2017 Camptocamp SA # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html) """ Binders ======= Binders are components that know how to find the external ID for an Odoo ID, how to find the Odoo ID for an external ID and how to create the binding between them. """ import psycopg2 import json from odoo import fields, models, tools from odoo.addons.component.core import AbstractComponent from contextlib import contextmanager from odoo.addons.connector.exception import (RetryableJobError, ) import odoo class BinderComposite(AbstractComponent): """ The same as Binder but allowing composite external keys """ _name = 'base.binder.composite' _inherit = 'base.binder' _default_binding_field = 'oxigesti_bind_ids' _external_display_field = 'external_id_display' _odoo_extra_fields = [] @contextmanager def _retry_unique_violation(self): """ Context manager: catch Unique constraint error and retry the job later. When we execute several jobs workers concurrently, it happens that 2 jobs are creating the same record at the same time (binding record created by :meth:`_export_dependency`), resulting in: IntegrityError: duplicate key value violates unique constraint "my_backend_product_product_odoo_uniq" DETAIL: Key (backend_id, odoo_id)=(1, 4851) already exists. In that case, we'll retry the import just later. .. warning:: The unique constraint must be created on the binding record to prevent 2 bindings to be created for the same External record. """ try: yield except psycopg2.IntegrityError as err: if err.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION: raise RetryableJobError( 'A database error caused the failure of the job:\n' '%s\n\n' 'Likely due to 2 concurrent jobs wanting to create ' 'the same record. The job will be retried later.' % err) else: raise def _is_binding(self, binding): try: binding._fields[self._odoo_field] except KeyError: return False return True def _find_binding(self, relation, binding_extra_vals={}): if self._is_binding(relation): raise Exception("The source object %s must not be a binding" % relation.model._name) if not set(self._odoo_extra_fields).issubset(set(binding_extra_vals.keys())): raise Exception("If _odoo_extra_fields are defined %s, " "you must specify the correpsonding binding_extra_vals %s" % ( self._odoo_extra_fields, binding_extra_vals)) domain = [(self._odoo_field, '=', relation.id), (self._backend_field, '=', self.backend_record.id)] for f in self._odoo_extra_fields: domain.append((f, '=', binding_extra_vals[f])) binding = self.model.with_context( active_test=False).search(domain) if binding: binding.ensure_one() return binding def wrap_binding(self, relation, binding_field=None, binding_extra_vals={}): if not relation: return if binding_field is None: if not self._default_binding_field: raise Exception("_default_binding_field defined on synchronizer class is mandatory") binding_field = self._default_binding_field # wrap is typically True if the relation is a 'product.product' # record but the binding model is 'oxigesti.product.product' wrap = relation._name != self.model._name if wrap and hasattr(relation, binding_field): binding = self._find_binding(relation, binding_extra_vals) if not binding: # we are working with a unwrapped record (e.g. # product.template) and the binding does not exist yet. # Example: I created a product.product and its binding # oxigesti.product.product, it is exported, but we need to # create the binding for the template. 
_bind_values = {self._odoo_field: relation.id, self._backend_field: self.backend_record.id} _bind_values.update(binding_extra_vals) # If 2 jobs create it at the same time, retry # one later. A unique constraint (backend_id, # odoo_id) should exist on the binding model with self._retry_unique_violation(): binding = (self.model .with_context(connector_no_export=True) .sudo() .create(_bind_values)) # Eager commit to avoid having 2 jobs # exporting at the same time. The constraint # will pop if an other job already created # the same binding. It will be caught and # raise a RetryableJobError. if not odoo.tools.config['test_enable']: self.env.cr.commit() # nowait else: # If oxigest_bind_ids does not exist we are typically in a # "direct" binding (the binding record is the same record). # If wrap is True, relation is already a binding record. binding = relation if not self._is_binding(binding): raise Exception( "Expected binding '%s' and found regular model '%s'" % (self.model._name, relation._name)) return binding def to_internal(self, external_id, unwrap=False): """ Give the Odoo recordset for an external ID :param external_id: external ID for which we want the Odoo ID :param unwrap: if True, returns the normal record else return the binding record :return: a recordset, depending on the value of unwrap, or an empty recordset if the external_id is not mapped :rtype: recordset """ domain = [(self._backend_field, '=', self.backend_record.id), (self._external_display_field, '=', json.dumps(external_id))] bindings = self.model.with_context(active_test=False).search( domain ) if not bindings: if unwrap: return self.model.browse()[self._odoo_field] return self.model.browse() bindings.ensure_one() if unwrap: bindings = bindings[self._odoo_field] return bindings def to_external(self, binding, wrap=False, wrapped_model=None, binding_extra_vals={}): """ Give the external ID for an Odoo binding ID :param binding: Odoo binding for which we want the external id :param wrap: if True, binding is a normal record, the method will search the corresponding binding and return the external id of the binding :return: external ID of the record """ if isinstance(binding, models.BaseModel): binding.ensure_one() else: if wrap: if not wrapped_model: raise Exception("The wrapped model is mandatory if binding is not an object") binding = self.env[wrapped_model].browse(binding) else: binding = self.model.browse(binding) if wrap: binding = self._find_binding(binding, binding_extra_vals) if not binding: return None return binding[self._external_field] or None def bind(self, external_id, binding): """ Create the link between an external ID and an Odoo ID :param external_id: external id to bind :param binding: Odoo record to bind :type binding: int """ # Prevent False, None, or "", but not 0 assert (external_id or external_id is 0) and binding, ( "external_id or binding missing, " "got: %s, %s" % (external_id, binding) ) # avoid to trigger the export when we modify the `external_id` now_fmt = fields.Datetime.now() if isinstance(binding, models.BaseModel): binding.ensure_one() else: binding = self.model.browse(binding) binding.with_context(connector_no_export=True).write({ self._external_field: external_id, self._sync_date_field: now_fmt, }) def _get_external_id(self, binding): return None
agpl-3.0
2,360,778,247,693,015,600
38.851351
106
0.58144
false
4.523006
false
false
false
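A small sketch of how the composite external keys handled by BinderComposite above end up in a search domain; the key and backend id are made-up values, only the json.dumps serialization mirrors to_internal():

import json

external_id = ["ART-001", 42]  # hypothetical composite external key
domain = [
    ("backend_id", "=", 1),  # hypothetical backend_record.id
    ("external_id_display", "=", json.dumps(external_id)),  # '["ART-001", 42]'
]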
helfertool/helfertool
src/registration/models/shift.py
1
7529
from django.core.validators import MinValueValidator from django.db import models from django.db.models.signals import pre_delete from django.dispatch import receiver from django.template.defaultfilters import date as date_f from django.utils.timezone import localtime from django.utils.translation import ugettext_lazy as _ from collections import OrderedDict from copy import deepcopy from datetime import datetime import math class Shift(models.Model): """ A shift of one job. Columns: :job: job of this shift :begin: begin of the shift :end: end of the shift :number: number of people :blocked: shift is blocked, if the job is public :hidden: shift is not displayed publicly :name: name of the shift (optional) """ class Meta: ordering = ['job', 'begin', 'end'] job = models.ForeignKey( 'Job', on_delete=models.CASCADE, ) name = models.CharField( max_length=200, verbose_name=_("Name (optional)"), default="", blank=True, ) begin = models.DateTimeField( verbose_name=_("Begin"), ) end = models.DateTimeField( verbose_name=_("End"), ) number = models.IntegerField( default=0, verbose_name=_("Number of helpers"), validators=[MinValueValidator(0)], ) blocked = models.BooleanField( default=False, verbose_name=_("The shift is blocked and displayed as full."), ) hidden = models.BooleanField( default=False, verbose_name=_("The shift is not visible."), ) gifts = models.ManyToManyField( 'gifts.GiftSet', verbose_name=_("Gifts"), blank=True, ) archived_number = models.IntegerField( default=0, verbose_name=_("Number of registered helpers for archived event"), ) def __str__(self): if self.name: return "%s, %s, %s" % (self.job.name, self.name, self.time_with_day()) else: return "%s, %s" % (self.job.name, self.time_with_day()) def time(self): """ Returns a string representation of the begin and end time. The begin contains the date and time, the end only the time. """ return "%s, %s - %s" % (date_f(localtime(self.begin), 'DATE_FORMAT'), date_f(localtime(self.begin), 'TIME_FORMAT'), date_f(localtime(self.end), 'TIME_FORMAT')) def time_hours(self): """ Returns a string representation of the begin and end time. Only the time is used, the date is not shown. """ return "%s - %s" % (date_f(localtime(self.begin), 'TIME_FORMAT'), date_f(localtime(self.end), 'TIME_FORMAT')) def time_with_day(self): """ Returns a string representation of the day. If the shift is on two days only the name of the first day is returned. """ day = date_f(localtime(self.begin), "l") return "{}, {}".format(day, self.time()) def date(self): """ Returns the day on which the shifts begins. """ return localtime(self.begin).date() def num_helpers(self): """ Returns the current number of helpers, but 0 if event is archived. """ return self.helper_set.count() def num_helpers_archived(self): """ Returns the current number of helpers- """ if self.job.event.archived: return self.archived_number else: return self.helper_set.count() def is_full(self): """ Check if the shift is full and return a boolean. """ return self.num_helpers() >= self.number def helpers_percent(self): """ Calculate the percentage of registered helpers and returns an int. If the maximal number of helpers for a shift is 0, 0 is returned. """ if self.number == 0: return 0 num = self.num_helpers_archived() return int(round(float(num) / self.number * 100.0, 0)) def helpers_percent_5percent(self): """ Returns the percentage of registered helpers in 5% steps. So the returned value is between 0 and 20 (including both values). This is used to generate the CSS class names defined in style.css. 
Therefore, inline CSS can be avoided. """ percent = self.helpers_percent() return math.ceil(percent / 5) def helpers_percent_vacant_5percent(self): """ Same as `helpers_percent_5percent`, but for the missing helpers. """ return 20 - self.helpers_percent_5percent() @property def shirt_sizes(self): # data structure shirts = OrderedDict() for size, name in self.job.event.get_shirt_choices(): shirts.update({name: 0}) # collect all sizes, this must be the first shift of the helper for helper in self.helper_set.all(): if helper.first_shift == self: tmp = shirts[helper.get_shirt_display()] shirts.update({helper.get_shirt_display(): tmp+1}) return shirts def duplicate(self, new_date=None, new_job=None, gift_set_mapping=None): """ Duplicate a shift. There are multiple possibilities: * Shift is copied to new day in same job: set new_date * Shift is copied to new job in same event: set new_job * Shift is copied to new event: set new_job and gift_set_mapping """ new_shift = deepcopy(self) new_shift.pk = None new_shift.archived_number = 0 # maybe shift is copied to new job if new_job: new_shift.job = new_job # if shift is copied to new event, move begin and end time according to diff in event dates if self.job.event != new_job.event: diff = new_job.event.date - self.job.event.date new_shift.begin += diff new_shift.end += diff # maybe just the date is changed if new_date: new_shift.move_date(new_date) # now save that new_shift.save() # and finally set the gifts again for gift in self.gifts.all(): if gift_set_mapping: new_shift.gifts.add(gift_set_mapping[gift]) else: new_shift.gifts.add(gift) return new_shift def move_date(self, new_date): # current begin and end in local time old_begin_localtime = localtime(self.begin) old_end_localtime = localtime(self.end) # move date alone without chainging time diff_days = new_date - old_begin_localtime.date() new_begin_date = old_begin_localtime.date() + diff_days new_end_date = old_end_localtime.date() + diff_days # set time separately (10 am should always be 10 am, also when a time change is between old and new date) begin_time = old_begin_localtime.time() end_time = old_end_localtime.time() self.begin = datetime.combine(new_begin_date, begin_time) self.end = datetime.combine(new_end_date, end_time) @receiver(pre_delete, sender=Shift) def shift_deleted(sender, instance, using, **kwargs): # m2m_changed does not trigger here, so remote the helpers before the shift is deleted for helper in instance.helper_set.all(): helper.shifts.remove(instance)
agpl-3.0
-1,135,561,369,318,406,400
31.175214
113
0.594501
false
4.045674
false
false
false
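The helper-percentage methods above boil down to a little arithmetic; a standalone sketch of the same formulas:

import math

def helpers_percent(num_helpers, number):
    # Mirrors Shift.helpers_percent: 0 when the shift needs no helpers at all.
    if number == 0:
        return 0
    return int(round(float(num_helpers) / number * 100.0, 0))

def helpers_percent_5percent(num_helpers, number):
    # The 0..20 bucket that feeds the CSS class names mentioned in the docstring.
    return math.ceil(helpers_percent(num_helpers, number) / 5)

assert helpers_percent(3, 8) == 38            # 37.5% rounds to 38
assert helpers_percent_5percent(3, 8) == 8    # 38% falls into bucket 8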
mpapierski/hb_balancer
protocol.py
1
6557
#!/usr/bin/env python # -*- coding: utf-8 -*- # # hb_balancer # High performance load balancer between Helbreath World Servers. # # Copyright (C) 2012 Michał Papierski <[email protected]> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import struct import random import logging from twisted.internet import reactor from twisted.protocols.stateful import StatefulProtocol from twisted.python import log from packets import Packets class BaseHelbreathProtocol(StatefulProtocol): ''' Basic Helbreath Protocol ''' def getInitialState(self): ''' Protocol overview: [Key unsigned byte] [Size unsigned short] [Data Size-bytes] ''' return (self.get_key, 1) def get_key(self, data): ''' Get key ''' self.key, = struct.unpack('<B', data) return (self.get_data_size, 2) def get_data_size(self, data): ''' Read data size ''' self.data_size, = struct.unpack('<H', data) return (self.get_data, self.data_size - 3) def get_data(self, data): ''' Read encoded data and decode it ''' if self.key > 0: # Decode data = list(data) for i in range(len(data)): data[i] = chr(((ord(data[i]) ^ (self.key ^ (self.data_size - 3 - i))) - (i ^ self.key)) % 256) data = ''.join(data) # Pass decoded data self.raw_data(data) return (self.get_key, 1) def send_message(self, data): ''' Send a Helbreath Packet data ''' key = random.randint(0, 255) if key > 0: # Encode data = list(data) for i in range(len(data)): data[i] = chr(((ord(data[i]) + (i ^ key)) ^ (key ^ (len(data) - i))) % 256) data = ''.join(data) self.transport.write(struct.pack('<BH', key, len(data) + 3) + data) def raw_data(self, data): ''' Got packet ''' pass class ProxyHelbreathProtocol(BaseHelbreathProtocol): ''' Proxy Helbreath protocol used for proxying packets ''' def connectionMade(self): self.factory.success(self) def login(self, account_name, account_password, world_name): ''' Request a login ''' # Casting to str is made for sure # world_name could be either str or unicode. 
self.send_message(struct.pack('<IH10s10s30s', Packets.MSGID_REQUEST_LOGIN, # MsgID 0, # MsgType str(account_name), str(account_password), str(world_name))) def raw_data(self, data): self.factory.receiver(data) self.transport.loseConnection() class HelbreathProtocol(BaseHelbreathProtocol): def raw_data(self, data): # Header msg_id, msg_type = struct.unpack('<IH', data[:6]) # Process packet data if msg_id == Packets.MSGID_REQUEST_LOGIN: # Client is requesting login packet_format = '<10s10s30s' account_name, account_password, world_name = struct.unpack( packet_format, data[6:] ) self.request_login( account_name.rstrip('\x00'), account_password.rstrip('\x00'), world_name.rstrip('\x00') ) elif msg_id == Packets.MSGID_REQUEST_ENTERGAME: # Client is trying to enter game packet_format = '<10s10s10s10si30s120s' player_name, map_name, account_name, account_password, \ level, world_name, cmd_line = struct.unpack( packet_format, data[6:]) self.request_entergame( msg_type, player_name.rstrip('\x00'), map_name.rstrip('\x00'), account_name.rstrip('\x00'), account_password.rstrip('\x00'), level, world_name.rstrip('\x00'), cmd_line.rstrip('\x00')) else: # Abort if a packet is not (yet) known self.transport.loseConnection() def request_login(self, account_name, account_password, world_name): ''' Request client login account_name -- Account name account_password -- Account password world_name -- World server name ''' def world_is_down(failure = None): ''' The requested world is offline ''' self.send_message(struct.pack('<IH', Packets.MSGID_RESPONSE_LOG, Packets.DEF_LOGRESMSGTYPE_NOTEXISTINGWORLDSERVER)) reactor.callLater(10, self.transport.loseConnection) def handle_response(data): ''' Pass data and close the connection nicely ''' self.send_message(data) reactor.callLater(10, self.transport.loseConnection) def connection_made(remote): ''' Connection is made. Request a login. ''' log.msg('Remote connection made!') remote.login( account_name, account_password, remote.factory.world_name ) # Request connection to a world by its name, pass some callbacks self.factory.connect_to_world( world_name = world_name, receiver = handle_response, success = connection_made, failure = world_is_down) log.msg('Request world %s' % (world_name, )) def request_entergame(self, msg_type, player_name, map_name, account_name, account_password, level, world_name, cmd_line): ''' Client wants to enter game. ''' log.msg('Request entergame player(%s) map(%s) account(%s) world(%s)' % ( player_name, map_name, account_name, world_name)) def connection_made(remote): ''' Request enter game, construct exacly the same data. TODO: Parse the msg_type. ''' log.msg('Requesting enter game...') remote.send_message(struct.pack('<IH10s10s10s10si30s120s', Packets.MSGID_REQUEST_ENTERGAME, msg_type, player_name, map_name, account_name, account_password, level, str(remote.factory.world_name), cmd_line)) def error_handler(failure = None): ''' Unable to connect to destination world ''' log.err('Enter game error for account(%s) at world(%s)' % ( account_name, world_name)) self.send_message(struct.pack('<IHB', Packets.MSGID_RESPONSE_ENTERGAME, Packets.DEF_ENTERGAMERESTYPE_REJECT, Packets.DEF_REJECTTYPE_DATADIFFERENCE)) reactor.callLater(10, self.transport.loseConnection) def response_handler(data): ''' Pass the (modified) data ''' self.send_message(data) self.factory.connect_to_world( world_name = world_name, receiver = response_handler, success = connection_made, failure = error_handler )
agpl-3.0
-1,380,034,530,598,753,500
28.399103
98
0.675412
false
3.133843
false
false
false
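The wire framing used by BaseHelbreathProtocol above is easy to exercise in isolation; a bytes-based sketch (the original operates on Python 2 str) of the same [key][size][payload] obfuscation:

import random
import struct

def encode_packet(data):
    # Same per-byte add/xor arithmetic as send_message().
    key = random.randint(0, 255)
    out = bytearray(data)
    if key > 0:
        for i in range(len(out)):
            out[i] = ((out[i] + (i ^ key)) ^ (key ^ (len(out) - i))) % 256
    return struct.pack('<BH', key, len(out) + 3) + bytes(out)

def decode_packet(key, size, payload):
    # Inverse transform, as in get_data(); size is the on-wire length (payload + 3).
    out = bytearray(payload)
    if key > 0:
        for i in range(len(out)):
            out[i] = ((out[i] ^ (key ^ (size - 3 - i))) - (i ^ key)) % 256
    return bytes(out)

framed = encode_packet(b'hello world')
key, size = struct.unpack('<BH', framed[:3])
assert decode_packet(key, size, framed[3:]) == b'hello world'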
sameersingh/bibere
scripts/first_pages.py
1
1750
#!/usr/bin/python3 import argparse from read_json import * import tempfile import shutil import pypdftk import os def get_pdf(source, dest): shutil.copy(source, dest) def run(idir, bdir, ofile): authors, venues, papers = read_all_info(idir) fpdf_names = [] tmpdirname = tempfile.mkdtemp() for p in papers: if p['pubTypeSlot'] == 'Conference' or p['pubTypeSlot'] == 'Journal': if 'pdfLink' not in p: print("pdfLink missing:", p['id']) elif p['pdfLink'].startswith("http"): print("local link missing:", p['id']) else: source = bdir + "/" + p['pdfLink'] i = len(fpdf_names) dest = "%s/%d.pdf" % (tmpdirname, i) print("getting %s, putting it %s" % (source, dest)) get_pdf(source, dest) tdir = "%s/%d/" % (tmpdirname, i) os.mkdir(tdir) fpdf_names.append(tdir + "page_01.pdf") pypdftk.split(dest, tdir) pypdftk.concat(fpdf_names, out_file=ofile) shutil.rmtree(tmpdirname) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-i", "--input", help="directory containing the json files for authors/papers", required=True) parser.add_argument("-b", "--basedir", help="the base directory of where the full PDFs reside.", required=True) parser.add_argument("-o", "--output", help="output pdf file for the first pages", required=True) args = parser.parse_args() print("input: ", args.input) print("basedir: ", args.basedir) print("output: ", args.output) run(args.input, args.basedir, args.output)
bsd-2-clause
-2,552,111,543,381,166,600
36.888889
118
0.564
false
3.535354
false
false
false
openstack/yaql
yaql/language/contexts.py
1
9928
# Copyright (c) 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from yaql.language import exceptions from yaql.language import runner from yaql.language import specs from yaql.language import utils class ContextBase(metaclass=abc.ABCMeta): def __init__(self, parent_context=None, convention=None): self._parent_context = parent_context self._convention = convention if convention is None and parent_context: self._convention = parent_context.convention @property def parent(self): return self._parent_context @abc.abstractmethod def register_function(self, spec, *args, **kwargs): pass @abc.abstractmethod def get_data(self, name, default=None, ask_parent=True): return default def __getitem__(self, name): return self.get_data(name) @abc.abstractmethod def __setitem__(self, name, value): pass @abc.abstractmethod def __delitem__(self, name): pass @abc.abstractmethod def __contains__(self, item): return False def __call__(self, name, engine, receiver=utils.NO_VALUE, data_context=None, use_convention=False, function_filter=None): return lambda *args, **kwargs: runner.call( name, self, args, kwargs, engine, receiver, data_context, use_convention, function_filter) @abc.abstractmethod def get_functions(self, name, predicate=None, use_convention=False): return [], False @abc.abstractmethod def delete_function(self, spec): pass def collect_functions(self, name, predicate=None, use_convention=False): overloads = [] p = self while p is not None: context_predicate = None if predicate: context_predicate = lambda fd: predicate(fd, p) # noqa: E731 layer_overloads, is_exclusive = p.get_functions( name, context_predicate, use_convention) p = None if is_exclusive else p.parent if layer_overloads: overloads.append(layer_overloads) return overloads def create_child_context(self): return type(self)(self) @property def convention(self): return self._convention @abc.abstractmethod def keys(self): return {}.keys() class Context(ContextBase): def __init__(self, parent_context=None, data=utils.NO_VALUE, convention=None): super(Context, self).__init__(parent_context, convention) self._functions = {} self._data = {} self._exclusive_funcs = set() if data is not utils.NO_VALUE: self['$'] = data @staticmethod def _import_function_definition(fd): return fd def register_function(self, spec, *args, **kwargs): exclusive = kwargs.pop('exclusive', False) if not isinstance(spec, specs.FunctionDefinition) and callable(spec): spec = specs.get_function_definition( spec, *args, convention=self._convention, **kwargs) spec = self._import_function_definition(spec) if spec.is_method: if not spec.is_valid_method(): raise exceptions.InvalidMethodException(spec.name) self._functions.setdefault(spec.name, set()).add(spec) if exclusive: self._exclusive_funcs.add(spec.name) def delete_function(self, spec): self._functions.get(spec.name, set()).discard(spec) self._exclusive_funcs.discard(spec.name) def get_functions(self, name, predicate=None, use_convention=False): name = name.rstrip('_') if use_convention and self._convention is not 
None: name = self._convention.convert_function_name(name) if predicate is None: predicate = lambda x: True # noqa: E731 return ( set(filter(predicate, self._functions.get(name, set()))), name in self._exclusive_funcs ) @staticmethod def _normalize_name(name): if not name.startswith('$'): name = ('$' + name) if name == '$': name = '$1' return name def __setitem__(self, name, value): self._data[self._normalize_name(name)] = value def get_data(self, name, default=None, ask_parent=True): name = self._normalize_name(name) if name in self._data: return self._data[name] ctx = self.parent while ask_parent and ctx: result = ctx.get_data(name, utils.NO_VALUE, False) if result is utils.NO_VALUE: ctx = ctx.parent else: return result return default def __delitem__(self, name): self._data.pop(self._normalize_name(name)) def __contains__(self, item): if isinstance(item, specs.FunctionDefinition): return item in self._functions.get(item.name, []) if isinstance(item, str): return self._normalize_name(item) in self._data return False def keys(self): return self._data.keys() class MultiContext(ContextBase): def __init__(self, context_list, convention=None): self._context_list = context_list if convention is None: convention = context_list[0].convention parents = tuple( filter(lambda t: t, map(lambda t: t.parent, context_list)) ) if not parents: super(MultiContext, self).__init__(None, convention) elif len(parents) == 1: super(MultiContext, self).__init__(parents[0], convention) else: super(MultiContext, self).__init__(MultiContext(parents), convention) def register_function(self, spec, *args, **kwargs): self._context_list[0].register_function(spec, *args, **kwargs) def get_data(self, name, default=None, ask_parent=True): for context in self._context_list: result = context.get_data(name, utils.NO_VALUE, False) if result is not utils.NO_VALUE: return result ctx = self.parent while ask_parent and ctx: result = ctx.get_data(name, utils.NO_VALUE, False) if result is utils.NO_VALUE: ctx = ctx.parent else: return result return default def __setitem__(self, name, value): self._context_list[0][name] = value def __delitem__(self, name): for context in self._context_list: del context[name] def create_child_context(self): return Context(self) def keys(self): prev_keys = set() for context in self._context_list: for key in context.keys(): if key not in prev_keys: prev_keys.add(key) yield key def delete_function(self, spec): for context in self._context_list: context.delete_function(spec) def __contains__(self, item): for context in self._context_list: if item in context: return True return False def get_functions(self, name, predicate=None, use_convention=False): result = set() is_exclusive = False for context in self._context_list: funcs, exclusive = context.get_functions( name, predicate, use_convention) result.update(funcs) if exclusive: is_exclusive = True return result, is_exclusive class LinkedContext(ContextBase): """Context that is as a proxy to another context but has its own parent.""" def __init__(self, parent_context, linked_context, convention=None): self.linked_context = linked_context if linked_context.parent: super(LinkedContext, self).__init__( LinkedContext(parent_context, linked_context.parent, convention), convention) else: super(LinkedContext, self).__init__(parent_context, convention) def register_function(self, spec, *args, **kwargs): return self.linked_context.register_function(spec, *args, **kwargs) def keys(self): return self.linked_context.keys() def get_data(self, name, default=None, 
ask_parent=True): result = self.linked_context.get_data( name, default=utils.NO_VALUE, ask_parent=False) if result is utils.NO_VALUE: if not ask_parent or not self.parent: return default return self.parent.get_data(name, default=default, ask_parent=True) return result def get_functions(self, name, predicate=None, use_convention=False): return self.linked_context.get_functions( name, predicate=predicate, use_convention=use_convention) def delete_function(self, spec): return self.linked_context.delete_function(spec) def __contains__(self, item): return item in self.linked_context def __delitem__(self, name): del self.linked_context[name] def __setitem__(self, name, value): self.linked_context[name] = value def create_child_context(self): return type(self.linked_context)(self)
apache-2.0
7,695,287,142,699,506,000
32.427609
79
0.599919
false
4.192568
false
false
false
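A compact sketch of the variable-name normalization and parent-chain lookup that Context.get_data() above implements, with plain dicts standing in for real Context objects:

def normalize_name(name):
    # Mirrors Context._normalize_name: '$' prefix is implied, bare '$' means '$1'.
    if not name.startswith('$'):
        name = '$' + name
    if name == '$':
        name = '$1'
    return name

def get_data(scopes, name, default=None):
    # Walk child -> parent, as get_data(ask_parent=True) does.
    name = normalize_name(name)
    for scope in scopes:
        if name in scope:
            return scope[name]
    return default

child, parent = {'$x': 1}, {'$x': 0, '$y': 2}
assert get_data([child, parent], 'x') == 1   # the child context shadows its parent
assert get_data([child, parent], 'y') == 2   # lookup falls through to the parent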
ActiveState/code
recipes/Python/59867_crossplatform_import_hook_endofline/recipe-59867.py
1
1504
# Import hook for end-of-line conversion, # by David Goodger ([email protected]). # Put in your sitecustomize.py, anywhere on sys.path, and you'll be able to # import Python modules with any of Unix, Mac, or Windows line endings. import ihooks, imp, py_compile class MyHooks(ihooks.Hooks): def load_source(self, name, filename, file=None): """Compile source files with any line ending.""" if file: file.close() py_compile.compile(filename) # line ending conversion is in here cfile = open(filename + (__debug__ and 'c' or 'o'), 'rb') try: return self.load_compiled(name, filename, cfile) finally: cfile.close() class MyModuleLoader(ihooks.ModuleLoader): def load_module(self, name, stuff): """Special-case package directory imports.""" file, filename, (suff, mode, type) = stuff path = None if type == imp.PKG_DIRECTORY: stuff = self.find_module_in_dir("__init__", filename, 0) file = stuff[0] # package/__init__.py path = [filename] try: # let superclass handle the rest module = ihooks.ModuleLoader.load_module(self, name, stuff) finally: if file: file.close() if path: module.__path__ = path # necessary for pkg.module imports return module ihooks.ModuleImporter(MyModuleLoader(MyHooks())).install()
mit
6,434,838,695,669,646,000
35.682927
75
0.588431
false
3.978836
false
false
false
RTHMaK/RPGOne
deep_qa-master/deep_qa/layers/recurrence_modes.py
1
1184
from typing import Any, Dict from collections import OrderedDict from keras import backend as K class FixedRecurrence: ''' This recurrence class simply performs a fixed number of memory network steps and returns the memory representation and representation of the background knowledge generated by the knowledge_selector and knowledge_combiner layers (the simplest case being a weighted sum). ''' def __init__(self, memory_network, params: Dict[str, Any]): self.num_memory_layers = params.pop("num_memory_layers", 1) self.memory_network = memory_network def __call__(self, encoded_question, current_memory, encoded_background): for _ in range(self.num_memory_layers): current_memory, attended_knowledge = \ self.memory_network.memory_step(encoded_question, current_memory, encoded_background) return current_memory, attended_knowledge recurrence_modes = OrderedDict() # pylint: disable=invalid-name recurrence_modes["fixed"] = FixedRecurrence if K.backend() == 'tensorflow': from .adaptive_recurrence import AdaptiveRecurrence recurrence_modes["adaptive"] = AdaptiveRecurrence
apache-2.0
6,218,812,813,095,521,000
39.827586
101
0.723818
false
4.274368
false
false
false
disler/Kontact
App/Server.py
1
3547
from flask import Flask, render_template, current_app, Response, request from server.DBInterface import DBInterface from server.Validator import Validator from server.WebUtil import WebUtil import json import ast app = Flask(__name__) #load database interface db = DBInterface() #load validator validator = Validator.Kontact() @app.route('/') def Home(): """ Landing page for application """ return current_app.send_static_file("index.html") @app.route('/kontacts') def Get(): """ Get the list of kontacts """ return WebUtil.AsJson(db.Get("tblKontact")) @app.route('/kontacts/<int:id>') def GetByID(id): """ Get single record by id """ #get record by id from the kontact table oRecord = db.GetByID("tblKontact", id) #if the record returned is nothing return an empty object if(oRecord is None): oRecord = dict({}) return WebUtil.AsJson(oRecord) @app.route('/kontacts', methods=["POST"]) def Create(): """ Create a new kontact record """ #convert request data to json to be rendered as a python dict oKontact = WebUtil.ToObject(request.data) #if our processed data is a dict if type(oKontact) is dict: #validate to proper data structure bValid = validator.Validate(oKontact) #if valid kontact object is valid add to db if bValid: #create kontact obj db.Create("tblKontact", oKontact) #return success response return WebUtil.SuccessResponse() #kontact object is not valid return failure response else: return WebUtil.FailureResponse() @app.route("/kontacts/<int:id>", methods=["PUT"]) def Update(id): """ Update a currently existing kontact record """ #Convert request to python structure oNewKontact = WebUtil.ToObject(request.data) #get current kontact we're going to update oPreviousKontact = db.GetByID("tblKontact", id) #if the kontact we're trying to update exists if(oPreviousKontact is not None): #combine the old kontact with the new - new having priority oMergedKontact = WebUtil.MergeDict(oPreviousKontact, oNewKontact) #validate the newly merged kontact object bValid = validator.Validate(oMergedKontact) #if the kontact object is valid if bValid: #update the kontact object db.Update("tblKontact", id, oMergedKontact) #return failure response return WebUtil.SuccessResponse() #kontact object is not valid else: #return failure response return WebUtil.FailureResponse() #the kontact we're trying to update does not exists return failure response else: return WebUtil.FailureResponse() @app.route("/kontacts/<int:id>", methods=["DELETE"]) def Delete(id): """ Delete a kontact based on it's id' """ #get current kontact we're going to delete oPreviousKontact = db.GetByID("tblKontact", id) #if the kontact we're trying to delete exists if(oPreviousKontact is not None): #delete the kontact db.Delete("tblKontact", id) #return success response return WebUtil.SuccessResponse() #kontact does not exists return failure response else: return WebUtil.FailureResponse() #launch flask app if __name__ == '__main__': app.run(host="0.0.0.0", port=5000, debug=True, threaded=True)
mit
-8,879,114,534,368,940,000
24.702899
79
0.643079
false
3.809882
false
false
false
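A hypothetical client-side exercise of the Flask routes above; the payload field names are assumptions, since the Kontact validator schema lives elsewhere:

import requests  # assumption: the requests package is available

base = "http://localhost:5000"
requests.post(base + "/kontacts", data='{"name": "Ada", "phone": "555-0100"}')   # Create
print(requests.get(base + "/kontacts").json())                                   # Get
requests.put(base + "/kontacts/1", data='{"phone": "555-0199"}')                 # Update
requests.delete(base + "/kontacts/1")                                            # Delete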
arenadata/ambari
ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
1
7000
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' from stacks.utils.RMFTestCase import * from only_for_platform import not_for_platform, PLATFORM_WINDOWS @not_for_platform(PLATFORM_WINDOWS) class TestMahoutClient(RMFTestCase): COMMON_SERVICES_PACKAGE_DIR = "MAHOUT/1.0.0.2.3/package" STACK_VERSION = "2.3" DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp'] def test_configure_default(self): self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py", classname = "MahoutServiceCheck", command = "service_check", config_file="default.json", stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES ) self.assertResourceCalled('File', '/tmp/sample-mahout-test.txt', content = 'Test text which will be converted to sequence file.', mode = 0755, ) self.maxDiff=None self.assertResourceCalled('HdfsResource', '/user/ambari-qa', immutable_paths = self.DEFAULT_IMMUTABLE_PATHS, security_enabled = False, hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin', keytab = UnknownConfigurationMock(), kinit_path_local = '/usr/bin/kinit', user = 'hdfs', dfs_type = '', mode = 0770, owner = 'ambari-qa', action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020', hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf', type = 'directory', ) self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeoutput', immutable_paths = self.DEFAULT_IMMUTABLE_PATHS, security_enabled = False, hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin', keytab = UnknownConfigurationMock(), kinit_path_local = '/usr/bin/kinit', user = 'hdfs', dfs_type = '', action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020', hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf', type = 'directory', ) self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput', immutable_paths = self.DEFAULT_IMMUTABLE_PATHS, security_enabled = False, hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin', keytab = UnknownConfigurationMock(), kinit_path_local = '/usr/bin/kinit', user = 'hdfs', dfs_type = '', owner = 'ambari-qa', hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf', type = 'directory', action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), 
default_fs='hdfs://c6401.ambari.apache.org:8020', ) self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput/sample-mahout-test.txt', immutable_paths = self.DEFAULT_IMMUTABLE_PATHS, security_enabled = False, hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin', keytab = UnknownConfigurationMock(), kinit_path_local = '/usr/bin/kinit', source = '/tmp/sample-mahout-test.txt', user = 'hdfs', dfs_type = '', owner = 'ambari-qa', hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf', type = 'file', action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020', ) self.assertResourceCalled('HdfsResource', None, immutable_paths = self.DEFAULT_IMMUTABLE_PATHS, security_enabled = False, hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin', keytab = UnknownConfigurationMock(), kinit_path_local = '/usr/bin/kinit', user = 'hdfs', dfs_type = '', action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020', hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf', ) self.assertResourceCalled('Execute', 'mahout seqdirectory --input /user/ambari-qa/mahoutsmokeinput/' 'sample-mahout-test.txt --output /user/ambari-qa/mahoutsmokeoutput/ ' '--charset utf-8', environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45', 'MAHOUT_HOME': '/usr/hdp/current/mahout-client'}, path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'], tries = 3, user = 'ambari-qa', try_sleep = 5, ) self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /user/ambari-qa/mahoutsmokeoutput/_SUCCESS', try_sleep = 6, tries = 10, bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin', user = 'ambari-qa', conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf', ) self.assertNoMoreResources()
apache-2.0
1,205,591,747,481,414,000
53.6875
291
0.59
false
3.721425
true
false
false
frew/simpleproto
scons-local-1.1.0/SCons/Scanner/C.py
1
4739
"""SCons.Scanner.C This module implements the depenency scanner for C/C++ code. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Scanner/C.py 3603 2008/10/10 05:46:45 scons" import SCons.Node.FS import SCons.Scanner import SCons.Util import SCons.cpp class SConsCPPScanner(SCons.cpp.PreProcessor): """ SCons-specific subclass of the cpp.py module's processing. We subclass this so that: 1) we can deal with files represented by Nodes, not strings; 2) we can keep track of the files that are missing. """ def __init__(self, *args, **kw): apply(SCons.cpp.PreProcessor.__init__, (self,)+args, kw) self.missing = [] def initialize_result(self, fname): self.result = SCons.Util.UniqueList([fname]) def finalize_result(self, fname): return self.result[1:] def find_include_file(self, t): keyword, quote, fname = t result = SCons.Node.FS.find_file(fname, self.searchpath[quote]) if not result: self.missing.append((fname, self.current_file)) return result def read_file(self, file): try: fp = open(str(file.rfile())) except EnvironmentError, e: self.missing.append((file, self.current_file)) return '' else: return fp.read() def dictify_CPPDEFINES(env): cppdefines = env.get('CPPDEFINES', {}) if cppdefines is None: return {} if SCons.Util.is_Sequence(cppdefines): result = {} for c in cppdefines: if SCons.Util.is_Sequence(c): result[c[0]] = c[1] else: result[c] = None return result if not SCons.Util.is_Dict(cppdefines): return {cppdefines : None} return cppdefines class SConsCPPScannerWrapper: """ The SCons wrapper around a cpp.py scanner. This is the actual glue between the calling conventions of generic SCons scanners, and the (subclass of) cpp.py class that knows how to look for #include lines with reasonably real C-preprocessor-like evaluation of #if/#ifdef/#else/#elif lines. 
""" def __init__(self, name, variable): self.name = name self.path = SCons.Scanner.FindPathDirs(variable) def __call__(self, node, env, path = ()): cpp = SConsCPPScanner(current = node.get_dir(), cpppath = path, dict = dictify_CPPDEFINES(env)) result = cpp(node) for included, includer in cpp.missing: fmt = "No dependency generated for file: %s (included from: %s) -- file not found" SCons.Warnings.warn(SCons.Warnings.DependencyWarning, fmt % (included, includer)) return result def recurse_nodes(self, nodes): return nodes def select(self, node): return self def CScanner(): """Return a prototype Scanner instance for scanning source files that use the C pre-processor""" # Here's how we would (or might) use the CPP scanner code above that # knows how to evaluate #if/#ifdef/#else/#elif lines when searching # for #includes. This is commented out for now until we add the # right configurability to let users pick between the scanners. #return SConsCPPScannerWrapper("CScanner", "CPPPATH") cs = SCons.Scanner.ClassicCPP("CScanner", "$CPPSUFFIXES", "CPPPATH", '^[ \t]*#[ \t]*(?:include|import)[ \t]*(<|")([^>"]+)(>|")') return cs
bsd-2-clause
-3,936,452,968,050,813,000
36.611111
94
0.638109
false
4.053892
false
false
false
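The CPPDEFINES normalization above is easy to follow with a standalone equivalent, using isinstance checks in place of SCons.Util.is_Sequence and is_Dict:

def dictify(cppdefines):
    # Normalize the CPPDEFINES construction variable into a plain dict.
    if cppdefines is None:
        return {}
    if isinstance(cppdefines, (list, tuple)):
        result = {}
        for c in cppdefines:
            if isinstance(c, (list, tuple)):
                result[c[0]] = c[1]
            else:
                result[c] = None
        return result
    if not isinstance(cppdefines, dict):
        return {cppdefines: None}
    return cppdefines

assert dictify([('FOO', 1), 'BAR']) == {'FOO': 1, 'BAR': None}
assert dictify('BAZ') == {'BAZ': None}
assert dictify(None) == {}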
uclouvain/osis_louvain
base/models/learning_unit_year.py
1
24536
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## import re from django.core.exceptions import ValidationError from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator from django.db import models from django.db.models import Q from django.utils.functional import cached_property from django.utils.translation import ugettext_lazy as _, ngettext from base.models import entity_container_year as mdl_entity_container_year from base.models.academic_year import compute_max_academic_year_adjournment, AcademicYear, \ MAX_ACADEMIC_YEAR_FACULTY, starting_academic_year from base.models.enums import active_status, learning_container_year_types from base.models.enums import learning_unit_year_subtypes, internship_subtypes, \ learning_unit_year_session, entity_container_year_link_type, quadrimesters, attribution_procedure from base.models.enums.learning_container_year_types import COURSE, INTERNSHIP from base.models.enums.learning_unit_year_periodicity import PERIODICITY_TYPES, ANNUAL, BIENNIAL_EVEN, BIENNIAL_ODD from base.models.learning_unit import LEARNING_UNIT_ACRONYM_REGEX_ALL, REGEX_BY_SUBTYPE from osis_common.models.serializable_model import SerializableModel, SerializableModelAdmin AUTHORIZED_REGEX_CHARS = "$*+.^" REGEX_ACRONYM_CHARSET = "[A-Z0-9" + AUTHORIZED_REGEX_CHARS + "]+" MINIMUM_CREDITS = 0.0 MAXIMUM_CREDITS = 500 def academic_year_validator(value): academic = AcademicYear.objects.get(pk=value) academic_year_max = compute_max_academic_year_adjournment() if academic.year > academic_year_max: raise ValidationError(_('learning_unit_creation_academic_year_max_error').format(academic_year_max)) class LearningUnitYearAdmin(SerializableModelAdmin): list_display = ('external_id', 'acronym', 'specific_title', 'academic_year', 'credits', 'changed', 'structure', 'status') list_filter = ('academic_year', 'decimal_scores', 'summary_locked') search_fields = ['acronym', 'structure__acronym', 'external_id'] actions = [ 'resend_messages_to_queue', 'apply_learning_unit_year_postponement' ] def apply_learning_unit_year_postponement(self, request, queryset): # Potential circular imports from base.business.learning_units.automatic_postponement import LearningUnitAutomaticPostponement from base.views.common import display_success_messages, display_error_messages result, errors = 
LearningUnitAutomaticPostponement(queryset.filter(learning_container_year__isnull=False)) count = len(result) display_success_messages( request, ngettext( '%(count)d learning unit has been postponed with success', '%(count)d learning units have been postponed with success', count ) % {'count': count} ) if errors: display_error_messages(request, "{} : {}".format( _("The following learning units ended with error"), ", ".join([str(error) for error in errors]) )) apply_learning_unit_year_postponement.short_description = _("Apply postponement on learning unit year") class LearningUnitYearWithContainerManager(models.Manager): def get_queryset(self): # FIXME For the moment, the learning_unit_year without container must be hide ! return super().get_queryset().filter(learning_container_year__isnull=False) class ExtraManagerLearningUnitYear(models.Model): # This class ensure that the default manager (from serializable model) is not override by this manager objects_with_container = LearningUnitYearWithContainerManager() class Meta: abstract = True class LearningUnitYear(SerializableModel, ExtraManagerLearningUnitYear): external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True) academic_year = models.ForeignKey(AcademicYear, verbose_name=_('academic_year'), validators=[academic_year_validator]) learning_unit = models.ForeignKey('LearningUnit') learning_container_year = models.ForeignKey('LearningContainerYear', null=True) changed = models.DateTimeField(null=True, auto_now=True) acronym = models.CharField(max_length=15, db_index=True, verbose_name=_('code'), validators=[RegexValidator(LEARNING_UNIT_ACRONYM_REGEX_ALL)]) specific_title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_('title_proper_to_UE')) specific_title_english = models.CharField(max_length=250, blank=True, null=True, verbose_name=_('english_title_proper_to_UE')) subtype = models.CharField(max_length=50, choices=learning_unit_year_subtypes.LEARNING_UNIT_YEAR_SUBTYPES, default=learning_unit_year_subtypes.FULL) credits = models.DecimalField(null=True, max_digits=5, decimal_places=2, validators=[MinValueValidator(MINIMUM_CREDITS), MaxValueValidator(MAXIMUM_CREDITS)], verbose_name=_('credits')) decimal_scores = models.BooleanField(default=False) structure = models.ForeignKey('Structure', blank=True, null=True) internship_subtype = models.CharField(max_length=250, blank=True, null=True, verbose_name=_('internship_subtype'), choices=internship_subtypes.INTERNSHIP_SUBTYPES) status = models.BooleanField(default=False, verbose_name=_('active_title')) session = models.CharField(max_length=50, blank=True, null=True, choices=learning_unit_year_session.LEARNING_UNIT_YEAR_SESSION, verbose_name=_('session_title')) quadrimester = models.CharField(max_length=9, blank=True, null=True, verbose_name=_('quadrimester'), choices=quadrimesters.LEARNING_UNIT_YEAR_QUADRIMESTERS) attribution_procedure = models.CharField(max_length=20, blank=True, null=True, verbose_name=_('procedure'), choices=attribution_procedure.ATTRIBUTION_PROCEDURES) summary_locked = models.BooleanField(default=False, verbose_name=_("summary_locked")) professional_integration = models.BooleanField(default=False, verbose_name=_('professional_integration')) campus = models.ForeignKey('Campus', null=True, verbose_name=_("learning_location")) language = models.ForeignKey('reference.Language', null=True, verbose_name=_('language')) periodicity = models.CharField(max_length=20, choices=PERIODICITY_TYPES, default=ANNUAL, 
verbose_name=_('periodicity')) _warnings = None class Meta: unique_together = (('learning_unit', 'academic_year'), ('acronym', 'academic_year')) permissions = ( ("can_receive_emails_about_automatic_postponement", "Can receive emails about automatic postponement"), ) def __str__(self): return u"%s - %s" % (self.academic_year, self.acronym) @property def subdivision(self): if self.acronym and self.learning_container_year: return self.acronym.replace(self.learning_container_year.acronym, "") return None @property def parent(self): if self.subdivision and self.is_partim(): return LearningUnitYear.objects.filter( subtype=learning_unit_year_subtypes.FULL, learning_container_year=self.learning_container_year, ).get() return None @property def same_container_learning_unit_years(self): return LearningUnitYear.objects.filter( learning_container_year=self.learning_container_year ).order_by('acronym') @cached_property def allocation_entity(self): return self.get_entity(entity_container_year_link_type.ALLOCATION_ENTITY) @cached_property def requirement_entity(self): return self.get_entity(entity_container_year_link_type.REQUIREMENT_ENTITY) @property def complete_title(self): complete_title = self.specific_title if self.learning_container_year: complete_title = ' - '.join(filter(None, [self.learning_container_year.common_title, self.specific_title])) return complete_title @property def complete_title_english(self): complete_title_english = self.specific_title_english if self.learning_container_year: complete_title_english = ' - '.join(filter(None, [ self.learning_container_year.common_title_english, self.specific_title_english, ])) return complete_title_english @property def container_common_title(self): if self.learning_container_year: return self.learning_container_year.common_title return '' def get_partims_related(self): if self.is_full() and self.learning_container_year: return self.learning_container_year.get_partims_related() return LearningUnitYear.objects.none() def find_list_group_element_year(self): return self.child_leaf.filter(child_leaf=self).select_related('parent') def get_learning_unit_next_year(self): try: return self.learning_unit.learningunityear_set.get(academic_year__year=(self.academic_year.year + 1)) except LearningUnitYear.DoesNotExist: return None @property def in_charge(self): return self.learning_container_year and self.learning_container_year.in_charge @property def container_type_verbose(self): container_type = '' if self.learning_container_year: container_type = _(self.learning_container_year.container_type) if self.learning_container_year.container_type in (COURSE, INTERNSHIP): container_type += " ({subtype})".format(subtype=_(self.subtype)) return container_type @property def status_verbose(self): return _("active") if self.status else _("inactive") @property def internship_subtype_verbose(self): return _('to_complete') if self.learning_container_year and \ self.learning_container_year.container_type == INTERNSHIP and \ not self.internship_subtype else self.internship_subtype @property def get_previous_acronym(self): return find_lt_learning_unit_year_with_different_acronym(self) @property def periodicity_verbose(self): if self.periodicity: return _(self.periodicity) return None def find_gte_learning_units_year(self): return LearningUnitYear.objects.filter(learning_unit=self.learning_unit, academic_year__year__gte=self.academic_year.year) \ .order_by('academic_year__year') def find_gt_learning_units_year(self): return 
LearningUnitYear.objects.filter(learning_unit=self.learning_unit, academic_year__year__gt=self.academic_year.year) \ .order_by('academic_year__year') def is_past(self): return self.academic_year.is_past() # FIXME move this method to business/perm file def can_update_by_faculty_manager(self): if not self.learning_container_year: return False starting_year = starting_academic_year().year year = self.academic_year.year return starting_year <= year <= starting_year + MAX_ACADEMIC_YEAR_FACULTY def is_full(self): return self.subtype == learning_unit_year_subtypes.FULL def is_partim(self): return self.subtype == learning_unit_year_subtypes.PARTIM def get_entity(self, entity_type): entity = None # @TODO: Remove this condition when classes will be removed from learning unit year if self.learning_container_year: entity_container_yr = mdl_entity_container_year.search( link_type=entity_type, learning_container_year=self.learning_container_year, ).get() entity = entity_container_yr.entity if entity_container_yr else None return entity def clean(self): learning_unit_years = find_gte_year_acronym(self.academic_year, self.acronym) if getattr(self, 'learning_unit', None): learning_unit_years = learning_unit_years.exclude(learning_unit=self.learning_unit) self.clean_acronym(learning_unit_years) def clean_acronym(self, learning_unit_years): if self.acronym in learning_unit_years.values_list('acronym', flat=True): raise ValidationError({'acronym': _('already_existing_acronym')}) if not re.match(REGEX_BY_SUBTYPE[self.subtype], self.acronym): raise ValidationError({'acronym': _('invalid_acronym')}) @property def warnings(self): if self._warnings is None: self._warnings = [] self._warnings.extend(self._check_credits_is_integer()) self._warnings.extend(self._check_partim_parent_credits()) self._warnings.extend(self._check_internship_subtype()) self._warnings.extend(self._check_partim_parent_status()) self._warnings.extend(self._check_partim_parent_periodicity()) self._warnings.extend(self._check_learning_component_year_warnings()) self._warnings.extend(self._check_learning_container_year_warnings()) self._warnings.extend(self._check_entity_container_year_warnings()) return self._warnings # TODO: Currently, we should warning user that the credits is not an integer def _check_credits_is_integer(self): warnings = [] if self.credits and self.credits % 1 != 0: warnings.append(_('The credits value should be an integer')) return warnings def _check_partim_parent_credits(self): children = self.get_partims_related() return [_('The credits value of the partim %(acronym)s is greater or equal than the credits value of the ' 'parent learning unit.') % {'acronym': child.acronym} for child in children if child.credits and child.credits >= self.credits] def _check_internship_subtype(self): warnings = [] if getattr(self, 'learning_container_year', None): if (self.learning_container_year.container_type == learning_container_year_types.INTERNSHIP and not self.internship_subtype): warnings.append(_('missing_internship_subtype')) return warnings def _check_partim_parent_status(self): warnings = [] if self.parent: if not self.parent.status and self.status: warnings.append(_('This partim is active and the parent is inactive')) else: if self.status is False and find_partims_with_active_status(self).exists(): warnings.append(_("The parent is inactive and there is at least one partim active")) return warnings def _check_partim_parent_periodicity(self): warnings = [] if self.parent: if self.parent.periodicity in [BIENNIAL_EVEN, 
BIENNIAL_ODD] and self.periodicity != self.parent.periodicity: warnings.append(_("This partim is %(partim_periodicity)s and the parent is %(parent_periodicty)s") % {'partim_periodicity': self.periodicity_verbose, 'parent_periodicty': self.parent.periodicity_verbose}) else: if self.periodicity in [BIENNIAL_EVEN, BIENNIAL_ODD] and \ find_partims_with_different_periodicity(self).exists(): warnings.append(_("The parent is %(parent_periodicty)s and there is at least one partim which is not " "%(parent_periodicty)s") % {'parent_periodicty': self.periodicity_verbose}) return warnings def _check_learning_component_year_warnings(self): _warnings = [] components_queryset = self.learning_container_year.learningcomponentyear_set all_components = components_queryset.all().order_by('learningunitcomponent__learning_unit_year__acronym') for learning_component_year in all_components: _warnings.extend(learning_component_year.warnings) return _warnings def _check_learning_container_year_warnings(self): return self.learning_container_year.warnings def _check_entity_container_year_warnings(self): _warnings = [] entity_container_years = mdl_entity_container_year.find_by_learning_container_year(self.learning_container_year) for entity_container_year in entity_container_years: _warnings.extend(entity_container_year.warnings) return _warnings def is_external(self): return hasattr(self, "externallearningunityear") def get_by_id(learning_unit_year_id): return LearningUnitYear.objects.select_related('learning_container_year__learning_container') \ .get(pk=learning_unit_year_id) def find_by_acronym(acronym): return LearningUnitYear.objects.filter(acronym=acronym).select_related('learning_container_year') def _is_regex(acronym): return set(AUTHORIZED_REGEX_CHARS).intersection(set(acronym)) def search(academic_year_id=None, acronym=None, learning_container_year_id=None, learning_unit=None, title=None, subtype=None, status=None, container_type=None, tutor=None, summary_responsible=None, requirement_entities=None, learning_unit_year_id=None, *args, **kwargs): queryset = LearningUnitYear.objects_with_container if learning_unit_year_id: queryset = queryset.filter(id=learning_unit_year_id) if academic_year_id: queryset = queryset.filter(academic_year=academic_year_id) if acronym: if _is_regex(acronym): queryset = queryset.filter(acronym__iregex=r"(" + acronym + ")") else: queryset = queryset.filter(acronym__icontains=acronym) if learning_container_year_id is not None: if isinstance(learning_container_year_id, list): queryset = queryset.filter(learning_container_year__in=learning_container_year_id) elif learning_container_year_id: queryset = queryset.filter(learning_container_year=learning_container_year_id) if requirement_entities: queryset = queryset.filter( learning_container_year__entitycontaineryear__entity__entityversion__in=requirement_entities, learning_container_year__entitycontaineryear__type=entity_container_year_link_type.REQUIREMENT_ENTITY) if learning_unit: queryset = queryset.filter(learning_unit=learning_unit) if title: queryset = queryset. 
\ filter(Q(specific_title__iregex=title) | Q(learning_container_year__common_title__iregex=title)) if subtype: queryset = queryset.filter(subtype=subtype) if status: queryset = queryset.filter(status=convert_status_bool(status)) if container_type: queryset = queryset.filter(learning_container_year__container_type=container_type) if tutor: for name in tutor.split(): filter_by_first_name = {_build_tutor_filter(name_type='first_name'): name} filter_by_last_name = {_build_tutor_filter(name_type='last_name'): name} queryset = queryset.filter(Q(**filter_by_first_name) | Q(**filter_by_last_name)).distinct() if summary_responsible: queryset = find_summary_responsible_by_name(queryset, summary_responsible) return queryset.select_related('learning_container_year', 'academic_year') def find_summary_responsible_by_name(queryset, name): for term in name.split(): queryset = queryset.filter( Q(attribution__tutor__person__first_name__icontains=term) | Q(attribution__tutor__person__last_name__icontains=term) ) return queryset.filter(attribution__summary_responsible=True).distinct() def _build_tutor_filter(name_type): return '__'.join(['learningunitcomponent', 'learning_component_year', 'attributionchargenew', 'attribution', 'tutor', 'person', name_type, 'iregex']) def convert_status_bool(status): if status in (active_status.ACTIVE, active_status.INACTIVE): boolean = status == active_status.ACTIVE else: boolean = status return boolean def find_gte_year_acronym(academic_yr, acronym): return LearningUnitYear.objects.filter(academic_year__year__gte=academic_yr.year, acronym__iexact=acronym) def find_lt_year_acronym(academic_yr, acronym): return LearningUnitYear.objects.filter(academic_year__year__lt=academic_yr.year, acronym__iexact=acronym).order_by('academic_year') def check_if_acronym_regex_is_valid(acronym): return isinstance(acronym, str) and \ not acronym.startswith('*') and \ re.fullmatch(REGEX_ACRONYM_CHARSET, acronym.upper()) is not None def find_max_credits_of_related_partims(a_learning_unit_year): return a_learning_unit_year.get_partims_related().aggregate(max_credits=models.Max("credits"))["max_credits"] def find_partims_with_active_status(a_learning_unit_year): return a_learning_unit_year.get_partims_related().filter(status=True) def find_partims_with_different_periodicity(a_learning_unit_year): return a_learning_unit_year.get_partims_related().exclude(periodicity=a_learning_unit_year.periodicity) def find_by_learning_unit(a_learning_unit): return search(learning_unit=a_learning_unit) def find_by_entities(entities): return LearningUnitYear.objects.filter(learning_container_year__entitycontaineryear__entity__in=entities) def find_latest_by_learning_unit(a_learning_unit): return search(learning_unit=a_learning_unit).order_by('academic_year').last() def find_lt_learning_unit_year_with_different_acronym(a_learning_unit_yr): return LearningUnitYear.objects.filter(learning_unit__id=a_learning_unit_yr.learning_unit.id, academic_year__year__lt=a_learning_unit_yr.academic_year.year, proposallearningunit__isnull=True) \ .order_by('-academic_year') \ .exclude(acronym__iexact=a_learning_unit_yr.acronym).first() def find_learning_unit_years_by_academic_year_tutor_attributions(academic_year, tutor): """ In this function, only learning unit year with containers is visible! 
[no classes] """ qs = LearningUnitYear.objects_with_container.filter( academic_year=academic_year, attribution__tutor=tutor, ).distinct().order_by('academic_year__year', 'acronym') return qs def toggle_summary_locked(learning_unit_year_id): luy = LearningUnitYear.objects.get(pk=learning_unit_year_id) luy.summary_locked = not luy.summary_locked luy.save() return luy
agpl-3.0
-1,512,369,831,425,935,600
43.853748
120
0.663746
false
3.836591
false
false
false
openworm/Blender2NeuroML
src/Entity/Entity.py
1
21651
''' Created on 03.06.2011 @author: Sergey Khayrulin ''' from __future__ import absolute_import from Entity.Vertex import Vertex from Entity.Face import Face from Entity.Slice import Slice, AlternateSlice from Entity.Helper import * import pprint import math class Entity(object): ''' Main Class which process data from blender file or WRL(formated file). ''' def __init__(self): ''' Constructor ''' self.vertices = [] self.faces = Faces() self.resulting_points = [] self.checked_points = [] self.neuronInfo = '' def clean_all(self): self.faces.clean_all() def add_vertex(self, coordinates): ''' Method add vertex to collection point. It get a collection of coordinates of point, create point and append it to collection of point. ''' try: if len(coordinates) != 3: raise ParserException('Error') point = Vertex(float(coordinates[0]),float(coordinates[1]),float(coordinates[2])) self.vertices.append(point) except ParserException as ex: print('It should be some incorrect data') raise ex def add_face(self, points_arr): ''' Method add face to faces collection. It get a sequence of numbers which means position in point collection. ''' try: if len(points_arr) < 4: raise ParserException('Face contains more that 4 point') face = Face(self.vertices[int(points_arr[0])],self.vertices[int(points_arr[1])],self.vertices[int(points_arr[2])],self.vertices[int(points_arr[3])]) face.order = [int(points_arr[0]),int(points_arr[1]),int(points_arr[2]),int(points_arr[3])] self.faces[face.order] = face #print("add_face %s" % face.order) #self.faces.append(face) except ParserException as ex: print('Error:%s'%ex) print(points_arr) raise ex def findCenterOfSoma(self, use_method2 = False): ''' Method find start point for work main algorithm first point should be in soma. Soma is the biggest segment of cell. 
''' iter = 0 temp_points = [] slices = [] for p in range(len(self.vertices)): temp_points.append(HelpPoint(p,0)) if use_method2: startSlice = Slice(temp_points,self.faces, use_method2 = True, vertices = self.vertices) point_on_perimeter = self.vertices[startSlice[0].point] self.checked_points += startSlice.extra_dict['points_in_soma'] self.start_center_point = startSlice.extra_dict['center_pt'] self.start_center_point.diametr = 2 * self.start_center_point.len_between_point(point_on_perimeter) self.starting_slice = startSlice return slice = Slice(temp_points,self.faces) slices.append(slice) while len(slice) != 0: temp_points = list(filter(lambda p: not slice.__contains__(p), temp_points)) slice = None slice = Slice(temp_points,self.faces) if len(slice) != 0: slices.append(slice) #if not (iter % 10): # print('slice %d iter %d' % (len(temp_points), iter)) #slice.printSlice() #print slice.getPerimetr(self.vertices) iter += 1 # find slice with longest line segments perimiter_coll = sorted(slices,key=lambda slice:slice.getPerimetr(self.vertices), reverse=True) startSlice = Slice(perimiter_coll[0],self.faces) #print("findCenterOfSoma while loop done %d %d" % (iter, len(temp_points))) try: self.start_center_point = self.__getCenterPoint(startSlice, minimal = True) except IndexError: print("no center point startSlice %d perimiter_coll %d" % (len(startSlice), len(perimiter_coll[0]))) for face in self.faces.keys(): print("face order %s" % face) # the coordinates aren't organized in a pattern that the normal # code in Slice can understand, so we use an alternate method return self.findCenterOfSoma(use_method2 = True) if not use_method2: point_on_perimeter = self.vertices[perimiter_coll[0][0].point] self.start_center_point.diametr = 2 * self.start_center_point.len_between_point(point_on_perimeter) def getAllBrunches(self): ''' Method return dictionary which contains pair key=>value: key it's name of neurite, value - it's sorted sequence numbers which means position in resulting_points collection for instance 'axon' => [1,2,4] ''' brunches_temp = {} result_coll = {} i = 0 roots = [self.resulting_points.index(p) for p in self.resulting_points \ if p.parentPoint == 0 and self.resulting_points.index(p) != 0] for root in roots: brunches_temp[root] = [] for p in self.resulting_points: parent = p.getRoot(self.resulting_points) if parent == root: brunches_temp[root].append(self.resulting_points.index(p)) # the first of these two lines works with python3, the second with python2: #for k1, value in sorted(brunches_temp.iteritems(),key=lambda k,v:(len(v),k),reverse=True): # we try to determine for k1, value in sorted(brunches_temp.iteritems(),key=lambda (k,v):(len(v),k),reverse=True): # we try to determine if i == 0: for j in value: self.resulting_points[j].isAxon = True result_coll['axon'] = value else: for j in value: if self.resulting_points[j].cable != 2: self.resulting_points[j].isDendrite = True self.resulting_points[j].cable = 3 result_coll['dendrite' + str(i)] = value i += 1 return result_coll def use_alt_slice(self): return hasattr(self, 'starting_slice') def create_slice(self, coll, allow_checked = False): if self.use_alt_slice(): if not allow_checked: coll = filter(lambda p: not self.checked_points.__contains__(p.point), coll) slice = AlternateSlice(coll,self.faces, self.vertices, self.checked_points, self.vertices[self.starting_slice[0].point], None, allow_checked) else: slice = Slice(coll,self.faces) return slice def branching(self, slice): if not self.use_alt_slice(): return False for p in 
range(len(slice)): if len(self.starting_slice.extra_dict['adjacentPoints'][slice[p].point]) == 5: return True return False def find_point(self,center_point=Vertex(),iteration=0, parentPoint=0, isNeurite=False, isBrunchStart=False, _slice=None): ''' Main function find axon dendrite and neurite ''' vector_len = [] print("enter find_point iteration %d isBrunchStart %d" % (iteration, isBrunchStart)) if iteration == 0: center_point = self.start_center_point if isNeurite: res_point = Result_Point(center_point,parentPoint,2,isBrunchStart) res_point.isNeurite = True self.resulting_points.append(res_point) elif iteration != 0: self.resulting_points.append(Result_Point(center_point,parentPoint,1,isBrunchStart)) elif iteration == 0: self.resulting_points.append(Result_Point(center_point,parentPoint,0,isBrunchStart)) current_point = len(self.resulting_points) - 1 for p in range(len(self.vertices)): vector_len.append(HelpPoint(p,self.vertices[p].len_between_point(center_point))) vector_len = sorted(vector_len,key=lambda p:p.lenght) tmp_list = [] if iteration != 0: ''' If iteration != 0 that means we are should find next 4 or more(if we find place of brunching 6 or 8) vertices ''' if _slice is not None: slice = _slice else: slice = self.create_slice(vector_len) adjacentPoints = [] use_v5 = iteration >= 3 and self.branching(slice) # with 5 adjacent points for p in range(4): if use_v5 and not isBrunchStart: c = slice[p].point tmp_list.append(c) adjacentPoints.append(HelpPoint(c, self.vertices[c].len_between_point(center_point))) if use_v5 and isBrunchStart: #print("use_v5 br %d p %d" % (len(slice), p)) coll = self.__find_adjacent_vertices5(slice[p].point) elif p != 3: coll = self.__find_adjacent_vertices(slice[p].point, slice[p+1].point) else: coll = self.__find_adjacent_vertices(slice[p].point, slice[0].point) #print("%d-%d has %d adj v" % (slice[p].point, slice[(p+1)%4].point, len(coll))) for c in coll: helpPoint = HelpPoint(c,self.vertices[c].len_between_point(center_point)) #print("%3d %3d is checked? 
%d" % (p, c, self.checked_points.__contains__(c))) if not adjacentPoints.__contains__(helpPoint): if not self.checked_points.__contains__(c): adjacentPoints.append(helpPoint) tmp_list.append(c) print("got %d adjacentPoints %s" % (len(adjacentPoints), tmp_list)) if len(adjacentPoints) == 0: return ''' If we find 8 adjacent vertices it means that we place in branching segments ''' if len(adjacentPoints) > 4 and not (use_v5 and isBrunchStart): if self.__more4AdjacentPointCase(adjacentPoints, slice, isBrunchStart,iteration, current_point, center_point): return del vector_len[:] vector_len = [HelpPoint(p.point,self.vertices[p.point].len_between_point(center_point)) for p in adjacentPoints if not self.checked_points.__contains__(p.point)] vector_len = sorted(vector_len,key=lambda p:p.lenght) if self.use_alt_slice(): vector_len = filter(lambda p: not self.checked_points.__contains__(p.point), vector_len) if iteration == 0: adj_dict = self.starting_slice.extra_dict['adjacentPoints'] else: adj_dict = None slice = AlternateSlice(vector_len,self.faces, self.vertices, self.checked_points, self.vertices[self.starting_slice[0].point], adj_dict) else: slice = Slice(vector_len,self.faces) lenOfSlice = len(slice) print("lenOfSlice %d iter %d %d" % (lenOfSlice, iteration, len(vector_len))) if lenOfSlice == 0: slice = vector_len if len(slice) < 4: return new_center_point = self.__getCenterPoint(slice) iteration += 1 if lenOfSlice != 0: self.find_point(new_center_point,iteration,parentPoint=current_point,isNeurite=isNeurite,isBrunchStart=False, _slice=slice) else: if isNeurite: res_point = Result_Point(new_center_point,current_point,2,False) res_point.isNeurite = True self.resulting_points.append(res_point) elif iteration != 0: self.resulting_points.append(Result_Point(new_center_point,current_point,1,False)) if iteration == 1: self.__checkDendrite(slice, center_point, vector_len,current_point) def __getCenterPoint(self, slice, minimal = False): ''' Get center point like center of mass for input collection slice (usually it should be 4 point) ''' x=y=z=0 n_points = 4 if len(slice) < 4: print("Bad slice len %d" % len(slice)) if minimal and len(slice) > 0: n_points = len(slice) else: raise IndexError for p in range(n_points): x += self.vertices[slice[p].point].x y += self.vertices[slice[p].point].y z += self.vertices[slice[p].point].z if not self.checked_points.__contains__(slice[p].point): self.checked_points.append(slice[p].point) center_point = Vertex(x/n_points,y/n_points,z/n_points) center_point.diametr = 2 * center_point.len_between_point(self.vertices[slice[0].point]) if isinstance(slice, Slice): slice.printSlice() else: print(slice) return center_point def __find_adjacent_vertices(self, num_p1,num_p2): ''' Find for two point adjacent vertices ''' adjacentVertices = [] for key,f in self.faces.items(): if f.order.__contains__(num_p1) and f.order.__contains__(num_p2): for p in f.order: if p != num_p1 and p != num_p2: adjacentVertices.append(p) return adjacentVertices def __find_adjacent_vertices5(self, num_p1): ''' Find for one point adjacent vertices ''' adjacentVertices = [] for key,f in self.faces.items(): if f.order.__contains__(num_p1): for p in f.order: if p != num_p1 and not (p in adjacentVertices): near_old_point = False for r_pt in self.resulting_points: dist = r_pt.point.len_between_point(self.vertices[p]) if dist < r_pt.point.diametr: near_old_point = True break if not near_old_point: adjacentVertices.append(p) return adjacentVertices def __fillUpBrachesCollection(self, adjacentPoints, slice): 
''' Fill branches collection ''' branchesCollection = [] for i in range(4): for p1 in adjacentPoints: for p2 in adjacentPoints: if p1 == p2: continue s = self.create_slice([slice[i], slice[(i + 1) % 4], p1, p2], allow_checked = True) if (len(s) == 4): if not branchesCollection.__contains__(s): branchesCollection.append(s) if len(self.create_slice(adjacentPoints)) != 0: branchesCollection.append(self.create_slice(adjacentPoints)) return branchesCollection def __more4AdjacentPointCase(self, adjacentPoints, slice, isBrunch,iteration, current_point, center_point): ''' Work when algorithm find more that 4 adjacent points ''' branchesCollection = self.__fillUpBrachesCollection(adjacentPoints, slice) if len(branchesCollection) >= 2 : center_points = {} thirdBrunchCollection = [] for branch in branchesCollection: branch_center_point = self.__getCenterPoint(branch) center_points[branch_center_point] = branch print("%d center_points" % (len(center_points.keys()))) for branch_center_point,branch in center_points.items(): old_num_r_points = len(self.resulting_points) print("start branch %d %d %d %d size %d %3d resulting_points" % (branch[0].point, branch[1].point, branch[2].point, branch[3].point, len(branch), len(self.resulting_points))) self.find_point(branch_center_point,iteration,current_point,True,True, _slice=branch) print("finish branch %d %3d resulting_points" % (branch[0].point, len(self.resulting_points))) if self.use_alt_slice() and len(self.resulting_points) == old_num_r_points + 1: del self.resulting_points[-1] print("undo branches of length 1") if len(adjacentPoints) > 6: thirdBrunchCollection.extend(branch) thirdBrunchPoints = [HelpPoint(p.point,self.vertices[p.point].len_between_point(center_point)) \ for p in thirdBrunchCollection if not slice.__contains__(p)] slice_t = self.create_slice(thirdBrunchPoints) if len(slice_t) == 4: third_brunch_center_point = self.__getCenterPoint(slice_t) self.find_point(third_brunch_center_point,iteration, current_point,True,True, _slice=slice_t) return True elif len(branchesCollection) == 0 or (len(branchesCollection) == 1 and not isBrunch): sortedadjacentPoints = sorted(adjacentPoints,key=lambda p:p.lenght) first_slice = self.create_slice(sortedadjacentPoints) second_slice = self.create_slice(filter(lambda p: first_slice.__contains__(p) == False, sortedadjacentPoints)) perimeter_1 = first_slice.getPerimetr(self.vertices) perimeter_2 = second_slice.getPerimetr(self.vertices) if perimeter_1 > perimeter_2 and perimeter_2 != 0: new_center_point = self.__getCenterPoint(second_slice) self.find_point(new_center_point,iteration, current_point,False,False, _slice=second_slice) return True elif perimeter_1 < perimeter_2 or perimeter_2 == 0: if perimeter_1 == 0: if len(branchesCollection) == 1: first_slice = branchesCollection[0] else: first_slice.getFaceFromColl(adjacentPoints,self.faces) new_center_point = self.__getCenterPoint(first_slice) self.find_point(new_center_point,iteration, current_point,isBrunch,False, _slice=first_slice) else: new_center_point = self.__getCenterPoint(first_slice) self.find_point(new_center_point,iteration, current_point,False,False, _slice=first_slice) return True elif len(branchesCollection) == 1 and isBrunch: slice = branchesCollection[0] if len(slice) == 0: slice = slice.getFaceFromColl(adjacentPoints,self.faces) try: new_center_point = self.__getCenterPoint(slice) except IndexError: print("Warning: __getCenterPoint failed, slice len %d, %d adjacentPoints" % (len(slice), len(adjacentPoints))) slice.printSlice() return False 
self.find_point(new_center_point,iteration, parentPoint=current_point,isNeurite=True,isBrunchStart=False, _slice=slice) return True return False def __checkDendrite(self, slice, center_point, vector_len, current_point): ''' Private Method. Check if soma has other output processes if it's contain than run find_point for it. ''' iteration = 1 vector_len = filter(lambda p: slice.__contains__(p) == False and self.checked_points.__contains__(p.point) == False, vector_len) vector_len = sorted(vector_len,key=lambda p:p.lenght) for i in range(5): slice2 = self.create_slice(vector_len) if (len(slice2) == 4 and int(slice.getPerimetr(self.vertices) / slice2.getPerimetr(self.vertices)) <= 1 and int(slice2.getPerimetr(self.vertices) / slice.getPerimetr(self.vertices)) <= 1): new_center_point = self.__getCenterPoint(slice2) iteration += 1 self.find_point(new_center_point,iteration,parentPoint=current_point,isNeurite=False,isBrunchStart=False, _slice=slice2) vector_len = filter(lambda p: slice2.__contains__(p) == False and self.checked_points.__contains__(p.point) == False, vector_len) vector_len = sorted(vector_len, key=lambda p:p.lenght) # # check_unused_coordinates might be of some use in checking for # sections of a neuron that were omitted due to flaws in the code # def check_unused_coordinates(self): for key,f in self.faces.items(): unused = True for p in f.order: if p in self.checked_points: unused = False break if unused: print("unused face %s" % f.order)
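A small, heavily hedged driver for the Entity class above: it only exercises the vertex/face loading API on a toy quad, assumes the sibling Entity.* modules import cleanly, and does not attempt the full soma/neurite extraction, which needs a closed neuron mesh exported from Blender.

# Illustrative only -- a single quad is not a valid neuron surface.
from Entity.Entity import Entity

ent = Entity()

# add_vertex() expects a 3-element sequence of coordinate values
for coords in (['0', '0', '0'], ['1', '0', '0'], ['1', '1', '0'], ['0', '1', '0']):
    ent.add_vertex(coords)

# add_face() expects at least four vertex indices (a quad face)
ent.add_face(['0', '1', '2', '3'])

print('vertices loaded:', len(ent.vertices))

# A real run would continue with ent.findCenterOfSoma() and ent.find_point(),
# then read ent.getAllBrunches() / ent.resulting_points.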
mit
-6,809,170,652,540,044,000
47.095238
160
0.549813
false
4.067443
false
false
false
cansik/pyunicon
pyunicon/Cocoa/CocoaMouse.py
1
2057
from Quartz.CoreGraphics import CGEventCreateMouseEvent from Quartz.CoreGraphics import CGEventPost from Quartz.CoreGraphics import kCGEventMouseMoved from Quartz.CoreGraphics import kCGEventLeftMouseDown from Quartz.CoreGraphics import kCGEventLeftMouseUp from Quartz.CoreGraphics import kCGEventRightMouseDown from Quartz.CoreGraphics import kCGEventRightMouseUp from Quartz.CoreGraphics import kCGMouseButtonLeft from Quartz.CoreGraphics import kCGHIDEventTap from Quartz.CoreGraphics import CGEventCreate from Quartz.CoreGraphics import CGEventGetLocation from Quartz.CoreGraphics import CGWarpMouseCursorPosition from pyunicon.util import UCMouseKey __author__ = 'cansik' class CocoaMouse(object): def __init__(self): pass def __mouse_event(self, type, x, y): mouse_event = CGEventCreateMouseEvent(None, type, (x, y), kCGMouseButtonLeft) CGEventPost(kCGHIDEventTap, mouse_event) def move(self, x, y): self.__mouse_event(kCGEventMouseMoved, x, y) CGWarpMouseCursorPosition((x, y)) # todo: fix race condition (get position is not accurate) def get_position(self): mouse_event = CGEventCreate(None) pos = CGEventGetLocation(mouse_event) return pos.x, pos.y def press(self, mouse_key): x, y = self.get_position() if mouse_key is UCMouseKey.UC_MOUSE_LEFT: self.__mouse_event(kCGEventLeftMouseDown, x, y) elif mouse_key is UCMouseKey.UC_MOUSE_MIDDLE: print("mouse middle not supported on OSX!") elif mouse_key is UCMouseKey.UC_MOUSE_RIGHT: self.__mouse_event(kCGEventRightMouseDown, x, y) def release(self, mouse_key): x, y = self.get_position() if mouse_key is UCMouseKey.UC_MOUSE_LEFT: self.__mouse_event(kCGEventLeftMouseUp, x, y) elif mouse_key is UCMouseKey.UC_MOUSE_MIDDLE: print("mouse middle not supported on OSX!") elif mouse_key is UCMouseKey.UC_MOUSE_RIGHT: self.__mouse_event(kCGEventRightMouseUp, x, y)
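A short usage sketch for the CocoaMouse wrapper above; it is only meaningful on macOS with the pyobjc Quartz bindings installed, and assumes the pyunicon package is importable under the path shown for this file.

from pyunicon.Cocoa.CocoaMouse import CocoaMouse
from pyunicon.util import UCMouseKey

mouse = CocoaMouse()

# Move the cursor, then perform a left click at the new position.
mouse.move(200, 300)
mouse.press(UCMouseKey.UC_MOUSE_LEFT)
mouse.release(UCMouseKey.UC_MOUSE_LEFT)

# get_position() reads the location back from a fresh CGEvent; the class
# itself notes a possible race immediately after move().
print(mouse.get_position())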
mit
2,062,059,134,703,579,100
37.092593
85
0.712202
false
3.528302
false
false
false
AhmedHani/Neural-Networks-for-ML
Implementations/simple_word2vec/cbow.py
1
2983
import tensorflow as tf class CBOW(object): def __init__(self, args): self.__args = args self.__ngram_size = args.ngram_size self.__input_size = self.__ngram_size - 1 self.__vocab_size = args.vocab_size + 1 self.__embedding_dim = args.embedding_dim self.__learning_rate = args.learning_rate self.__activation_function = args.activation_function self.__optimizer = args.optimizer self.__loss_function = args.loss_function def init_session(self, restore=False): self.__session = tf.Session() if restore: self.__saver = tf.train.Saver() self.__saver.restore(self.__session, self.__args.model) def build(self): self.__input = tf.placeholder(tf.float32, [None, self.__input_size * self.__vocab_size]) self.__output = tf.placeholder(tf.float32, [None, self.__vocab_size]) self.__input_to_hidden_weights = tf.get_variable("ih_w", shape=[self.__input_size * self.__vocab_size, self.__embedding_dim], initializer=tf.contrib.layers.xavier_initializer()) self.__input_to_hidden_bias = tf.Variable(tf.ones(self.__embedding_dim)) self.__hidden_to_output_weights = tf.get_variable("ho_w", shape=[self.__embedding_dim, self.__vocab_size], initializer=tf.contrib.layers.xavier_initializer()) self.__hidden_to_output_bias = tf.Variable(tf.ones([self.__vocab_size])) if self.__optimizer.lower() == "sgd": self.__optimizer = tf.train.GradientDescentOptimizer(self.__learning_rate) elif self.__optimizer.lower() == "adam": self.__optimizer = tf.train.AdamOptimizer(self.__learning_rate) self.__embedding_layer = tf.matmul(self.__input, self.__input_to_hidden_weights) + self.__input_to_hidden_bias if self.__activation_function.lower() == "tanh": self.__embedding_layer = tf.nn.tanh(self.__embedding_layer) elif self.__activation_function.lower() == "relu": self.__embedding_layer = tf.nn.relu(self.__embedding_layer) self.__output_layer = tf.matmul(self.__embedding_layer, self.__hidden_to_output_weights) + self.__hidden_to_output_bias self.__output_layer = tf.nn.softmax(self.__output_layer) if self.__loss_function.lower() == "mse": self.__cost_function = 0.5 * tf.reduce_sum(tf.square(self.__output_layer - self.__output)) elif self.__loss_function.lower() == "ce": self.__cost_function = -tf.reduce_mean((self.__output * tf.log(self.__output_layer)) + ((1 - self.__output) * tf.log(1 - self.__output_layer))) self.__train = self.__optimizer.minimize(self.__cost_function) def run(self, x_input, y_output): self.__session.run(tf.global_variables_initializer()) error = self.__session.run(self.__cost_function, feed_dict={self.__input: x_input, self.__output: y_output}) return error
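A hedged driver for the CBOW class above. It assumes TensorFlow 1.x (the class relies on tf.placeholder, tf.Session and tf.contrib) and fabricates a tiny one-hot batch; the argument container stands in for whatever argparse namespace the original training script passes.

import numpy as np
from types import SimpleNamespace
from cbow import CBOW  # assumed import, matching this file's name

args = SimpleNamespace(
    ngram_size=3,          # two context words predict one target
    vocab_size=9,          # the class adds 1 internally -> 10 one-hot slots
    embedding_dim=8,
    learning_rate=0.05,
    activation_function='tanh',
    optimizer='sgd',
    loss_function='ce',
    model=None,            # only read when restoring a saved session
)

model = CBOW(args)
model.build()
model.init_session()

vocab = args.vocab_size + 1                  # internal one-hot width
input_width = (args.ngram_size - 1) * vocab  # concatenated context words

# One toy pair: context word ids (1, 4) -> target word id 7.
x = np.zeros((1, input_width), dtype=np.float32)
x[0, 1] = 1.0          # first context word
x[0, vocab + 4] = 1.0  # second context word
y = np.zeros((1, vocab), dtype=np.float32)
y[0, 7] = 1.0

# run() re-initialises the variables and returns the current loss value.
print(model.run(x, y))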
gpl-3.0
-121,940,656,376,534,380
49.576271
166
0.612471
false
3.719451
false
false
false
alexsiri7/RoboScrum
stories/views.py
1
2140
from django.shortcuts import render_to_response, get_object_or_404 from django.template import Context, loader from stories.models import Story, Sprint from django.http import HttpResponse from django.views.generic import DetailView, ListView class SprintView(DetailView): days = ["", "","Mon", "", "", "", "Tue", "", "", "", "Wed", "", "", "", "Thu", "", "Fri"] model = Sprint def get_context_data(self, **kwargs): context = super(SprintView, self).get_context_data(**kwargs) if self.object.is_finished: context['burndown'] = self.burndown() else: context['burndown_schema'] = self.burndown_schema() return context def burndown(self): total = self.object.original_commitment() burn = map(lambda (i,e): (self.days[i], total-total*i/4, total*1.2-total*i/4*1.2, total*0.8-total*i/4*0.8,total-e),enumerate(self.object.burnup())) return burn def burndown_schema(self): total = self.object.original_commitment() burn = map(lambda (i,e): ( self.days[i], total-total*i/17, total*1.2-total*i/17*1.2, total*0.8-total*i/17*0.8) ,enumerate(range(17))) return burn class SprintListView(ListView): queryset = Sprint.objects.all().order_by('-start_date') def get_context_data(self, **kwargs): context = super(SprintListView, self).get_context_data(**kwargs) context['TVI'] = self.getTVI() context['Points'] = self.getPoints() context['Pct'] = self.getPct() return context def getTVI(self): return map(lambda s: (s.number, s.targeted_value_increase()), self.object_list.order_by('start_date').filter(is_finished=True).all()) def getPoints(self): return map(lambda s: (s.number, s.work_capacity()*100/s.member_dedication, s.velocity()*100/s.member_dedication, s.original_commitment()*100/s.member_dedication), self.object_list.order_by('start_date').filter(is_finished=True).all()) def getPct(self): return map(lambda s: (s.number, s.focus_factor(), s.accuracy_of_estimation(), s.accuracy_of_commit()), self.object_list.order_by('start_date').filter(is_finished=True).all())
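A hedged URL configuration for the class-based views above; the patterns and names are assumptions, written against the old django.conf.urls.url() API to stay consistent with the Django 1.x-era imports (render_to_response) used in the file.

# Hypothetical stories/urls.py -- pattern names are illustrative.
from django.conf.urls import url
from stories.views import SprintView, SprintListView

urlpatterns = [
    # newest sprints first, with TVI / points / percentage context
    url(r'^sprints/$', SprintListView.as_view(), name='sprint-list'),
    # burndown (or burndown schema for unfinished sprints) for one sprint
    url(r'^sprints/(?P<pk>\d+)/$', SprintView.as_view(), name='sprint-detail'),
]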
gpl-3.0
218,280,061,264,985,020
43.583333
171
0.659346
false
3.065903
false
false
false
Knio/dominate
dominate/dom_tag.py
1
12996
__license__ = ''' This file is part of Dominate. Dominate is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Dominate is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Dominate. If not, see <http://www.gnu.org/licenses/>. ''' # pylint: disable=bad-indentation, bad-whitespace, missing-docstring import copy import numbers from collections import defaultdict, namedtuple from functools import wraps import threading try: # Python 3 from collections.abc import Callable except ImportError: # Python 2.7 from collections import Callable try: basestring = basestring except NameError: # py3 basestring = str unicode = str try: import greenlet except ImportError: greenlet = None def _get_thread_context(): context = [threading.current_thread()] if greenlet: context.append(greenlet.getcurrent()) return hash(tuple(context)) class dom_tag(object): is_single = False # Tag does not require matching end tag (ex. <hr/>) is_pretty = True # Text inside the tag should be left as-is (ex. <pre>) # otherwise, text will be escaped() and whitespace may be # modified is_inline = False def __new__(_cls, *args, **kwargs): ''' Check if bare tag is being used a a decorator (called with a single function arg). decorate the function and return ''' if len(args) == 1 and isinstance(args[0], Callable) \ and not isinstance(args[0], dom_tag) and not kwargs: wrapped = args[0] @wraps(wrapped) def f(*args, **kwargs): with _cls() as _tag: return wrapped(*args, **kwargs) or _tag return f return object.__new__(_cls) def __init__(self, *args, **kwargs): ''' Creates a new tag. Child tags should be passed as arguments and attributes should be passed as keyword arguments. There is a non-rendering attribute which controls how the tag renders: * `__inline` - Boolean value. If True renders all children tags on the same line. ''' self.attributes = {} self.children = [] self.parent = None self.document = None # Does not insert newlines on all children if True (recursive attribute) self.is_inline = kwargs.pop('__inline', self.is_inline) self.is_pretty = kwargs.pop('__pretty', self.is_pretty) #Add child elements if args: self.add(*args) for attr, value in kwargs.items(): self.set_attribute(*type(self).clean_pair(attr, value)) self._ctx = None self._add_to_ctx() # context manager frame = namedtuple('frame', ['tag', 'items', 'used']) # stack of frames _with_contexts = defaultdict(list) def _add_to_ctx(self): stack = dom_tag._with_contexts.get(_get_thread_context()) if stack: self._ctx = stack[-1] stack[-1].items.append(self) def __enter__(self): stack = dom_tag._with_contexts[_get_thread_context()] stack.append(dom_tag.frame(self, [], set())) return self def __exit__(self, type, value, traceback): thread_id = _get_thread_context() stack = dom_tag._with_contexts[thread_id] frame = stack.pop() for item in frame.items: if item in frame.used: continue self.add(item) if not stack: del dom_tag._with_contexts[thread_id] def __call__(self, func): ''' tag instance is being used as a decorator. 
wrap func to make a copy of this tag ''' # remove decorator from its context so it doesn't # get added in where it was defined if self._ctx: self._ctx.used.add(self) @wraps(func) def f(*args, **kwargs): tag = copy.deepcopy(self) tag._add_to_ctx() with tag: return func(*args, **kwargs) or tag return f def set_attribute(self, key, value): ''' Add or update the value of an attribute. ''' if isinstance(key, int): self.children[key] = value elif isinstance(key, basestring): self.attributes[key] = value else: raise TypeError('Only integer and string types are valid for assigning ' 'child tags and attributes, respectively.') __setitem__ = set_attribute def delete_attribute(self, key): if isinstance(key, int): del self.children[key:key+1] else: del self.attributes[key] __delitem__ = delete_attribute def setdocument(self, doc): ''' Creates a reference to the parent document to allow for partial-tree validation. ''' # assume that a document is correct in the subtree if self.document != doc: self.document = doc for i in self.children: if not isinstance(i, dom_tag): return i.setdocument(doc) def add(self, *args): ''' Add new child tags. ''' for obj in args: if isinstance(obj, numbers.Number): # Convert to string so we fall into next if block obj = str(obj) if isinstance(obj, basestring): obj = escape(obj) self.children.append(obj) elif isinstance(obj, dom_tag): stack = dom_tag._with_contexts.get(_get_thread_context()) if stack: stack[-1].used.add(obj) self.children.append(obj) obj.parent = self obj.setdocument(self.document) elif isinstance(obj, dict): for attr, value in obj.items(): self.set_attribute(*dom_tag.clean_pair(attr, value)) elif hasattr(obj, '__iter__'): for subobj in obj: self.add(subobj) else: # wtf is it? raise ValueError('%r not a tag or string.' % obj) if len(args) == 1: return args[0] return args def add_raw_string(self, s): self.children.append(s) def remove(self, obj): self.children.remove(obj) def clear(self): for i in self.children: if isinstance(i, dom_tag) and i.parent is self: i.parent = None self.children = [] def get(self, tag=None, **kwargs): ''' Recursively searches children for tags of a certain type with matching attributes. ''' # Stupid workaround since we can not use dom_tag in the method declaration if tag is None: tag = dom_tag attrs = [(dom_tag.clean_attribute(attr), value) for attr, value in kwargs.items()] results = [] for child in self.children: if (isinstance(tag, basestring) and type(child).__name__ == tag) or \ (not isinstance(tag, basestring) and isinstance(child, tag)): if all(child.attributes.get(attribute) == value for attribute, value in attrs): # If the child is of correct type and has all attributes and values # in kwargs add as a result results.append(child) if isinstance(child, dom_tag): # If the child is a dom_tag extend the search down through its children results.extend(child.get(tag, **kwargs)) return results def __getitem__(self, key): ''' Returns the stored value of the specified attribute or child (if it exists). ''' if isinstance(key, int): # Children are accessed using integers try: return object.__getattribute__(self, 'children')[key] except KeyError: raise IndexError('Child with index "%s" does not exist.' % key) elif isinstance(key, basestring): # Attributes are accessed using strings try: return object.__getattribute__(self, 'attributes')[key] except KeyError: raise AttributeError('Attribute "%s" does not exist.' 
% key) else: raise TypeError('Only integer and string types are valid for accessing ' 'child tags and attributes, respectively.') __getattr__ = __getitem__ def __len__(self): ''' Number of child elements. ''' return len(self.children) def __bool__(self): ''' Hack for "if x" and __len__ ''' return True __nonzero__ = __bool__ def __iter__(self): ''' Iterates over child elements. ''' return self.children.__iter__() def __contains__(self, item): ''' Checks recursively if item is in children tree. Accepts both a string and a class. ''' return bool(self.get(item)) def __iadd__(self, obj): ''' Reflexive binary addition simply adds tag as a child. ''' self.add(obj) return self # String and unicode representations are the same as render() def __unicode__(self): return self.render() __str__ = __unicode__ def render(self, indent=' ', pretty=True, xhtml=False): data = self._render([], 0, indent, pretty, xhtml) return u''.join(data) def _render(self, sb, indent_level, indent_str, pretty, xhtml): pretty = pretty and self.is_pretty name = getattr(self, 'tagname', type(self).__name__) # Workaround for python keywords and standard classes/methods # (del, object, input) if name[-1] == '_': name = name[:-1] # open tag sb.append('<') sb.append(name) for attribute, value in sorted(self.attributes.items()): if value is not False: # False values must be omitted completely sb.append(' %s="%s"' % (attribute, escape(unicode(value), True))) sb.append(' />' if self.is_single and xhtml else '>') if not self.is_single: inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml) if pretty and not inline: sb.append('\n') sb.append(indent_str * indent_level) # close tag sb.append('</') sb.append(name) sb.append('>') return sb def _render_children(self, sb, indent_level, indent_str, pretty, xhtml): inline = True for child in self.children: if isinstance(child, dom_tag): if pretty and not child.is_inline: inline = False sb.append('\n') sb.append(indent_str * indent_level) child._render(sb, indent_level, indent_str, pretty, xhtml) else: sb.append(unicode(child)) return inline def __repr__(self): name = '%s.%s' % (self.__module__, type(self).__name__) attributes_len = len(self.attributes) attributes = '%s attribute' % attributes_len if attributes_len != 1: attributes += 's' children_len = len(self.children) children = '%s child' % children_len if children_len != 1: children += 'ren' return '<%s at %x: %s, %s>' % (name, id(self), attributes, children) @staticmethod def clean_attribute(attribute): ''' Normalize attribute names for shorthand and work arounds for limitations in Python's syntax ''' # Shorthand attribute = { 'cls': 'class', 'className': 'class', 'class_name': 'class', 'fr': 'for', 'html_for': 'for', 'htmlFor': 'for', }.get(attribute, attribute) # Workaround for Python's reserved words if attribute[0] == '_': attribute = attribute[1:] # Workaround for dash special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')]) if attribute in set(['http_equiv']) or special_prefix: attribute = attribute.replace('_', '-').lower() # Workaround for colon if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'): attribute = attribute.replace('_', ':', 1).lower() return attribute @classmethod def clean_pair(cls, attribute, value): ''' This will call `clean_attribute` on the attribute and also allows for the creation of boolean attributes. Ex. input(selected=True) is equivalent to input(selected="selected") ''' attribute = cls.clean_attribute(attribute) # Check for boolean attributes # (i.e. 
selected=True becomes selected="selected") if value is True: value = attribute # Ignore `if value is False`: this is filtered out in render() return (attribute, value) _get_current_none = object() def get_current(default=_get_current_none): ''' get the current tag being used as a with context or decorated function. if no context is active, raises ValueError, or returns the default, if provided ''' h = _get_thread_context() ctx = dom_tag._with_contexts.get(h, None) if ctx: return ctx[-1].tag if default is _get_current_none: raise ValueError('no current context') return default def attr(*args, **kwargs): ''' Set attributes on the current active tag context ''' c = get_current() dicts = args + (kwargs,) for d in dicts: for attr, value in d.items(): c.set_attribute(*dom_tag.clean_pair(attr, value)) # escape() is used in render from .util import escape
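dom_tag is the machinery behind dominate's tag classes; the short sketch below shows the behaviour it provides (attribute cleaning, the with-context child stack, render()) through the public dominate.tags wrappers.

from dominate import tags

# clean_attribute() maps 'cls' -> 'class' and 'data_role' -> 'data-role'.
box = tags.div(cls='panel', data_role='card')

# __enter__/__exit__ push a frame so tags created inside the block
# are collected as children of `box`.
with box:
    tags.h1('Title')
    tags.p('Body text')

print(box.render())   # pretty-printed HTML
print(box['class'])   # attribute access via __getitem__ -> 'panel'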
lgpl-3.0
-2,593,975,155,927,345,700
25.740741
85
0.6255
false
3.903875
false
false
false
openmicroscopy/omero-marshal
omero_marshal/encode/encoders/mask.py
1
1127
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2015 Glencoe Software, Inc. All rights reserved. # # This software is distributed under the terms described by the LICENCE file # you can find at the root of the distribution bundle. # If the file is missing please request a copy by contacting # [email protected]. # from ... import SCHEMA_VERSION from .shape import ShapeEncoder from omero.model import MaskI class Mask201501Encoder(ShapeEncoder): TYPE = 'http://www.openmicroscopy.org/Schemas/ROI/2015-01#Mask' def encode(self, obj): v = super(Mask201501Encoder, self).encode(obj) self.set_if_not_none(v, 'X', obj.x) self.set_if_not_none(v, 'Y', obj.y) self.set_if_not_none(v, 'Width', obj.width) self.set_if_not_none(v, 'Height', obj.height) return v class Mask201606Encoder(Mask201501Encoder): TYPE = 'http://www.openmicroscopy.org/Schemas/OME/2016-06#Mask' if SCHEMA_VERSION == '2015-01': encoder = (MaskI, Mask201501Encoder) elif SCHEMA_VERSION == '2016-06': encoder = (MaskI, Mask201606Encoder) MaskEncoder = encoder[1]
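A hedged sketch of how an encoder registered this way is typically looked up; it assumes an OMERO.py environment where omero.model.MaskI can be instantiated, and treats the omero_marshal get_encoder() lookup as an assumption about the package's public API.

# Assumes omero-py is installed; get_encoder() is assumed to be the lookup.
from omero.model import MaskI
from omero.rtypes import rdouble
from omero_marshal import get_encoder

mask = MaskI()
mask.setX(rdouble(16.0))
mask.setY(rdouble(32.0))
mask.setWidth(rdouble(64.0))
mask.setHeight(rdouble(64.0))

encoder = get_encoder(mask.__class__)  # resolves to the Mask encoder above
payload = encoder.encode(mask)         # dict carrying 'X', 'Y', 'Width', 'Height'
print(payload)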
gpl-2.0
9,218,345,415,002,380,000
27.897436
76
0.692103
false
3.096154
false
false
false
jseabold/statsmodels
statsmodels/tsa/vector_ar/output.py
5
6945
from statsmodels.compat.python import lzip from io import StringIO import numpy as np from statsmodels.iolib import SimpleTable mat = np.array _default_table_fmt = dict( empty_cell = '', colsep=' ', row_pre = '', row_post = '', table_dec_above='=', table_dec_below='=', header_dec_below='-', header_fmt = '%s', stub_fmt = '%s', title_align='c', header_align = 'r', data_aligns = 'r', stubs_align = 'l', fmt = 'txt' ) class VARSummary(object): default_fmt = dict( #data_fmts = ["%#12.6g","%#12.6g","%#10.4g","%#5.4g"], #data_fmts = ["%#10.4g","%#10.4g","%#10.4g","%#6.4g"], data_fmts = ["%#15.6F","%#15.6F","%#15.3F","%#14.3F"], empty_cell = '', #colwidths = 10, colsep=' ', row_pre = '', row_post = '', table_dec_above='=', table_dec_below='=', header_dec_below='-', header_fmt = '%s', stub_fmt = '%s', title_align='c', header_align = 'r', data_aligns = 'r', stubs_align = 'l', fmt = 'txt' ) part1_fmt = dict( default_fmt, data_fmts = ["%s"], colwidths = 15, colsep=' ', table_dec_below='', header_dec_below=None, ) part2_fmt = dict( default_fmt, data_fmts = ["%#12.6g","%#12.6g","%#10.4g","%#5.4g"], colwidths = None, colsep=' ', table_dec_above='-', table_dec_below='-', header_dec_below=None, ) def __init__(self, estimator): self.model = estimator self.summary = self.make() def __repr__(self): return self.summary def make(self, endog_names=None, exog_names=None): """ Summary of VAR model """ buf = StringIO() buf.write(self._header_table() + '\n') buf.write(self._stats_table() + '\n') buf.write(self._coef_table() + '\n') buf.write(self._resid_info() + '\n') return buf.getvalue() def _header_table(self): import time model = self.model t = time.localtime() # TODO: change when we allow coef restrictions # ncoefs = len(model.beta) # Header information part1title = "Summary of Regression Results" part1data = [[model._model_type], ["OLS"], #TODO: change when fit methods change [time.strftime("%a, %d, %b, %Y", t)], [time.strftime("%H:%M:%S", t)]] part1header = None part1stubs = ('Model:', 'Method:', 'Date:', 'Time:') part1 = SimpleTable(part1data, part1header, part1stubs, title=part1title, txt_fmt=self.part1_fmt) return str(part1) def _stats_table(self): # TODO: do we want individual statistics or should users just # use results if wanted? # Handle overall fit statistics model = self.model part2Lstubs = ('No. of Equations:', 'Nobs:', 'Log likelihood:', 'AIC:') part2Rstubs = ('BIC:', 'HQIC:', 'FPE:', 'Det(Omega_mle):') part2Ldata = [[model.neqs], [model.nobs], [model.llf], [model.aic]] part2Rdata = [[model.bic], [model.hqic], [model.fpe], [model.detomega]] part2Lheader = None part2L = SimpleTable(part2Ldata, part2Lheader, part2Lstubs, txt_fmt = self.part2_fmt) part2R = SimpleTable(part2Rdata, part2Lheader, part2Rstubs, txt_fmt = self.part2_fmt) part2L.extend_right(part2R) return str(part2L) def _coef_table(self): model = self.model k = model.neqs Xnames = self.model.exog_names data = lzip(model.params.T.ravel(), model.stderr.T.ravel(), model.tvalues.T.ravel(), model.pvalues.T.ravel()) header = ('coefficient','std. 
error','t-stat','prob') buf = StringIO() dim = k * model.k_ar + model.k_trend + model.k_exog_user for i in range(k): section = "Results for equation %s" % model.names[i] buf.write(section + '\n') table = SimpleTable(data[dim * i : dim * (i + 1)], header, Xnames, title=None, txt_fmt = self.default_fmt) buf.write(str(table) + '\n') if i < k - 1: buf.write('\n') return buf.getvalue() def _resid_info(self): buf = StringIO() names = self.model.names buf.write("Correlation matrix of residuals" + '\n') buf.write(pprint_matrix(self.model.resid_corr, names, names) + '\n') return buf.getvalue() def normality_summary(results): title = "Normality skew/kurtosis Chi^2-test" null_hyp = 'H_0: data generated by normally-distributed process' return hypothesis_test_table(results, title, null_hyp) def hypothesis_test_table(results, title, null_hyp): fmt = dict(_default_table_fmt, data_fmts=["%#15.6F","%#15.6F","%#15.3F", "%s"]) buf = StringIO() table = SimpleTable([[results['statistic'], results['crit_value'], results['pvalue'], str(results['df'])]], ['Test statistic', 'Critical Value', 'p-value', 'df'], [''], title=None, txt_fmt=fmt) buf.write(title + '\n') buf.write(str(table) + '\n') buf.write(null_hyp + '\n') buf.write("Conclusion: %s H_0" % results['conclusion']) buf.write(" at %.2f%% significance level" % (results['signif'] * 100)) return buf.getvalue() def pprint_matrix(values, rlabels, clabels, col_space=None): buf = StringIO() T, K = len(rlabels), len(clabels) if col_space is None: min_space = 10 col_space = [max(len(str(c)) + 2, min_space) for c in clabels] else: col_space = (col_space,) * K row_space = max([len(str(x)) for x in rlabels]) + 2 head = _pfixed('', row_space) for j, h in enumerate(clabels): head += _pfixed(h, col_space[j]) buf.write(head + '\n') for i, rlab in enumerate(rlabels): line = ('%s' % rlab).ljust(row_space) for j in range(K): line += _pfixed(values[i,j], col_space[j]) buf.write(line + '\n') return buf.getvalue() def _pfixed(s, space, nanRep=None, float_format=None): if isinstance(s, float): if float_format: formatted = float_format(s) else: formatted = "%#8.6F" % s return formatted.rjust(space) else: return ('%s' % s)[:space].rjust(space)
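VARSummary is what a fitted VAR model prints; a short end-to-end example with the public statsmodels API is shown below (the random-walk data is made up).

import numpy as np
from statsmodels.tsa.api import VAR

# Toy 2-variable system: 200 observations of independent random walks.
rng = np.random.default_rng(0)
data = np.cumsum(rng.standard_normal((200, 2)), axis=0)

results = VAR(data).fit(maxlags=2)

# results.summary() builds the VARSummary object defined above; printing it
# emits the header, the fit statistics, one coefficient table per equation
# and the residual correlation matrix.
print(results.summary())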
bsd-3-clause
-7,572,108,318,767,246,000
27.004032
79
0.500216
false
3.495219
false
false
false
dmccloskey/ddt_python
ddt_python/ddt_container_biPlotAndValidation.py
1
20231
from .ddt_container import ddt_container from .ddt_tile import ddt_tile from .ddt_tile_html import ddt_tile_html class ddt_container_biPlotAndValidation(ddt_container): def make_biPlotAndValidation(self, data1,data2, data1_keys,data1_nestkeys,data1_keymap, data2_keys,data2_nestkeys,data2_keymap, ): '''Make a biPlot and model validation plot INPUT: data1 data2 data1_keys data1_nestkeys data1_keymap data2_keys data2_nestkeys data2_keymap ''' cnt = 0; #from 1: biplot form = ddt_tile(); form.make_tileparameters( tileparameters={ 'tileheader':'Bi Plot filter menu', 'tiletype':'html', 'tileid':"filtermenu1", 'rowid':"row1", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-4"} ); form.make_htmlparameters( htmlparameters = { 'htmlid':'filtermenuform1', "htmltype":'form_01', "formsubmitbuttonidtext":{'id':'submit1','text':'submit'}, "formresetbuttonidtext":{'id':'reset1','text':'reset'}, "formupdatebuttonidtext":{'id':'update1','text':'update'}}, ); self.add_parameters(form.get_parameters()); self.update_tile2datamap("filtermenu1",[cnt]); self.add_filtermenu( {"filtermenuid":"filtermenu1", "filtermenuhtmlid":"filtermenuform1", "filtermenusubmitbuttonid":"submit1", "filtermenuresetbuttonid":"reset1", "filtermenuupdatebuttonid":"update1"} ); #svg 1: biplot svg = ddt_tile(); svg.make_tileparameters( tileparameters={ 'tileheader':'Bi Plot', 'tiletype':'svg', 'tileid':"tile1", 'rowid':"row1", 'colid':"col2", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-8"} ); svg.make_svgparameters( svgparameters={ "svgtype":'scatterlineplot2d_01', "svgkeymap":[data1_keymap,data1_keymap], 'svgid':'svg1', "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 }, "svgwidth":500,"svgheight":350, "svgx1axislabel":"component", "svgy1axislabel":"variance explained", 'svgformtileid':'filtermenu1',} ); self.add_parameters(svg.get_parameters()); self.update_tile2datamap("tile1",[cnt,cnt]); # data 1: self.add_data( data1, data1_keys, data1_nestkeys ); # increment the data counter cnt+=1; #form 2: validation form = ddt_tile(); form.make_tileparameters( tileparameters={ 'tileheader':'Cross Validation filter menu', 'tiletype':'html', 'tileid':"filtermenu2", 'rowid':"row2",'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-4"} ); form.make_htmlparameters( htmlparameters = { 'htmlid':'filtermenuform2', "htmltype":'form_01', "formsubmitbuttonidtext":{'id':'submit2','text':'submit'}, "formresetbuttonidtext":{'id':'reset2','text':'reset'}, "formupdatebuttonidtext":{'id':'update12','text':'update'}}, ); self.add_parameters(form.get_parameters()); self.update_tile2datamap("filtermenu2",[cnt]); self.add_filtermenu( {"filtermenuid":"filtermenu2", "filtermenuhtmlid":"filtermenuform2", "filtermenusubmitbuttonid":"submit2", "filtermenuresetbuttonid":"reset2", "filtermenuupdatebuttonid":"update2"} ); #svg 2: validation svg = ddt_tile(); svg.make_tileparameters( tileparameters={ 'tileheader':'Cross Validation', 'tiletype':'svg', 'tileid':"tile2", 'rowid':"row2", 'colid':"col2", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-8" }); svg.make_svgparameters( svgparameters={ "svgtype":'verticalbarschart2d_01', "svgkeymap":[data2_keymap], 'svgid':'svg2', "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 }, "svgwidth":500,"svgheight":350,"svgy1axislabel":"Value", "svgfilters":None, 'svgformtileid':'filtermenu2', } ); self.add_parameters(svg.get_parameters()); self.update_tile2datamap("tile2",[cnt]); 
#table 2: validation crosstable = ddt_tile(); crosstable.make_tileparameters( tileparameters = { 'tileheader':'Cross Validation', 'tiletype':'table', 'tileid':"tile3", 'rowid':"row3", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-12"} ); crosstable.make_tableparameters( tableparameters = { "tablekeymap":[data2_keymap], "tabletype":'responsivetable_01', 'tableid':'table2', "tablefilters":None, "tableheaders":None, "tableclass":"table table-condensed table-hover"} ); self.add_parameters(crosstable.get_parameters()); self.update_tile2datamap("tile3",[cnt]); # add data 2 self.add_data( data2, data2_keys, data2_nestkeys ); # increment the data counter cnt+=1; def make_biPlot(self, data1, data1_keys,data1_nestkeys,data1_keymap, ): '''Make a biPlot INPUT: data1 data1_keys data1_nestkeys data1_keymap ''' cnt = 0; #from 1: biplot form = ddt_tile(); form.make_tileparameters( tileparameters={ 'tileheader':'Bi Plot filter menu', 'tiletype':'html', 'tileid':"filtermenu1", 'rowid':"row1", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-4"} ); form.make_htmlparameters( htmlparameters = { 'htmlid':'filtermenuform1', "htmltype":'form_01', "formsubmitbuttonidtext":{'id':'submit1','text':'submit'}, "formresetbuttonidtext":{'id':'reset1','text':'reset'}, "formupdatebuttonidtext":{'id':'update1','text':'update'}}, ); self.add_parameters(form.get_parameters()); self.update_tile2datamap("filtermenu1",[cnt]); self.add_filtermenu( {"filtermenuid":"filtermenu1", "filtermenuhtmlid":"filtermenuform1", "filtermenusubmitbuttonid":"submit1", "filtermenuresetbuttonid":"reset1", "filtermenuupdatebuttonid":"update1"} ); #svg 1: biplot svg = ddt_tile(); svg.make_tileparameters( tileparameters={ 'tileheader':'Bi Plot', 'tiletype':'svg', 'tileid':"tile1", 'rowid':"row1", 'colid':"col2", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-8"} ); svg.make_svgparameters( svgparameters={ "svgtype":'scatterlineplot2d_01', "svgkeymap":[data1_keymap,data1_keymap], 'svgid':'svg1', "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 }, "svgwidth":500,"svgheight":350, "svgx1axislabel":"component", "svgy1axislabel":"variance explained", 'svgformtileid':'filtermenu1',} ); self.add_parameters(svg.get_parameters()); self.update_tile2datamap("tile1",[cnt,cnt]); #table 1: Bi plot crosstable = ddt_tile(); crosstable.make_tileparameters( tileparameters = { 'tileheader':'Bi plot', 'tiletype':'table', 'tileid':"tile3", 'rowid':"row3", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-12"} ); crosstable.make_tableparameters( tableparameters = { "tablekeymap":[data1_keymap], "tabletype":'responsivetable_01', 'tableid':'table1', "tablefilters":None, "tableheaders":None, "tableclass":"table table-condensed table-hover"} ); self.add_parameters(crosstable.get_parameters()); self.update_tile2datamap("tile3",[cnt]); # data 1: self.add_data( data1, data1_keys, data1_nestkeys ); # increment the data counter cnt+=1; def make_hyperparameter(self, data1, data1_keys,data1_nestkeys,data1_keymap, data_cnt=0, ): '''Make a hyperparameter bar plot INPUT: data1 data1_keys data1_nestkeys data1_keymap ''' #form 2: validation form = ddt_tile(); form.make_tileparameters( tileparameters={ 'tileheader':'Cross Validation filter menu', 'tiletype':'html', 'tileid':"filtermenu1", 'rowid':"row1",'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-4"} ); form.make_htmlparameters( 
htmlparameters = { 'htmlid':'filtermenuform1', "htmltype":'form_01', "formsubmitbuttonidtext":{'id':'submit1','text':'submit'}, "formresetbuttonidtext":{'id':'reset1','text':'reset'}, "formupdatebuttonidtext":{'id':'update1','text':'update'}}, ); self.add_parameters(form.get_parameters()); self.update_tile2datamap("filtermenu1",[data_cnt]); self.add_filtermenu( {"filtermenuid":"filtermenu1", "filtermenuhtmlid":"filtermenuform1", "filtermenusubmitbuttonid":"submit1", "filtermenuresetbuttonid":"reset1", "filtermenuupdatebuttonid":"update1"} ); #svg 2: validation svg = ddt_tile(); svg.make_tileparameters( tileparameters={ 'tileheader':'Cross Validation', 'tiletype':'svg', 'tileid':"tile2", 'rowid':"row1", 'colid':"col2", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-8" }); svg.make_svgparameters( svgparameters={ "svgtype":'verticalbarschart2d_01', "svgkeymap":[data1_keymap], 'svgid':'svg2', "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 }, "svgwidth":500,"svgheight":350,"svgy1axislabel":"Value", "svgfilters":None, 'svgformtileid':'filtermenu1', } ); self.add_parameters(svg.get_parameters()); self.update_tile2datamap("tile2",[data_cnt]); #table 2: validation crosstable = ddt_tile(); crosstable.make_tileparameters( tileparameters = { 'tileheader':'Cross Validation', 'tiletype':'table', 'tileid':"tile3", 'rowid':"row2", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-12"} ); crosstable.make_tableparameters( tableparameters = { "tablekeymap":[data1_keymap], "tabletype":'responsivetable_01', 'tableid':'table1', "tablefilters":None, "tableheaders":None, "tableclass":"table table-condensed table-hover"} ); self.add_parameters(crosstable.get_parameters()); self.update_tile2datamap("tile3",[data_cnt]); # add data 1 self.add_data( data1, data1_keys, data1_nestkeys ); # increment the data counter data_cnt+=1; def make_impfeat(self, data1, data1_keys,data1_nestkeys,data1_keymap, data_cnt=0, ): '''Make a important feature bar plot INPUT: data1 data1_keys data1_nestkeys data1_keymap ''' #form 2: validation form = ddt_tile(); form.make_tileparameters( tileparameters={ 'tileheader':'Important feature filter menu', 'tiletype':'html', 'tileid':"filtermenu1", 'rowid':"row1",'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-4"} ); form.make_htmlparameters( htmlparameters = { 'htmlid':'filtermenuform1', "htmltype":'form_01', "formsubmitbuttonidtext":{'id':'submit1','text':'submit'}, "formresetbuttonidtext":{'id':'reset1','text':'reset'}, "formupdatebuttonidtext":{'id':'update1','text':'update'}}, ); self.add_parameters(form.get_parameters()); self.update_tile2datamap("filtermenu1",[data_cnt]); self.add_filtermenu( {"filtermenuid":"filtermenu1", "filtermenuhtmlid":"filtermenuform1", "filtermenusubmitbuttonid":"submit1", "filtermenuresetbuttonid":"reset1", "filtermenuupdatebuttonid":"update1"} ); #svg 2: validation svg = ddt_tile(); svg.make_tileparameters( tileparameters={ 'tileheader':'Important features', 'tiletype':'svg', 'tileid':"tile2", 'rowid':"row1", 'colid':"col2", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-8" }); svg.make_svgparameters( svgparameters={ "svgtype":'horizontalbarschart2d_01', "svgkeymap":[data1_keymap], 'svgid':'svg2', "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 250 }, "svgwidth":450,"svgheight":900, "svgx1axislabel":"impfeat_value", "svgy1axislabel":"component_name", 'svgformtileid':'filtermenu1', } ); 
self.add_parameters(svg.get_parameters()); self.update_tile2datamap("tile2",[data_cnt]); #table 2: validation crosstable = ddt_tile(); crosstable.make_tileparameters( tileparameters = { 'tileheader':'Important features', 'tiletype':'table', 'tileid':"tile3", 'rowid':"row2", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-12"} ); crosstable.make_tableparameters( tableparameters = { "tablekeymap":[data1_keymap], "tabletype":'responsivetable_01', 'tableid':'table1', "tablefilters":None, "tableheaders":None, "tableclass":"table table-condensed table-hover"} ); self.add_parameters(crosstable.get_parameters()); self.update_tile2datamap("tile3",[data_cnt]); # add data 1 self.add_data( data1, data1_keys, data1_nestkeys ); # increment the data counter data_cnt+=1; def make_SPlot(self, data1,data_dict1, data1_keys,data1_nestkeys,data1_keymap, data_cnt=0, ): '''Make a important feature bar plot INPUT: data1 data1_keys data1_nestkeys data1_keymap ''' #form 2: validation form = ddt_tile(); form.make_tileparameters( tileparameters={ 'tileheader':'S-Plot filter menu', 'tiletype':'html', 'tileid':"filtermenu1", 'rowid':"row1", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-4"} ); form.make_htmlparameters( htmlparameters = { 'htmlid':'filtermenuform1', "htmltype":'form_01', "formsubmitbuttonidtext":{'id':'submit1','text':'submit'}, "formresetbuttonidtext":{'id':'reset1','text':'reset'}, "formupdatebuttonidtext":{'id':'update1','text':'update'}}, ); self.add_parameters(form.get_parameters()); self.update_tile2datamap("filtermenu1",[data_cnt]); self.add_filtermenu( {"filtermenuid":"filtermenu1", "filtermenuhtmlid":"filtermenuform1", "filtermenusubmitbuttonid":"submit1", "filtermenuresetbuttonid":"reset1", "filtermenuupdatebuttonid":"update1"} ); # add data 1 self.add_data( data1, data1_keys, data1_nestkeys ); #svg 2: validation svg = ddt_tile(); for i in range(int(max(data_dict1.keys()))): axis = i+1; svgid = 'svg'+str(axis); colid = 'col'+str(axis+1); tileid = 'tile'+str(axis); svg.make_tileparameters( tileparameters={ 'tileheader':'S-Plot', 'tiletype':'svg', 'tileid':tileid, 'rowid':"row1", 'colid':colid, 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-6" }); svg.make_svgparameters( svgparameters={ "svgtype":'volcanoplot2d_01', "svgkeymap":[data1_keymap], 'svgid':'svg1', "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 }, "svgwidth":400,"svgheight":350, "svgx1axislabel":"loadings" + str(axis), "svgy1axislabel":"correlations" + str(axis), } ); self.add_parameters(svg.get_parameters()); self.update_tile2datamap(tileid,[axis]); self.add_data( data_dict1[axis], data1_keys, data1_nestkeys ); #table 2: validation crosstable = ddt_tile(); crosstable.make_tileparameters( tileparameters = { 'tileheader':'S-Plot', 'tiletype':'table', 'tileid':'tile'+str(axis+1), 'rowid':"row2", 'colid':"col1", 'tileclass':"panel panel-default", 'rowclass':"row", 'colclass':"col-sm-12"} ); crosstable.make_tableparameters( tableparameters = { "tablekeymap":[data1_keymap], "tabletype":'responsivetable_01', 'tableid':'table1', "tablefilters":None, "tableheaders":None, "tableclass":"table table-condensed table-hover"} ); self.add_parameters(crosstable.get_parameters()); self.update_tile2datamap('tile'+str(axis+1),[data_cnt]);
mit
-6,382,075,242,696,304,000
32.329489
82
0.497059
false
4.068168
false
false
false
nuncjo/Delver
examples.py
1
6037
# -*- coding:utf-8 -*- import os import psycopg2 from pprint import pprint from delver import Crawler def scraping_movies_table(): c = Crawler() c.logging = True c.useragent = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" c.open("http://www.boxofficemojo.com/daily/") pprint(c.tables()) def user_login(): c = Crawler() c.useragent = ( "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " "Chrome/60.0.3112.90 Safari/537.36" ) c.random_timeout = (0, 5) c.open('http://testing-ground.scraping.pro/login') forms = c.forms() if forms: login_form = forms[0] login_form.fields = { 'usr': 'admin', 'pwd': '12345' } c.submit(login_form) success_check = c.submit_check( login_form, phrase='WELCOME :)', status_codes=[200] ) print(success_check) class OnePunchManDownloader: """Downloads One Punch Man free manga chapers to local directories. Uses one main thread for scraper with random timeout. Uses 20 threads just for image downloads. """ def __init__(self): self._target_directory = 'one_punch_man' self._start_url = "http://m.mangafox.me/manga/onepunch_man_one/" self.crawler = Crawler() self.crawler.random_timeout = (0, 5) self.crawler.useragent = "Googlebot-Image/1.0" def run(self): self.crawler.open(self._start_url) for link in self.crawler.links(filters={'text': 'Ch '}, match='IN'): self.download_images(link) def download_images(self, link): target_path = '{}/{}'.format(self._target_directory, link.split('/')[-2]) full_chapter_url = link.replace('/manga/', '/roll_manga/') self.crawler.open(full_chapter_url) images = self.crawler.xpath("//img[@class='reader-page']/@data-original") os.makedirs(target_path, exist_ok=True) self.crawler.download_files(target_path, files=images, workers=20) def one_punch_downloader(): downloader = OnePunchManDownloader() downloader.run() class WithConnection: def __init__(self, params): self._connection = psycopg2.connect(**params) self._connection.autocommit = True self._cursor = self._connection.cursor() def table_exists(self, table_name): self._cursor.execute(''' select exists( select * from information_schema.tables where table_name='{}' ) '''.format(table_name)) return self._cursor.fetchone()[0] def scrape_page(crawler): """ Scrapes rows from tables with promotions. :param crawler: <delver.crawler.Crawler object> :return: generator with page of rows """ titles = crawler.xpath("//div/span[@class='title']/text()") discounts = crawler.xpath("//div[contains(@class, 'search_discount')]/span/text()") final_prices = crawler.xpath("//div[contains(@class, 'discounted')]//text()[2]").strip() yield [{ 'title': row[0], 'discount': row[1], 'price': row[2] } for row in zip(titles, discounts, final_prices)] class SteamPromotionsScraper: """ Scraper which can be iterated through Usage example:: >>> promotions_scraper = SteamPromotionsScraper() >>> for page in promotions_scraper: ... 
pprint(page) """ def __init__(self): self.crawler = Crawler() self.crawler.logging = True self.crawler.useragent = \ "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" self.crawler.random_timeout = (0, 5) def scrape_by_page(self): self.crawler.open('http://store.steampowered.com/search/?specials=1') yield from scrape_page(self.crawler) while self.crawler.links(filters={ 'class': 'pagebtn', 'text': '>' }): self.crawler.open(self.crawler.current_results[0]) yield from scrape_page(self.crawler) def __iter__(self): return self.scrape_by_page() class SteamPromotionsScraperDB(WithConnection): """Example with saving data to postgresql database Usage example:: >>> promotions_scraper_db = SteamPromotionsScraperDB({ ... 'dbname': "test", ... 'user': "testuser", ... 'password': "test" ... }) >>> promotions_scraper.save_to_db() """ def __init__(self, params): super().__init__(params) self.crawler = Crawler() self.crawler.logging = True self.crawler.useragent = \ "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" self.crawler.random_timeout = (0, 5) def scrape_by_page(self): self.crawler.open('http://store.steampowered.com/search/?specials=1') yield from scrape_page(self.crawler) while self.crawler.links(filters={ 'class': 'pagebtn', 'text': '>' }): self.crawler.open(self.crawler.current_results[0]) yield from scrape_page(self.crawler) def save_to_db(self): if not self.table_exists('promotions'): self._cursor.execute( ''' CREATE TABLE promotions ( id serial PRIMARY KEY, title varchar(255), discount varchar(4), price varchar(10) ); ''' ) for page in self.scrape_by_page(): for row in page: self._cursor.execute( ''' INSERT INTO promotions(title, discount, price) VALUES(%s, %s, %s) ''', (row.get('title'), row.get('discount'), row.get('price')) ) pprint(row)
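
# Minimal driver sketch (not part of the original examples module): it only
# calls the scraping_movies_table() example defined above and assumes network
# access plus the delver dependency imported at the top of this file.
if __name__ == '__main__':
    scraping_movies_table()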
mit
3,424,693,276,046,666,000
31.283422
92
0.558556
false
3.712792
false
false
false
beeftornado/sentry
src/sentry/stacktraces/processing.py
1
20204
from __future__ import absolute_import import six import logging from datetime import datetime from django.utils import timezone from collections import namedtuple, OrderedDict import sentry_sdk from sentry.models import Project, Release from sentry.utils.cache import cache from sentry.utils.hashlib import hash_values from sentry.utils.safe import get_path, safe_execute from sentry.stacktraces.functions import set_in_app, trim_function_name logger = logging.getLogger(__name__) StacktraceInfo = namedtuple( "StacktraceInfo", ["stacktrace", "container", "platforms", "is_exception"] ) StacktraceInfo.__hash__ = lambda x: id(x) StacktraceInfo.__eq__ = lambda a, b: a is b StacktraceInfo.__ne__ = lambda a, b: a is not b class ProcessableFrame(object): def __init__(self, frame, idx, processor, stacktrace_info, processable_frames): self.frame = frame self.idx = idx self.processor = processor self.stacktrace_info = stacktrace_info self.data = None self.cache_key = None self.cache_value = None self.processable_frames = processable_frames def __repr__(self): return "<ProcessableFrame %r #%r at %r>" % ( self.frame.get("function") or "unknown", self.idx, self.frame.get("instruction_addr"), ) def __contains__(self, key): return key in self.frame def __getitem__(self, key): return self.frame[key] def get(self, key, default=None): return self.frame.get(key, default) def close(self): # manually break circular references self.closed = True self.processable_frames = None self.stacktrace_info = None self.processor = None @property def previous_frame(self): last_idx = len(self.processable_frames) - self.idx - 1 - 1 if last_idx < 0: return return self.processable_frames[last_idx] def set_cache_value(self, value): if self.cache_key is not None: cache.set(self.cache_key, value, 3600) return True return False def set_cache_key_from_values(self, values): if values is None: self.cache_key = None return h = hash_values(values, seed=self.processor.__class__.__name__) self.cache_key = rv = "pf:%s" % h return rv class StacktraceProcessingTask(object): def __init__(self, processable_stacktraces, processors): self.processable_stacktraces = processable_stacktraces self.processors = processors def close(self): for frame in self.iter_processable_frames(): frame.close() def iter_processors(self): return iter(self.processors) def iter_processable_stacktraces(self): return six.iteritems(self.processable_stacktraces) def iter_processable_frames(self, processor=None): for _, frames in self.iter_processable_stacktraces(): for frame in frames: if processor is None or frame.processor == processor: yield frame class StacktraceProcessor(object): def __init__(self, data, stacktrace_infos, project=None): self.data = data self.stacktrace_infos = stacktrace_infos if project is None: project = Project.objects.get_from_cache(id=data["project"]) self.project = project def close(self): pass def get_release(self, create=False): """Convenient helper to return the release for the current data and optionally creates the release if it's missing. In case there is no release info it will return `None`. 
""" release = self.data.get("release") if not release: return None if not create: return Release.get(project=self.project, version=self.data["release"]) timestamp = self.data.get("timestamp") if timestamp is not None: date = datetime.fromtimestamp(timestamp).replace(tzinfo=timezone.utc) else: date = None return Release.get_or_create( project=self.project, version=self.data["release"], date_added=date ) def handles_frame(self, frame, stacktrace_info): """Returns true if this processor can handle this frame. This is the earliest check and operates on a raw frame and stacktrace info. If this returns `True` a processable frame is created. """ return False def preprocess_frame(self, processable_frame): """After a processable frame has been created this method is invoked to give the processor a chance to store additional data to the frame if wanted. In particular a cache key can be set here. """ pass def process_exception(self, exception): """Processes an exception.""" return False def process_frame(self, processable_frame, processing_task): """Processes the processable frame and returns a tuple of three lists: ``(frames, raw_frames, errors)`` where frames is the list of processed frames, raw_frames is the list of raw unprocessed frames (which however can also be modified if needed) as well as a list of optional errors. Each one of the items can be `None` in which case the original input frame is assumed. """ def preprocess_step(self, processing_task): """After frames are preprocessed but before frame processing kicks in the preprocessing step is run. This already has access to the cache values on the frames. """ return False def find_stacktraces_in_data(data, include_raw=False, with_exceptions=False): """Finds all stracktraces in a given data blob and returns it together with some meta information. If `include_raw` is True, then also raw stacktraces are included. If `with_exceptions` is set to `True` then stacktraces of the exception are always included and the `is_exception` flag is set on that stack info object. """ rv = [] def _report_stack(stacktrace, container, is_exception=False): if not is_exception and (not stacktrace or not get_path(stacktrace, "frames", filter=True)): return platforms = set( frame.get("platform") or data.get("platform") for frame in get_path(stacktrace, "frames", filter=True, default=()) ) rv.append( StacktraceInfo( stacktrace=stacktrace, container=container, platforms=platforms, is_exception=is_exception, ) ) for exc in get_path(data, "exception", "values", filter=True, default=()): _report_stack(exc.get("stacktrace"), exc, is_exception=with_exceptions) _report_stack(data.get("stacktrace"), None) for thread in get_path(data, "threads", "values", filter=True, default=()): _report_stack(thread.get("stacktrace"), thread) if include_raw: for info in rv[:]: if info.container is not None: _report_stack(info.container.get("raw_stacktrace"), info.container) return rv def _has_system_frames(frames): """ Determines whether there are any frames in the stacktrace with in_app=false. """ system_frames = 0 for frame in frames: if not frame.get("in_app"): system_frames += 1 return bool(system_frames) and len(frames) != system_frames def _normalize_in_app(stacktrace, platform=None, sdk_info=None): """ Ensures consistent values of in_app across a stacktrace. """ has_system_frames = _has_system_frames(stacktrace) for frame in stacktrace: # If all frames are in_app, flip all of them. 
This is expected by the UI if not has_system_frames: set_in_app(frame, False) # Default to false in all cases where processors or grouping enhancers # have not yet set in_app. elif frame.get("in_app") is None: set_in_app(frame, False) def normalize_stacktraces_for_grouping(data, grouping_config=None): """ Applies grouping enhancement rules and ensure in_app is set on all frames. This also trims functions if necessary. """ stacktraces = [] for stacktrace_info in find_stacktraces_in_data(data, include_raw=True): frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=()) if frames: stacktraces.append(frames) if not stacktraces: return platform = data.get("platform") # Put the trimmed function names into the frames. We only do this if # the trimming produces a different function than the function we have # otherwise stored in `function` to not make the payload larger # unnecessarily. for frames in stacktraces: for frame in frames: # Restore the original in_app value before the first grouping # enhancers have been run. This allows to re-apply grouping # enhancers on the original frame data. orig_in_app = get_path(frame, "data", "orig_in_app") if orig_in_app is not None: frame["in_app"] = None if orig_in_app == -1 else bool(orig_in_app) if frame.get("raw_function") is not None: continue raw_func = frame.get("function") if not raw_func: continue function_name = trim_function_name(raw_func, frame.get("platform") or platform) if function_name != raw_func: frame["raw_function"] = raw_func frame["function"] = function_name # If a grouping config is available, run grouping enhancers if grouping_config is not None: for frames in stacktraces: grouping_config.enhancements.apply_modifications_to_frame(frames, platform) # normalize in-app for stacktrace in stacktraces: _normalize_in_app(stacktrace, platform=platform) def should_process_for_stacktraces(data): from sentry.plugins.base import plugins infos = find_stacktraces_in_data(data, with_exceptions=True) platforms = set() for info in infos: platforms.update(info.platforms or ()) for plugin in plugins.all(version=2): processors = safe_execute( plugin.get_stacktrace_processors, data=data, stacktrace_infos=infos, platforms=platforms, _with_transaction=False, ) if processors: return True return False def get_processors_for_stacktraces(data, infos): from sentry.plugins.base import plugins platforms = set() for info in infos: platforms.update(info.platforms or ()) processors = [] for plugin in plugins.all(version=2): processors.extend( safe_execute( plugin.get_stacktrace_processors, data=data, stacktrace_infos=infos, platforms=platforms, _with_transaction=False, ) or () ) if processors: project = Project.objects.get_from_cache(id=data["project"]) processors = [x(data, infos, project) for x in processors] return processors def get_processable_frames(stacktrace_info, processors): """Returns thin wrappers around the frames in a stacktrace associated with the processor for it. 
""" frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=()) frame_count = len(frames) rv = [] for idx, frame in enumerate(frames): processor = next((p for p in processors if p.handles_frame(frame, stacktrace_info)), None) if processor is not None: rv.append( ProcessableFrame(frame, frame_count - idx - 1, processor, stacktrace_info, rv) ) return rv def process_single_stacktrace(processing_task, stacktrace_info, processable_frames): # TODO: associate errors with the frames and processing issues changed_raw = False changed_processed = False raw_frames = [] processed_frames = [] all_errors = [] bare_frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=()) frame_count = len(bare_frames) processable_frames = {frame.idx: frame for frame in processable_frames} for i, bare_frame in enumerate(bare_frames): idx = frame_count - i - 1 rv = None if idx in processable_frames: processable_frame = processable_frames[idx] assert processable_frame.frame is bare_frame try: rv = processable_frame.processor.process_frame(processable_frame, processing_task) except Exception: logger.exception("Failed to process frame") expand_processed, expand_raw, errors = rv or (None, None, None) if expand_processed is not None: processed_frames.extend(expand_processed) changed_processed = True elif expand_raw: # is not empty processed_frames.extend(expand_raw) changed_processed = True else: processed_frames.append(bare_frame) if expand_raw is not None: raw_frames.extend(expand_raw) changed_raw = True else: raw_frames.append(bare_frame) all_errors.extend(errors or ()) return ( processed_frames if changed_processed else None, raw_frames if changed_raw else None, all_errors, ) def get_crash_frame_from_event_data(data, frame_filter=None): """ Return the highest (closest to the crash) in-app frame in the top stacktrace which doesn't fail the given filter test. If no such frame is available, return the highest non-in-app frame which otherwise meets the same criteria. Return None if any of the following are true: - there are no frames - all frames fail the given filter test - we're unable to find any frames nested in either event.exception or event.stacktrace, and there's anything other than exactly one thread in the data """ frames = get_path(data, "exception", "values", -1, "stacktrace", "frames") or get_path( data, "stacktrace", "frames" ) if not frames: threads = get_path(data, "threads", "values") if threads and len(threads) == 1: frames = get_path(threads, 0, "stacktrace", "frames") default = None for frame in reversed(frames or ()): if frame is None: continue if frame_filter is not None: if not frame_filter(frame): continue if frame.get("in_app"): return frame if default is None: default = frame if default: return default def lookup_frame_cache(keys): rv = {} for key in keys: rv[key] = cache.get(key) return rv def get_stacktrace_processing_task(infos, processors): """Returns a list of all tasks for the processors. This can skip over processors that seem to not handle any frames. """ by_processor = {} to_lookup = {} # by_stacktrace_info requires stable sorting as it is used in # StacktraceProcessingTask.iter_processable_stacktraces. This is important # to guarantee reproducible symbolicator requests. 
by_stacktrace_info = OrderedDict() for info in infos: processable_frames = get_processable_frames(info, processors) for processable_frame in processable_frames: processable_frame.processor.preprocess_frame(processable_frame) by_processor.setdefault(processable_frame.processor, []).append(processable_frame) by_stacktrace_info.setdefault(processable_frame.stacktrace_info, []).append( processable_frame ) if processable_frame.cache_key is not None: to_lookup[processable_frame.cache_key] = processable_frame frame_cache = lookup_frame_cache(to_lookup) for cache_key, processable_frame in six.iteritems(to_lookup): processable_frame.cache_value = frame_cache.get(cache_key) return StacktraceProcessingTask( processable_stacktraces=by_stacktrace_info, processors=by_processor ) def dedup_errors(errors): # This operation scales bad but we do not expect that many items to # end up in rv, so that should be okay enough to do. rv = [] for error in errors: if error not in rv: rv.append(error) return rv def process_stacktraces(data, make_processors=None, set_raw_stacktrace=True): infos = find_stacktraces_in_data(data, with_exceptions=True) if make_processors is None: processors = get_processors_for_stacktraces(data, infos) else: processors = make_processors(data, infos) # Early out if we have no processors. We don't want to record a timer # in that case. if not processors: return changed = False # Build a new processing task processing_task = get_stacktrace_processing_task(infos, processors) try: # Preprocess step for processor in processing_task.iter_processors(): with sentry_sdk.start_span( op="stacktraces.processing.process_stacktraces.preprocess_step" ) as span: span.set_data("processor", processor.__class__.__name__) if processor.preprocess_step(processing_task): changed = True span.set_data("data_changed", True) # Process all stacktraces for stacktrace_info, processable_frames in processing_task.iter_processable_stacktraces(): # Let the stacktrace processors touch the exception if stacktrace_info.is_exception and stacktrace_info.container: for processor in processing_task.iter_processors(): with sentry_sdk.start_span( op="stacktraces.processing.process_stacktraces.process_exception" ) as span: span.set_data("processor", processor.__class__.__name__) if processor.process_exception(stacktrace_info.container): changed = True span.set_data("data_changed", True) # If the stacktrace is empty we skip it for processing if not stacktrace_info.stacktrace: continue with sentry_sdk.start_span( op="stacktraces.processing.process_stacktraces.process_single_stacktrace" ) as span: new_frames, new_raw_frames, errors = process_single_stacktrace( processing_task, stacktrace_info, processable_frames ) if new_frames is not None: stacktrace_info.stacktrace["frames"] = new_frames changed = True span.set_data("data_changed", True) if ( set_raw_stacktrace and new_raw_frames is not None and stacktrace_info.container is not None ): stacktrace_info.container["raw_stacktrace"] = dict( stacktrace_info.stacktrace, frames=new_raw_frames ) changed = True if errors: data.setdefault("errors", []).extend(dedup_errors(errors)) data.setdefault("_metrics", {})["flag.processing.error"] = True changed = True except Exception: logger.exception("stacktraces.processing.crash") data.setdefault("_metrics", {})["flag.processing.fatal"] = True data.setdefault("_metrics", {})["flag.processing.error"] = True changed = True finally: for processor in processors: processor.close() processing_task.close() if changed: return data
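
# Illustrative sketch of the processor contract documented above. The class
# name and behaviour are hypothetical; real processors are supplied by
# plugins via get_stacktrace_processors() and do substantially more work.
class NoopFunctionProcessor(StacktraceProcessor):
    def handles_frame(self, frame, stacktrace_info):
        # Claim only frames that carry a function name.
        return bool(frame.get("function"))

    def process_frame(self, processable_frame, processing_task):
        # Returning (None, None, None) keeps the original frame untouched.
        return None, None, None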
bsd-3-clause
501,741,680,478,238,800
34.076389
100
0.620719
false
4.307889
false
false
false
MTLeeLab/RESA
resa_util.py
1
10051
### # Copyright 2016 Miler T. Lee, University of Pittburgh # This file is part of the RESA Suite # # RESA Suite is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # RESA Suite is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with RESA Suite. If not, see <http://www.gnu.org/licenses/>. # # # resa_util.py: Utilities for processing RESA data ## import bz2 import gzip import json import re import sys import subprocess def initialize_loci(utr_bed12_file, utr_fasta_file, test = False): """ Given the UTRs listed in the bed12 file and their corresponding sequences in the fasta_file, creates a dict of loci[key] = (chr, strand, exon_list, seq) """ seqs = dict(read_fasta(utr_fasta_file)) loci = {} f = open(utr_bed12_file) for line in f: fields = line.strip().split() chrom = fields[0] start = int(fields[1]) strand = fields[5] feat_id = fields[3] block_sizes = fields[10].strip(',').split(',') block_starts = fields[11].strip(',').split(',') exons = [] for i, (bsize, bstart) in enumerate(zip(block_sizes, block_starts)): gstart = start + int(bstart) gend = gstart + int(bsize) exons.append((gstart, gend)) loci[feat_id] = (chrom, strand, tuple(exons), seqs[fields[3]].upper()) if test: break f.close() return loci ### # UTILITIES ### nt_mutations = {'C': 'T', 'G': 'A'} anti_strand_str = {'-': '+', '+': '-'} ###string.maketrans('acgturyACGTURY', 'tgcaayrTGCAAYR') DNA_TRANS = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@TBGDEFCHIJKLMNOPQYSAAVWXRZ[\\]^_`tbgdefchijklmnopqysaavwxrz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff' def rc(sequence, reverse = True): """ Reverse complement a DNA sequence, preserving case """ result = sequence.translate(DNA_TRANS) if reverse: return result[::-1] else: return result def bam_entry_is_reverse(samflag): """ Flag is passed in as an integer. Determines whether the 0x10 bit is set (16 in base 10), which indicates reverse complemented sequence. 
This is done using a binary operator & """ return samflag & 16 == 16 def seq_mask(seq, chars = ['A', 'G']): """ Replaces specified characters with N """ for char in chars: seq = seq.replace(char, 'N') return seq def load_chr_seqs(genome_fa): """ Loads all chromosome sequences into a dict """ chr_dict = dict(read_fasta(genome_fa)) return chr_dict def load_chr_seq(chr_id, chr_dict, genome_fa): """ Loads the chromosome sequence into memory if it's not already there """ if chr_id not in chr_dict: fasta_file = genome_fa % chr_id chr_dict[chr_id] = read_fasta(fasta_file)[0][1] return chr_dict[chr_id] def decode_cigar(cigar): """ Parses the cigar string into integers and letters """ return re.findall('(\d+)([MNDISHPX=])', cigar) def cigar_span_(cigar): """ Interprets the cigar string as the number of genomic positions consumed """ span = 0 cigar_ops = decode_cigar(cigar) for nts, op in cigar_ops: nts = int(nts) if op != 'I': span += nts return span def cigar_span(cigar): return sum(int(x) for x in re.findall('(\d+)[MNDSHPX=]', cigar)) #no I def tx_indexing(exons, minus = False, inverse = False): """ Returns a dict of genomic coordinates -> tx coordinates (or the inverse if inverse = True) Exons are zero indexed. """ positions = [] for s, e in exons: positions += [i for i in range(s, e)] if minus: positions.reverse() if inverse: return {i:x for i, x in enumerate(positions)} else: return {x:i for i, x in enumerate(positions)} def pretty_str(x, fields = False): """ Handles tuples or lists """ def joined_string(x, sep=','): return sep.join(list([str(y) for y in x])) if isinstance(x, str): return x elif isinstance(x, float): if abs(x) < 0.001: return '%.1E' % x else: return '%.3f' % x elif isinstance(x, tuple) or isinstance(x, list): if fields: return joined_string(x, '\t') elif not x: return '.' elif isinstance(x[0], tuple) or isinstance(x[0], list): return ';'.join([joined_string(y) for y in x]) else: return joined_string(x) else: return str(x) ####################### # FASTA file processing ####################### def read_fasta(filename): """ Returns the contents of a fasta file in a list of (id, sequence) tuples. Empty list returned if there are no fasta sequences in the file """ a = fasta_reader(filename) seqs = [] while a.has_next(): seqs.append(next(a)) return seqs class fasta_reader: """ Lightweight class for incrementally reading fasta files. Supports reading directly from properly named gzipped (.gz or .z) or bzip2ed (.bz2) files. 
""" file = None nextheader='' def __init__(self, filename): try: if filename.endswith('.gz') or filename.endswith('.z'): self.file = gzip.open(filename, 'rb') elif filename.endswith('.bz2'): self.file = bz2.BZ2File(filename, 'rb') else: self.file = open(filename, 'r') # fast forward to the first entry while 1: line = self.file.readline() if line == '': self.close() return elif line[0] == '>': self.nextheader = line[1:].rstrip() return except IOError: #print('No such file', filename) raise def has_next(self): """ Returns true if there are still fasta entries """ return len(self.nextheader) > 0 def __next__(self): """ Returns an (id, sequence) tuple, or () if file is finished """ #if global nextheader is empty, return empty #otherwise, the header is the nextheader try: identifier = self.nextheader total = [] while 1: line = self.file.readline() if line == '' or line[0] == '>': #EOF, end of entry break total.append(line.rstrip()) sequence = ''.join(total) if len(line) > 0: self.nextheader = line[1:].rstrip() else: self.nextheader = '' self.close() return (identifier, sequence) except: self.nextheader='' self.close() return () def close(self): """ Close the fasta file """ self.file.close() def write_fasta(filename, id_or_list, seq='', width=60, gzip_compress = False): """ Writes a fasta file with the sequence(s) version 1: write_fasta(myfilename, 'seq1_id', 'AAAAA') version 2: write_fasta(myfilename, [('seq1_id', 'AAAAA'), ('seq2_id', BBBBB)]) """ a = fasta_writer(filename, width=width, gzip_compress = gzip_compress) a.write(id_or_list, seq) a.close() class fasta_writer: """ Rudimentary fasta file writer Supports writing out to a gzipped file. If the passed in filename does not end with .gz or .z, .gz is appended. """ file = None width = 0 def __init__(self, filename, width=60, gzip_compress = False): self.width = width try: if gzip_compress: if not filename.endswith('.gz') and not filename.endswith('.z'): filename += '.gz' self.file = gzip.open(filename, 'wb') else: self.file = open(filename, 'w') except IOError: print('Can\'t open file.') def write(self, id, seq=''): """ Supports an id and a sequence, an (id, seq) tuple, or a list of sequence tuples """ if type(id) == type([]): list(map(self.writeone, id)) else: self.writeone(id, seq) def writeone(self, id, seq=''): """ Internal method. """ if type(id) == type((0,0)): seq = id[1] id = id[0] line_width = self.width if self.width == 0: line_width = len(seq) self.file.write(">" + id + "\n") i = 0 while i < len(seq): self.file.write(seq[i:i+line_width] + "\n") i+=line_width def close(self): """ Closes the fasta file. """ self.file.close()
gpl-3.0
4,324,001,643,226,316,000
26.3125
749
0.553477
false
3.373951
false
false
false
openstack/sahara-dashboard
sahara_dashboard/content/data_processing/clusters/cluster_templates/views.py
1
7433
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from django.urls import reverse from django.urls import reverse_lazy from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import tabs from horizon.utils import memoized from horizon import workflows from sahara_dashboard.api import sahara as saharaclient import sahara_dashboard.content.data_processing.clusters. \ cluster_templates.tables as ct_tables import sahara_dashboard.content.data_processing.clusters. \ cluster_templates.tabs as _tabs import sahara_dashboard.content.data_processing.clusters. \ cluster_templates.workflows.copy as copy_flow import sahara_dashboard.content.data_processing.clusters. \ cluster_templates.workflows.create as create_flow import sahara_dashboard.content.data_processing.clusters. \ cluster_templates.workflows.edit as edit_flow import sahara_dashboard.content.data_processing.clusters. \ cluster_templates.forms.import_forms as import_forms class ClusterTemplateDetailsView(tabs.TabView): tab_group_class = _tabs.ClusterTemplateDetailsTabs template_name = 'horizon/common/_detail.html' page_title = "{{ template.name|default:template.id }}" @memoized.memoized_method def get_object(self): ct_id = self.kwargs["template_id"] try: return saharaclient.cluster_template_get(self.request, ct_id) except Exception: msg = _('Unable to retrieve details for ' 'cluster template "%s".') % ct_id redirect = self.get_redirect_url() exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = super(ClusterTemplateDetailsView, self)\ .get_context_data(**kwargs) cluster_template = self.get_object() context['template'] = cluster_template context['url'] = self.get_redirect_url() context['actions'] = self._get_actions(cluster_template) return context def _get_actions(self, cluster_template): table = ct_tables.ClusterTemplatesTable(self.request) return table.render_row_actions(cluster_template) @staticmethod def get_redirect_url(): return reverse("horizon:project:data_processing." 
"clusters:index") class CreateClusterTemplateView(workflows.WorkflowView): workflow_class = create_flow.CreateClusterTemplate success_url = ("horizon:project:data_processing.clusters" ":create-cluster-template") classes = ("ajax-modal",) template_name = "cluster_templates/create.html" page_title = _("Create Cluster Template") class ConfigureClusterTemplateView(workflows.WorkflowView): workflow_class = create_flow.ConfigureClusterTemplate success_url = ("horizon:project:data_processing.clusters" ":index") template_name = "cluster_templates/configure.html" page_title = _("Configure Cluster Template") class CopyClusterTemplateView(workflows.WorkflowView): workflow_class = copy_flow.CopyClusterTemplate success_url = ("horizon:project:data_processing.clusters" ":index") template_name = "cluster_templates/configure.html" page_title = _("Copy Cluster Template") def get_context_data(self, **kwargs): context = super(CopyClusterTemplateView, self)\ .get_context_data(**kwargs) context["template_id"] = kwargs["template_id"] return context def get_object(self, *args, **kwargs): if not hasattr(self, "_object"): template_id = self.kwargs['template_id'] try: template = saharaclient.cluster_template_get(self.request, template_id) except Exception: template = {} exceptions.handle(self.request, _("Unable to fetch cluster template.")) self._object = template return self._object def get_initial(self): initial = super(CopyClusterTemplateView, self).get_initial() initial['template_id'] = self.kwargs['template_id'] return initial class EditClusterTemplateView(CopyClusterTemplateView): workflow_class = edit_flow.EditClusterTemplate success_url = ("horizon:project:data_processing.clusters" ":index") template_name = "cluster_templates/configure.html" class ImportClusterTemplateFileView(forms.ModalFormView): template_name = "cluster_templates/import.html" form_class = import_forms.ImportClusterTemplateFileForm submit_label = _("Next") submit_url = reverse_lazy("horizon:project:data_processing." "clusters:import-cluster-template-file") success_url = reverse_lazy("horizon:project:data_processing." "clusters:import-cluster-template-name") page_title = _("Import Cluster Template") def get_form_kwargs(self): kwargs = super(ImportClusterTemplateFileView, self).get_form_kwargs() kwargs['next_view'] = ImportClusterTemplateNameView return kwargs class ImportClusterTemplateNameView(forms.ModalFormView): template_name = "cluster_templates/import.html" form_class = import_forms.ImportClusterTemplateNameForm submit_label = _("Next") submit_url = reverse_lazy("horizon:project:data_processing." "clusters:import-cluster-template-name") success_url = reverse_lazy("horizon:project:data_processing." "clusters:import-cluster-template-nodegroups") page_title = _("Import Cluster Template") def get_form_kwargs(self): kwargs = super(ImportClusterTemplateNameView, self).get_form_kwargs() kwargs['next_view'] = ImportClusterTemplateNodegroupsView if 'template_upload' in self.kwargs: kwargs['template_upload'] = self.kwargs['template_upload'] return kwargs class ImportClusterTemplateNodegroupsView(forms.ModalFormView): template_name = "cluster_templates/import_nodegroups.html" # template_name = "some_random_stuff.html" form_class = import_forms.ImportClusterTemplateNodegroupsForm submit_label = _("Import") submit_url = reverse_lazy("horizon:project:data_processing." "clusters:import-cluster-template-nodegroups") success_url = reverse_lazy("horizon:project:data_processing." 
"clusters:index") page_title = _("Import Cluster Template") def get_form_kwargs(self): kwargs = super(ImportClusterTemplateNodegroupsView, self).get_form_kwargs() if 'template_upload' in self.kwargs: kwargs['template_upload'] = self.kwargs['template_upload'] return kwargs
apache-2.0
4,539,590,981,925,018,600
40.066298
77
0.675636
false
4.324026
true
false
false
caterinaurban/Lyra
src/lyra/tests/code_jam/pancake_flipper/pancakes_fyodr.py
1
1842
from typing import Dict, List, Tuple


def pow(a: int, b: int) -> int:
    power: int = 1
    for i in range(b):
        power = power * a
    return power


def check(memos: Dict[(Tuple[(int, int, int, int)], int)], i: int, s: int, c: int, k: int) -> int:
    if (s == 0):
        return 0
    elif ((i, s, c, k) not in memos):
        # Mark this state as in progress so revisits resolve to -1.
        memos[(i, s, c, k)]: int = (- 1)
        flip: int = 0
        for j in range(k):
            flip += pow(2, j)
        flip: int = flip * pow(2, i)
        new_s: int = (s ^ flip)
        best: int = (- 1)
        for j in range((c - (k - 1))):
            # Recurse with the shared memo table.
            maybe: int = check(memos, j, new_s, c, k)
            if (maybe == 0):
                best: int = maybe
                break
            elif (maybe == (- 1)):
                pass
            elif ((best == (- 1)) or (maybe < best)):
                best: int = maybe
        if (best == (- 1)):
            memos[(i, s, c, k)]: int = best
        else:
            memos[(i, s, c, k)]: int = (best + 1)
    return memos[(i, s, c, k)]


T: int = int(input().strip())
lines: List[str] = []
memos: Dict[(Tuple[(int, int, int, int)], int)] = { }
for t in range(1, (T + 1)):
    line: List[str] = input().strip().split()
    cakes: str = line[0]
    k: int = int(line[1])
    s: int = 0
    for i in range(len(cakes)):
        c: str = cakes[i]
        if (c == '-'):
            s += pow(2, i)
    best: int = (- 1)
    for i in range((len(cakes) - (k - 1))):
        maybe: int = check(memos, i, s, len(cakes), k)
        if (maybe == 0):
            best: int = maybe
            break
        if (maybe == (- 1)):
            pass
        elif ((maybe < best) or (best == (- 1))):
            best: int = maybe
    if (best == (- 1)):
        lines.append('Case #' + str(t) + ': ' + 'IMPOSSIBLE')
    else:
        lines.append('Case #' + str(t) + ': ' + str(best))
    print(lines[(- 1)])
mpl-2.0
-7,948,510,953,474,577,000
28.709677
98
0.410966
false
3.080268
false
false
false
RedHatInsights/insights-core
insights/parsers/corosync_cmapctl.py
1
2063
""" CorosyncCmapctl - Command ``corosync-cmapctl [params]`` ======================================================= This module parses the output of the ``corosync-cmapctl [params]`` command. """ from insights import parser, CommandParser from insights.parsers import SkipException, ParseException from insights.specs import Specs @parser(Specs.corosync_cmapctl) class CorosyncCmapctl(CommandParser, dict): """ Class for parsing the `/usr/sbin/corosync-cmapctl [params]` command. All lines are stored in the dictionary with the left part of the equal sign witout parenthese info as the key and the right part of equal sign as the value. Typical output of the command is:: config.totemconfig_reload_in_progress (u8) = 0 internal_configuration.service.0.name (str) = corosync_cmap internal_configuration.service.0.ver (u32) = 0 internal_configuration.service.1.name (str) = corosync_cfg internal_configuration.service.1.ver (u32) = 0 internal_configuration.service.2.name (str) = corosync_cpg internal_configuration.service.2.ver (u32) = 0 Examples: >>> type(corosync) <class 'insights.parsers.corosync_cmapctl.CorosyncCmapctl'> >>> 'internal_configuration.service.0.name' in corosync True >>> corosync['internal_configuration.service.0.name'] 'corosync_cmap' Raises: SkipException: When there is no content ParseException: When there is no "=" in the content """ def __init__(self, context): super(CorosyncCmapctl, self).__init__(context, extra_bad_lines=['corosync-cmapctl: invalid option']) def parse_content(self, content): if not content: raise SkipException for line in content: if '=' not in line: raise ParseException("Can not parse line %s" % line) key, value = [item.strip() for item in line.split('=')] key_without_parenthese = key.split()[0] self[key_without_parenthese] = value
apache-2.0
1,929,703,601,921,233,200
36.509091
108
0.644207
false
3.848881
true
false
false
ksteinfe/decodes
src/decodes/core/dc_interval.py
1
10356
from decodes.core import * import math, random class Interval(object): """ an interval class """ def __init__(self, a=0,b=1): """ Interval Constructor. :param a: First number of the interval. :type a: float :param b: Second number of the interval. :type a: float :result: Interval Object. :rtype: Interval """ self.a = float(a) self.b = float(b) def __truediv__(self,divs): return self.__div__(divs) def __div__(self, divs): """ Overloads the division **(/)** operator. Calls Interval.divide(divs). :param divs: Number of divisions. :type divs: int :result: List of numbers in which a list is divided. :rtype: list """ return self.divide(divs) def __floordiv__(self, other): """ Overloads the integer division **(//)** operator. Calls Interval.subinterval(other). :param other: Number to subintervals. :type other: int :result: list of subintervals :rtype: list """ return self.subinterval(other) def __add__(self, val): """ Overloads the addition **(+)** operator. :param val: Value to add to the interval. :type val: float :result: New interval. :rtype: Interval """ return Interval(self.a + val, self.b + val) def __sub__(self, val): """ Overloads the subtraction **(-)** operator. :param val: Value to subtract from the interval. :type val: float :result: New interval. :rtype: Interval """ return Interval(self.a - val, self.b - val) def __contains__(self, number): """ Overloads the containment **(in)** operator :param number: Number whose containment must be determined. :type number: float :result: Boolean result of containment. :rtype: bool """ ival = self.order() return (ival.a <= number) and (ival.b >= number) def __eq__(self, other): """ Overloads the equal **(==)** operator. :param other: Interval to be compared. :type other: Interval :result: Boolean result of comparison :rtype: bool """ return all([self.a==other.a,self.b==other.b]) def __hash__(self): return hash( (self.a, self.b) ) @property def list(self): """ Returns a list of the interval's start and end values. :result: List of interval's components :rtype: list """ return [self.a, self.b] @property def is_ordered(self): """ Returns True if the start value of the interval is smaller than the end value. :result: Boolean value :rtype: bool """ return True if self.a < self.b else False @property def length(self): """| Returns the absolute value of length of the interval. | For a signed representation, use delta. :result: Absolute value of length of an interval. :rtype: int """ length = self.b - self.a if length > 0: return length else: return length *-1 @property def delta(self): """| Returns the signed delta of the interval, calculated as b-a | For an unsigned representation, use length. :result: Delta of an interval. :rtype: float """ return float(self.b - self.a) @property def mid(self): """Returns the midpoint value of the interval. """ return self.eval(0.5) def overlaps(self,other): """ """ return other.a in self or other.b in self or self.a in other or self.b in other def order(self): """ Returns a copy of this interval with ordered values, such that a < b :result: Ordered copy of Interval object. :rtype: Interval """ if self.is_ordered: return Interval(self.a, self.b) else: return Interval(self.b, self.a) def invert(self): """| Returns a copy of this interval with swapped values. | Such that this.a = new.b and this.b = new.a :result: Interval object with swapped values. 
:rtype: Interval """ return Interval(self.b, self.a) def divide(self, divs=10, include_last=False): """| Divides this interval into a list of values equally spaced between a and b. | Unless include_last is set to True, returned list will not include Interval.b: the first value returned is Interval.a and the last is Interval.b-(Interval.delta/divs) :param divs: Number of interval divisions. :type divs: int :param include_last: Boolean value. :type include_last: bool :result: List of numbers in which a list is divided. :rtype: list """ step = self.delta/float(divs) if include_last : divs += 1 return [self.a+step*n for n in range(divs)] def subinterval(self, divs): """ Divides an interval into a list of equal size subintervals(interval objects). :param divs: Number of subintervals. :type divs: int :result: List of subintervals (interval objects). :rtype: list """ return [Interval(n,n+self.delta/float(divs)) for n in self.divide(divs)] def rand_interval(self, divs): """ Divides an interval into a list of randomly sized subintervals(interval objects). :param divs: Number of subintervals. :type divs: int :result: List of subintervals (interval objects). :rtype: list """ if divs < 1 : return ival result = [] r_list = [self.a,self.b] r_list.extend(self.eval(random.random()) for k in range(divs-1)) r_list.sort() return [Interval(r_list[n],r_list[n+1]) for n in range(divs)] def deval(self, number): """| Returns a parameter corresponding to the position of the given number within this interval. | Effectively, the opposite of eval(). :param number: Number to find the parameter of. :type number: float :result: Parameter. :rtype: float :: print Interval(10,20).deval(12) >>0.2 print Interval(10,20).deval(25) >>1.5 """ if self.delta == 0 : raise ZeroDivisionError("This interval cannot be devaluated because the delta is zero") return (number-self.a) / self.delta def eval(self, t,limited=False): """| Evaluates a given parameter within this interval. | For example, given an Interval(0->2*math.pi): eval(0.5) == math.pi | Optionally, you may limit the resulting output to this interval :param t: Number to evaluate. :type t: float :result: Evaluated number. :rtype: float :: print Interval(10,20).eval(0.2) >>12.0 print Interval(10,20).deval(1.5) >>25.0 """ ret = self.delta * t + self.a if not limited : return ret return self.limit_val(ret) def limit_val(self, n): """ Limits a given value to the min and max of this Interval. :param n: the number to be limited by the Interval. :type n: float :result: a number between the min and max of this Interval (inclusive). :rtype: float """ if n < self.a : return self.a if n > self.b : return self.b return n def __repr__(self): return "ival[{0},{1}]".format(self.a,self.b) def remap_to(self,val,target_interval=None,limited=False): return Interval.remap(val,self,target_interval,limited) @staticmethod def remap(val, source_interval, target_interval=None, limited=False): """ Translates a number from its position within the source interval to its relative position in the target interval. Optionally, you may limit the resulting output to the target interval. :param val: Number to remap. :type val: float :param source_interval: Source interval. :type source_interval: interval :param target_interval: Target interval :type target_interval: interval :param limited: flag that limits result to target interval :type limited: bool :result: The given number remapped to the target interval. 
:rtype: float """ if target_interval is None: target_interval = Interval(0,1) t = source_interval.deval(val) return target_interval.eval(t,limited) @staticmethod def encompass(values = [0],nudge=False): """ Returns an interval defined by the minimum and maximum of a list of values. :param values: A list of numbers. :type values: list :result: An Interval from the min and max of a list of values. :rtype: Interval """ from .dc_base import EPSILON if nudge: return Interval(min(values)-EPSILON, max(values)+EPSILON) a, b = min(values), max(values) if a == b : return False return Interval(a,b) @staticmethod def twopi(): """ Creates an interval from 0->2PI :result: Interval from 0 to 2PI. :rtype: Interval """ return Interval(0,math.pi*2) @staticmethod def pi(): """ Creates an interval from 0->PI :result: Interval from 0 to 2PI. :rtype: Interval """ return Interval(0,math.pi)
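
# Usage sketch following the eval/deval/remap docstrings above; it assumes
# the decodes.core import at the top of this module resolves.
if __name__ == '__main__':
    ival = Interval(10, 20)
    print(ival.eval(0.2))                              # 12.0
    print(ival.deval(15))                              # 0.5
    print(Interval.remap(15, ival, Interval(0, 100)))  # 50.0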
gpl-3.0
6,334,377,875,742,004,000
31.772152
197
0.539784
false
4.367777
false
false
false
bnbowman/BifoAlgo
src/Chapter2/Sec24_LeaderCycleSeq.py
1
3081
#! /usr/bin/env python3 from collections import Counter from operator import itemgetter def cyclo_seq( spectrum_file, spectrum_table_file ): N, spectrum = parse_spectrum_file( spectrum_file ) spectrum_table = parse_spectrum_table( spectrum_table_file ) aa_weights = set(spectrum_table.values()) peptides = list(find_possible_peptides( spectrum, aa_weights, N )) max_peptides = find_max_peptides( peptides, spectrum ) return set(['-'.join([str(w) for w in p]) for p in max_peptides]) def find_possible_peptides( spectrum, weights, N ): peptides = [ [0] ] true_weight = max(spectrum) while peptides: peptides = expand_peptides( peptides, weights ) peptides = [p for p in peptides if sum(p) <= max(spectrum)] for p in peptides: if sum( p ) != true_weight: continue yield p del p peptides = cut_peptides( peptides, spectrum, N ) def expand_peptides( peptides, weights ): new_peptides = [] for peptide in peptides: for weight in weights: if peptide == [0]: copy = [] else: copy = peptide[:] copy.append( weight ) new_peptides.append( copy ) return new_peptides def cut_peptides( peptides, spectrum, N ): if len(peptides) <= N: return peptides scores = {} for peptide in peptides: sub_peptides = find_subpeptides( peptide ) weights = [sum(p) for p in sub_peptides] peptide_str = '-'.join( [str(p) for p in peptide] ) scores[peptide_str] = sum([1 for w in weights if w in spectrum]) sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True) min_score = sorted_scores[N][1] peptides = [p for p, s in scores.items() if s >= min_score] peptides = [[int(n) for n in p.split('-')] for p in peptides] return peptides def find_max_peptides( peptides, spectrum ): scores = {} for peptide in peptides: sub_peptides = find_subpeptides( peptide ) weights = [sum(p) for p in sub_peptides] peptide_str = '-'.join( [str(p) for p in peptide] ) scores[peptide_str] = sum([1 for w in weights if w in spectrum]) sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True) max_score = sorted_scores[0][1] peptides = [p for p, s in scores.items() if s == max_score] peptides = [[int(n) for n in p.split('-')] for p in peptides] return peptides def find_subpeptides( peptide ): subpeptides = [ peptide ] for j in range(1, len(peptide)): for i in range(len(peptide)-j+1): subpeptides.append( peptide[i:i+j] ) return subpeptides def parse_spectrum_file( spectrum_file ): inputs = [] with open(spectrum_file) as handle: for line in handle: inputs += [int(w) for w in line.strip().split()] return inputs[0], inputs[1:] def parse_spectrum_table( spectrum_table_file ): table = {} with open( spectrum_table_file ) as handle: for line in handle: aa, size = line.strip().split() try: size = int(size) table[aa] = size except: raise ValueError return table if __name__ == '__main__': import sys spectrum_file = sys.argv[1] spectrum_table_file = sys.argv[2] results = cyclo_seq( spectrum_file, spectrum_table_file ) print(' '.join(results))
gpl-2.0
-5,203,654,426,318,608,000
29.81
72
0.674132
false
2.702632
false
false
false
taigaio/taiga-back
taiga/projects/attachments/permissions.py
1
5169
# -*- coding: utf-8 -*- # Copyright (C) 2014-present Taiga Agile LLC # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from taiga.base.api.permissions import (TaigaResourcePermission, HasProjectPerm, AllowAny, PermissionComponent) class IsAttachmentOwnerPerm(PermissionComponent): def check_permissions(self, request, view, obj=None): if obj and obj.owner and request.user.is_authenticated: return request.user == obj.owner return False class CommentAttachmentPerm(PermissionComponent): def check_permissions(self, request, view, obj=None): if obj.from_comment: return True return False class EpicAttachmentPermission(TaigaResourcePermission): retrieve_perms = HasProjectPerm('view_epics') | IsAttachmentOwnerPerm() create_perms = HasProjectPerm('modify_epic') | (CommentAttachmentPerm() & HasProjectPerm('comment_epic')) update_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm() partial_update_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm() destroy_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm() list_perms = AllowAny() class UserStoryAttachmentPermission(TaigaResourcePermission): retrieve_perms = HasProjectPerm('view_us') | IsAttachmentOwnerPerm() create_perms = HasProjectPerm('modify_us') | (CommentAttachmentPerm() & HasProjectPerm('comment_us')) update_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm() partial_update_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm() destroy_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm() list_perms = AllowAny() class TaskAttachmentPermission(TaigaResourcePermission): retrieve_perms = HasProjectPerm('view_tasks') | IsAttachmentOwnerPerm() create_perms = HasProjectPerm('modify_task') | (CommentAttachmentPerm() & HasProjectPerm('comment_task')) update_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm() partial_update_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm() destroy_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm() list_perms = AllowAny() class IssueAttachmentPermission(TaigaResourcePermission): retrieve_perms = HasProjectPerm('view_issues') | IsAttachmentOwnerPerm() create_perms = HasProjectPerm('modify_issue') | (CommentAttachmentPerm() & HasProjectPerm('comment_issue')) update_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm() partial_update_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm() destroy_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm() list_perms = AllowAny() class WikiAttachmentPermission(TaigaResourcePermission): retrieve_perms = HasProjectPerm('view_wiki_pages') | IsAttachmentOwnerPerm() create_perms = HasProjectPerm('modify_wiki_page') | (CommentAttachmentPerm() & HasProjectPerm('comment_wiki_page')) update_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm() partial_update_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm() destroy_perms = 
HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm() list_perms = AllowAny() class RawAttachmentPerm(PermissionComponent): def check_permissions(self, request, view, obj=None): is_owner = IsAttachmentOwnerPerm().check_permissions(request, view, obj) if obj.content_type.app_label == "epics" and obj.content_type.model == "epic": return EpicAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner elif obj.content_type.app_label == "userstories" and obj.content_type.model == "userstory": return UserStoryAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner elif obj.content_type.app_label == "tasks" and obj.content_type.model == "task": return TaskAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner elif obj.content_type.app_label == "issues" and obj.content_type.model == "issue": return IssueAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner elif obj.content_type.app_label == "wiki" and obj.content_type.model == "wikipage": return WikiAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner return False class RawAttachmentPermission(TaigaResourcePermission): retrieve_perms = RawAttachmentPerm()
agpl-3.0
-7,189,436,673,565,517,000
52.28866
119
0.734185
false
4.05094
false
false
false
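The permission classes in the taiga record above are assembled by combining small components with | (either side may grant) and & (both sides must grant). The base classes that implement those operators are not part of this record, so the sketch below only re-creates the composition pattern with hypothetical minimal stand-ins; it is not taiga's actual implementation.

# Illustrative sketch of the '|' / '&' permission composition used above.
# Component, _AnyOf and _AllOf are invented stand-ins, not taiga classes.
class Component(object):
    def check_permissions(self, request, view, obj=None):
        raise NotImplementedError

    def __or__(self, other):
        return _AnyOf(self, other)

    def __and__(self, other):
        return _AllOf(self, other)


class _AnyOf(Component):
    def __init__(self, left, right):
        self.left, self.right = left, right

    def check_permissions(self, request, view, obj=None):
        # e.g. HasProjectPerm('view_epics') | IsAttachmentOwnerPerm()
        return (self.left.check_permissions(request, view, obj)
                or self.right.check_permissions(request, view, obj))


class _AllOf(Component):
    def __init__(self, left, right):
        self.left, self.right = left, right

    def check_permissions(self, request, view, obj=None):
        # e.g. CommentAttachmentPerm() & HasProjectPerm('comment_epic')
        return (self.left.check_permissions(request, view, obj)
                and self.right.check_permissions(request, view, obj))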
cysuncn/python
spark/crm/PROC_O_LNA_XDXT_CUSTOMER_RELATIVE.py
1
5008
#coding=UTF-8 from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext from pyspark.sql.types import * from datetime import date, datetime, timedelta import sys, re, os st = datetime.now() conf = SparkConf().setAppName('PROC_O_LNA_XDXT_CUSTOMER_RELATIVE').setMaster(sys.argv[2]) sc = SparkContext(conf = conf) sc.setLogLevel('WARN') if len(sys.argv) > 5: if sys.argv[5] == "hive": sqlContext = HiveContext(sc) else: sqlContext = SQLContext(sc) hdfs = sys.argv[3] dbname = sys.argv[4] #处理需要使用的日期 etl_date = sys.argv[1] #etl日期 V_DT = etl_date #上一日日期 V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d") #月初日期 V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d") #上月末日期 V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d") #10位日期 V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d") V_STEP = 0 O_CI_XDXT_CUSTOMER_RELATIVE = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_CUSTOMER_RELATIVE/*') O_CI_XDXT_CUSTOMER_RELATIVE.registerTempTable("O_CI_XDXT_CUSTOMER_RELATIVE") #任务[21] 001-01:: V_STEP = V_STEP + 1 sql = """ SELECT A.CUSTOMERID AS CUSTOMERID ,A.RELATIVEID AS RELATIVEID ,A.RELATIONSHIP AS RELATIONSHIP ,A.CUSTOMERNAME AS CUSTOMERNAME ,A.CERTTYPE AS CERTTYPE ,A.CERTID AS CERTID ,A.FICTITIOUSPERSON AS FICTITIOUSPERSON ,A.CURRENCYTYPE AS CURRENCYTYPE ,A.INVESTMENTSUM AS INVESTMENTSUM ,A.OUGHTSUM AS OUGHTSUM ,A.INVESTMENTPROP AS INVESTMENTPROP ,A.INVESTDATE AS INVESTDATE ,A.STOCKCERTNO AS STOCKCERTNO ,A.DUTY AS DUTY ,A.TELEPHONE AS TELEPHONE ,A.EFFECT AS EFFECT ,A.WHETHEN1 AS WHETHEN1 ,A.WHETHEN2 AS WHETHEN2 ,A.WHETHEN3 AS WHETHEN3 ,A.WHETHEN4 AS WHETHEN4 ,A.WHETHEN5 AS WHETHEN5 ,A.DESCRIBE AS DESCRIBE ,A.INPUTORGID AS INPUTORGID ,A.INPUTUSERID AS INPUTUSERID ,A.INPUTDATE AS INPUTDATE ,A.REMARK AS REMARK ,A.SEX AS SEX ,A.BIRTHDAY AS BIRTHDAY ,A.SINO AS SINO ,A.FAMILYADD AS FAMILYADD ,A.FAMILYZIP AS FAMILYZIP ,A.EDUEXPERIENCE AS EDUEXPERIENCE ,A.INVESTYIELD AS INVESTYIELD ,A.HOLDDATE AS HOLDDATE ,A.ENGAGETERM AS ENGAGETERM ,A.HOLDSTOCK AS HOLDSTOCK ,A.LOANCARDNO AS LOANCARDNO ,A.EFFSTATUS AS EFFSTATUS ,A.CUSTOMERTYPE AS CUSTOMERTYPE ,A.INVESINITIALSUM AS INVESINITIALSUM ,A.ACCOUNTSUM AS ACCOUNTSUM ,A.FAIRSUM AS FAIRSUM ,A.DIATHESIS AS DIATHESIS ,A.ABILITY AS ABILITY ,A.INNOVATION AS INNOVATION ,A.CHARACTER AS CHARACTER ,A.COMPETITION AS COMPETITION ,A.STRATEGY AS STRATEGY ,A.RISE AS RISE ,A.POSSESS AS POSSESS ,A.EYESHOT AS EYESHOT ,A.FORESIGHT AS FORESIGHT ,A.STATUS AS STATUS ,A.INDUSTRY AS INDUSTRY ,A.PROSECUTION AS PROSECUTION ,A.FIRSTINVESTSUM AS FIRSTINVESTSUM ,A.FIRSTINVESTDATE AS FIRSTINVESTDATE ,A.LASTINVESTSUM AS LASTINVESTSUM ,A.LASTINVESTDATE AS LASTINVESTDATE ,A.DEADLINE AS DEADLINE ,A.FR_ID AS FR_ID ,V_DT AS ODS_ST_DATE ,'LNA' AS ODS_SYS_ID FROM O_CI_XDXT_CUSTOMER_RELATIVE A --客户关联信息 """ sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql) F_CI_XDXT_CUSTOMER_RELATIVE = sqlContext.sql(sql) F_CI_XDXT_CUSTOMER_RELATIVE.registerTempTable("F_CI_XDXT_CUSTOMER_RELATIVE") dfn="F_CI_XDXT_CUSTOMER_RELATIVE/"+V_DT+".parquet" F_CI_XDXT_CUSTOMER_RELATIVE.cache() nrows = F_CI_XDXT_CUSTOMER_RELATIVE.count() F_CI_XDXT_CUSTOMER_RELATIVE.write.save(path=hdfs + '/' + dfn, mode='overwrite') F_CI_XDXT_CUSTOMER_RELATIVE.unpersist() ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_RELATIVE/"+V_DT_LD+".parquet") et = datetime.now() print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_CUSTOMER_RELATIVE lines %d") % 
(V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
gpl-3.0
2,939,206,429,795,156,000
41.551724
179
0.545989
false
2.891623
false
true
false
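A Spark-free sketch of the date bookkeeping and the V_DT placeholder substitution that the job above performs; the etl_date value is invented for illustration and would normally come from sys.argv[1].

import re
from datetime import date, timedelta

etl_date = "20240315"  # placeholder for sys.argv[1]
d = date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))
V_DT10 = d.strftime("%Y-%m-%d")                    # 10-character ETL date
V_DT_LD = (d + timedelta(-1)).strftime("%Y%m%d")   # previous day, used to drop the old parquet

sql = "SELECT A.CUSTOMERID, V_DT AS ODS_ST_DATE FROM O_CI_XDXT_CUSTOMER_RELATIVE A"
# The job rewrites the V_DT placeholder into a quoted date literal before running the SQL
print(re.sub(r"\bV_DT\b", "'" + V_DT10 + "'", sql))
# -> SELECT A.CUSTOMERID, '2024-03-15' AS ODS_ST_DATE FROM O_CI_XDXT_CUSTOMER_RELATIVE A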
saffsd/updatedir
src/updatedir/__init__.py
1
4396
import logging import os import urlparse logger = logging.getLogger(__name__) def updatetree(source, dest, overwrite=False): parsed_url = urlparse.urlparse(dest) logger.debug(parsed_url) if parsed_url.scheme == '': import shutil if overwrite and os.path.exists(parsed_url.path): logger.debug("Deleting existing '%s'", parsed_url.path) shutil.rmtree(parsed_url.path) logger.debug("Local copy '%s' -> '%s'", source, parsed_url.path) shutil.copytree(source, parsed_url.path) else: dest = parsed_url.path def visit(arg, dirname, names): logger.debug("Visit '%s'", dirname) abs_dir = os.path.normpath(os.path.join(dest, os.path.relpath(dirname, source))) logger.debug("abs_dir '%s'", abs_dir) for name in names: src = os.path.join(dirname, name) dst = os.path.join(abs_dir, name) logger.debug("Processing '%s'", src) if os.path.isdir(src): if not os.path.isdir(dst): logger.debug("mkdir '%s'", dst) os.mkdir(dst) else: if os.path.exists(dst): if overwrite: logger.debug("overwrite '%s' -> '%s'", src, dst) shutil.copyfile(src,dst) else: logger.debug("will not overwrite '%s'", dst) else: logger.debug("copy '%s' -> '%s'", src, dst) shutil.copyfile(src,dst) # TODO: mkdir -p behaviour if not os.path.exists(dest): os.mkdir(dest) os.path.walk(source, visit, None) elif parsed_url.scheme == 'ssh': import paramiko import getpass # Work out host details host = parsed_url.hostname port = parsed_url.port if parsed_url.port else 22 transport = paramiko.Transport((host,port)) # Connect the transport username = parsed_url.username if parsed_url.username else getpass.getuser() logger.debug("Using username '%s'", username) if parsed_url.password: logger.debug("Using password") transport.connect(username = username, password = parsed_url.password) # TODO allow the keyfile to be configured in .hydratrc elif os.path.exists(os.path.expanduser('~/.ssh/id_rsa')): logger.debug("Using private RSA key") privatekeyfile = os.path.expanduser('~/.ssh/id_rsa') mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile) transport.connect(username = username, pkey = mykey) elif os.path.exists(os.path.expanduser('~/.ssh/id_dsa')): logger.debug("Using private DSS key") privatekeyfile = os.path.expanduser('~/.ssh/id_dsa') mykey = paramiko.DSSKey.from_private_key_file(privatekeyfile) transport.connect(username = username, pkey = mykey) else: raise ValueError, "Cannot connect transport: Unable to authenticate" logger.debug("Transport Connected") # Start the sftp client sftp = paramiko.SFTPClient.from_transport(transport) def visit(arg, dirname, names): logger.debug("Visit '%s'", dirname) abs_dir = sftp.normalize(os.path.relpath(dirname, source)) logger.debug("abs_dir '%s'", abs_dir) for name in names: src = os.path.join(dirname, name) dst = os.path.join(abs_dir, name) logger.debug("Processing '%s'", src) if os.path.isdir(src): try: sftp.stat(dst) except IOError: sftp.mkdir(dst) else: try: sftp.stat(dst) if overwrite: logger.debug("overwrite '%s'", dst) sftp.put(src, dst) except IOError: sftp.put(src, dst) head = str(parsed_url.path) tails = [] done = False # Roll back the path until we find one that exists while not done: try: sftp.stat(head) done = True except IOError: head, tail = os.path.split(head) tails.append(tail) # Now create all the missing paths that don't exist for tail in reversed(tails): head = os.path.join(head, tail) sftp.mkdir(head) sftp.chdir(parsed_url.path) os.path.walk(source, visit, None) else: raise ValueError, "Don't know how to use scheme '%s'" % parsed_url.scheme def main(): import sys logging.basicConfig(level = 
logging.DEBUG) updatetree(sys.argv[1], sys.argv[2], overwrite=False)
gpl-3.0
-6,502,143,634,632,490,000
33.077519
88
0.605778
false
3.725424
false
false
false
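Hypothetical usage of the updatetree() helper defined above; the directories, host and user name are placeholders, and the ssh:// form assumes paramiko is installed and a usable key or password is available.

import logging
from updatedir import updatetree

logging.basicConfig(level=logging.DEBUG)

# Local destination (empty scheme): shutil-based copy, replacing the target tree
updatetree("build/docs", "/var/www/docs", overwrite=True)

# Remote destination over SSH: authenticates with a password from the URL,
# or with ~/.ssh/id_rsa / ~/.ssh/id_dsa if present
updatetree("build/docs", "ssh://deploy@files.example.org/srv/docs", overwrite=False)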
jelly/calibre
src/calibre/gui2/actions/show_quickview.py
2
7899
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal <[email protected]>' __docformat__ = 'restructuredtext en' from PyQt5.Qt import QAction from calibre.gui2.actions import InterfaceAction from calibre.gui2.dialogs.quickview import Quickview from calibre.gui2 import error_dialog, gprefs from calibre.gui2.widgets import LayoutButton class QuickviewButton(LayoutButton): # {{{ def __init__(self, gui, quickview_manager): self.qv = quickview_manager qaction = quickview_manager.qaction LayoutButton.__init__(self, I('quickview.png'), _('Quickview'), parent=gui, shortcut=qaction.shortcut().toString()) self.toggled.connect(self.update_state) self.action_toggle = qaction self.action_toggle.triggered.connect(self.toggle) self.action_toggle.changed.connect(self.update_shortcut) def update_state(self, checked): if checked: self.set_state_to_hide() self.qv._show_quickview() else: self.set_state_to_show() self.qv._hide_quickview() def save_state(self): gprefs['quickview visible'] = bool(self.isChecked()) def restore_state(self): if gprefs.get('quickview visible', False): self.toggle() # }}} current_qv_action_pi = None def set_quickview_action_plugin(pi): global current_qv_action_pi current_qv_action_pi = pi def get_quickview_action_plugin(): return current_qv_action_pi class ShowQuickviewAction(InterfaceAction): name = 'Quickview' action_spec = (_('Quickview'), 'quickview.png', None, None) dont_add_to = frozenset(['context-menu-device']) action_type = 'current' current_instance = None def genesis(self): self.gui.keyboard.register_shortcut('Toggle Quickview', _('Toggle Quickview'), description=_('Open/close the Quickview panel/window'), default_keys=('Q',), action=self.qaction, group=self.action_spec[0]) self.focus_action = QAction(self.gui) self.gui.addAction(self.focus_action) self.gui.keyboard.register_shortcut('Focus To Quickview', _('Focus to Quickview'), description=_('Move the focus to the Quickview panel/window'), default_keys=('Shift+Q',), action=self.focus_action, group=self.action_spec[0]) self.focus_action.triggered.connect(self.focus_quickview) self.focus_bl_action = QAction(self.gui) self.gui.addAction(self.focus_bl_action) self.gui.keyboard.register_shortcut('Focus from Quickview', _('Focus from Quickview to the book list'), description=_('Move the focus from Quickview to the book list'), default_keys=('Shift+Alt+Q',), action=self.focus_bl_action, group=self.action_spec[0]) self.focus_bl_action.triggered.connect(self.focus_booklist) self.focus_refresh_action = QAction(self.gui) self.gui.addAction(self.focus_refresh_action) self.gui.keyboard.register_shortcut('Refresh from Quickview', _('Refresh Quickview'), description=_('Refresh the information shown in the Quickview pane'), action=self.focus_refresh_action, group=self.action_spec[0]) self.focus_refresh_action.triggered.connect(self.refill_quickview) self.search_action = QAction(self.gui) self.gui.addAction(self.search_action) self.gui.keyboard.register_shortcut('Search from Quickview', _('Search from Quickview'), description=_('Search for the currently selected Quickview item'), default_keys=('Shift+S',), action=self.search_action, group=self.action_spec[0]) self.search_action.triggered.connect(self.search_quickview) self.search_action.changed.connect(self.set_search_shortcut) self.menuless_qaction.changed.connect(self.set_search_shortcut) self.qv_button = QuickviewButton(self.gui, self) def initialization_complete(self): set_quickview_action_plugin(self) def 
_hide_quickview(self): ''' This is called only from the QV button toggle ''' if self.current_instance: if not self.current_instance.is_closed: self.current_instance._reject() self.current_instance = None def _show_quickview(self, *args): ''' This is called only from the QV button toggle ''' if self.current_instance: if not self.current_instance.is_closed: self.current_instance._reject() self.current_instance = None if self.gui.current_view() is not self.gui.library_view: error_dialog(self.gui, _('No quickview available'), _('Quickview is not available for books ' 'on the device.')).exec_() return self.qv_button.set_state_to_hide() index = self.gui.library_view.currentIndex() self.current_instance = Quickview(self.gui, index) self.current_instance.reopen_after_dock_change.connect(self.open_quickview) self.set_search_shortcut() self.current_instance.show() self.current_instance.quickview_closed.connect(self.qv_button.set_state_to_show) def set_search_shortcut(self): if self.current_instance and not self.current_instance.is_closed: self.current_instance.addAction(self.focus_bl_action) self.current_instance.set_shortcuts(self.search_action.shortcut().toString(), self.menuless_qaction.shortcut().toString()) def open_quickview(self): ''' QV moved from/to dock. Close and reopen the pane/window. Also called when QV is closed and the user asks to move the focus ''' if self.current_instance and not self.current_instance.is_closed: self.current_instance.reject() self.current_instance = None self.qaction.triggered.emit() def refill_quickview(self): ''' Called when the columns shown in the QV pane might have changed. ''' if self.current_instance and not self.current_instance.is_closed: self.current_instance.refill() def refresh_quickview(self, idx): ''' Called when the data shown in the QV pane might have changed. ''' if self.current_instance and not self.current_instance.is_closed: self.current_instance.refresh(idx) def change_quickview_column(self, idx): ''' Called from the column header context menu to change the QV query column ''' self.focus_quickview() self.current_instance.slave(idx) def library_changed(self, db): ''' If QV is open, close it then reopen it so the columns are correct ''' if self.current_instance and not self.current_instance.is_closed: self.current_instance.reject() self.qaction.triggered.emit() def focus_quickview(self): ''' Used to move the focus to the QV books table. Open QV if needed ''' if not self.current_instance or self.current_instance.is_closed: self.open_quickview() else: self.current_instance.set_focus() def focus_booklist(self): self.gui.activateWindow() self.gui.library_view.setFocus() def search_quickview(self): if not self.current_instance or self.current_instance.is_closed: return self.current_instance.do_search()
gpl-3.0
-8,621,730,550,062,112,000
37.531707
96
0.620965
false
3.979345
false
false
false
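A hedged sketch of how another component of the calibre GUI could reach this plugin through the module-level accessor defined above; it is only meaningful inside a running calibre GUI, and current_index stands in for a real QModelIndex of the book list.

from calibre.gui2.actions.show_quickview import get_quickview_action_plugin

qv = get_quickview_action_plugin()
if qv is not None:
    # Ask the Quickview pane to redraw the row for the book that just changed
    qv.refresh_quickview(current_index)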
kantai/passe-framework-prototype
django/http/__init__.py
1
31597
import datetime import os import re import time from pprint import pformat from urllib import urlencode, quote from urlparse import urljoin #try: # from StringIO import StringIO #except ImportError: from StringIO import StringIO from copy import deepcopy try: # The mod_python version is more efficient, so try importing it first. from mod_python.util import parse_qsl except ImportError: try: # Python 2.6 and greater from urlparse import parse_qsl except ImportError: # Python 2.5, 2.4. Works on Python 2.6 but raises # PendingDeprecationWarning from cgi import parse_qsl import Cookie # httponly support exists in Python 2.6's Cookie library, # but not in Python 2.4 or 2.5. _morsel_supports_httponly = Cookie.Morsel._reserved.has_key('httponly') # Some versions of Python 2.7 and later won't need this encoding bug fix: _cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"') # See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256 _tc = Cookie.SimpleCookie() _tc.load('f:oo') _cookie_allows_colon_in_names = 'Set-Cookie: f:oo=' in _tc.output() if False: #_morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names: SimpleCookie = Cookie.SimpleCookie else: class Morsel(Cookie.Morsel): def __getstate__(self): d = dict([(k,v) for k,v in dict.items(self)]) return d def __setstate__(self, state): for k,v in state.items(): dict.__setitem__(self, k, v) if not _morsel_supports_httponly: def __setitem__(self, K, V): K = K.lower() if K == "httponly": if V: # The superclass rejects httponly as a key, # so we jump to the grandparent. super(Cookie.Morsel, self).__setitem__(K, V) else: super(Morsel, self).__setitem__(K, V) def OutputString(self, attrs=None): output = super(Morsel, self).OutputString(attrs) if "httponly" in self: output += "; httponly" return output class SimpleCookie(Cookie.SimpleCookie): def __getstate__(self): d = dict([(k,v) for k,v in dict.items(self)]) return d def __setstate__(self, state): for k,v in state.items(): dict.__setitem__(self, k, v) if not _morsel_supports_httponly: def __set(self, key, real_value, coded_value): M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) def __setitem__(self, key, value): rval, cval = self.value_encode(value) self.__set(key, rval, cval) if not _cookie_encodes_correctly: def value_encode(self, val): # Some browsers do not support quoted-string from RFC 2109, # including some versions of Safari and Internet Explorer. # These browsers split on ';', and some versions of Safari # are known to split on ', '. Therefore, we encode ';' and ',' # SimpleCookie already does the hard work of encoding and decoding. # It uses octal sequences like '\\012' for newline etc. # and non-ASCII chars. We just make use of this mechanism, to # avoid introducing two encoding schemes which would be confusing # and especially awkward for javascript. # NB, contrary to Python docs, value_encode returns a tuple containing # (real val, encoded_val) val, encoded = super(SimpleCookie, self).value_encode(val) encoded = encoded.replace(";", "\\073").replace(",","\\054") # If encoded now contains any quoted chars, we need double quotes # around the whole string. 
if "\\" in encoded and not encoded.startswith('"'): encoded = '"' + encoded + '"' return val, encoded if not _cookie_allows_colon_in_names: def load(self, rawdata, ignore_parse_errors=False): if ignore_parse_errors: self.bad_cookies = set() self._BaseCookie__set = self._loose_set super(SimpleCookie, self).load(rawdata) if ignore_parse_errors: self._BaseCookie__set = self._strict_set for key in self.bad_cookies: del self[key] _strict_set = Cookie.BaseCookie._BaseCookie__set def _loose_set(self, key, real_value, coded_value): try: self._strict_set(key, real_value, coded_value) except Cookie.CookieError: self.bad_cookies.add(key) dict.__setitem__(self, key, Cookie.Morsel()) class CompatCookie(SimpleCookie): def __init__(self, *args, **kwargs): super(CompatCookie, self).__init__(*args, **kwargs) import warnings warnings.warn("CompatCookie is deprecated, use django.http.SimpleCookie instead.", PendingDeprecationWarning) from django.utils.datastructures import MultiValueDict, ImmutableList from django.utils.encoding import smart_str, iri_to_uri, force_unicode from django.utils.http import cookie_date from django.http.multipartparser import MultiPartParser from django.conf import settings from django.core.files import uploadhandler from utils import * RESERVED_CHARS="!*'();:@&=+$,/?%#[]" absolute_http_url_re = re.compile(r"^https?://", re.I) class Http404(Exception): pass class HttpRequestDummy(object): """ A stripped down HTTP request object that's pickle-able TODO FILES """ def __init__(self,request): self.GET = request.GET self.POST = request.POST self.COOKIES = request.COOKIES self.META = dict([(k,v) for k,v in request.META.items() if not k.startswith('wsgi')]) self.FILES = request.FILES self.path = request.path self.session = request.session self.path_info = request.path_info self.user = request.user self.method = request.method if hasattr(request, '_messages'): self._messages = request._messages def __repr__(self): return "G: %s \nP: %s \nC: %s\nM: %s\np: %s\nm: %s" % (self.GET, self.POST, self.COOKIES, self.META, self.path, self.method) def _get_request(self): from django.utils import datastructures if not hasattr(self, '_request'): self._request = datastructures.MergeDict(self.POST, self.GET) return self._request def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != (self.is_secure() and '443' or '80'): host = '%s:%s' % (host, server_port) return host def get_full_path(self): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '') def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no location is specified, the absolute URI is built on ``request.get_full_path()``. 
""" if not location: location = self.get_full_path() if not absolute_http_url_re.match(location): current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http', self.get_host(), self.path) location = urljoin(current_uri, location) return iri_to_uri(location) def is_secure(self): return os.environ.get("HTTPS") == "on" def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' REQUEST = property(_get_request) typeset = [int, float, str, unicode, bool] class HttpRequest(object): """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {} self.path = '' self.path_info = '' self.method = None def __repr__(self): return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \ (pformat(self.GET), pformat(self.POST), pformat(self.COOKIES), pformat(self.META)) def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = str(self.META['SERVER_PORT']) if server_port != (self.is_secure() and '443' or '80'): host = '%s:%s' % (host, server_port) return host def get_full_path(self): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '') def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no location is specified, the absolute URI is built on ``request.get_full_path()``. """ if not location: location = self.get_full_path() if not absolute_http_url_re.match(location): current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http', self.get_host(), self.path) location = urljoin(current_uri, location) return iri_to_uri(location) def is_secure(self): return os.environ.get("HTTPS") == "on" def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' def _set_encoding(self, val): """ Sets the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, it is removed and recreated on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, '_get'): del self._get if hasattr(self, '_post'): del self._post def _get_encoding(self): return self._encoding encoding = property(_get_encoding, _set_encoding) def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] def _set_upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def _get_upload_handlers(self): if not self._upload_handlers: # If thre are no upload handlers defined, initialize them from settings. 
self._initialize_handlers() return self._upload_handlers upload_handlers = property(_get_upload_handlers, _set_upload_handlers) def parse_file_upload(self, META, post_data): """Returns a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning = "You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() def _get_raw_post_data(self): if not hasattr(self, '_raw_post_data'): if self._read_started: raise Exception("You cannot access raw_post_data after reading from request's data stream") try: content_length = int(self.META.get('CONTENT_LENGTH', 0)) except (ValueError, TypeError): # If CONTENT_LENGTH was empty string or not an integer, don't # error out. We've also seen None passed in here (against all # specs, but see ticket #8259), so we handle TypeError as well. content_length = 0 if content_length: self._raw_post_data = self.read(content_length) else: self._raw_post_data = self.read() self._stream = StringIO(self._raw_post_data) # HACHI: used to be self._stream -- uh oh, 0-copy fuckup. #self._streamed = _stream.getvalue() return self._raw_post_data raw_post_data = property(_get_raw_post_data) def _mark_post_parse_error(self): self._post = QueryDict('') self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): # Populates self._post and self._files if self.method != 'POST': self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_raw_post_data'): self._mark_post_parse_error() return if self.META.get('CONTENT_TYPE', '').startswith('multipart'): if hasattr(self, '_raw_post_data'): # Use already read data data = StringIO(self._raw_post_data) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except: # An error occured while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occured. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise else: self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict() ## File-like and iterator interface. ## ## Expects self._stream to be set to an appropriate source of bytes by ## a corresponding request subclass (WSGIRequest or ModPythonRequest). ## Also when request data has already been read by request.POST or ## request.raw_post_data, self._stream points to a StringIO instance ## containing that data. def read(self, size=0): self._read_started = True return self._stream.read(size) def readline(self, *args, **kwargs): self._read_started = True return self._stream.readline(*args, **kwargs) def xreadlines(self): while True: buf = self.readline() if not buf: break yield buf __iter__ = xreadlines def readlines(self): return list(iter(self)) def get_changeset(request): """ Right now, this just makes a dict of all the attributes that we allow to be modified. 
""" d = {} for attr, val in request.__dict__.items(): if callable(val): continue if attr in ['PUT', 'GET', 'REQUEST', 'META', 'path', 'path_info', 'script_name', 'method', '_request', '_post', '_files']: continue d[attr] = val return d class RequestDelta(object): def __init__(self, wrapped): self._wrapped = wrapped self.changeset = {} def __getattr__(self, name): # if an attribute is fetched, we should add it to the changset :( if name in self.changeset: return self.changeset[name] if not hasattr(self._wrapped, name): raise AttributeError("Barfing %s on %s" % (name, type(self._wrapped) )) val = getattr(self._wrapped, name) if name in ['PUT', 'GET', 'REQUEST', 'META']: # these are immutable now, so deal with it punks return val if callable(val): return val if type(val) not in typeset: self.changeset[name] = val return val def __setattr__(self, name, val): if name == '_wrapped' or name == 'changeset': self.__dict__[name] = val else: self.changeset[name] = val def __getstate__(self): return self.changeset def __setstate__(self, state): if '_wrapped' not in self.__dict__: self._wrapped = None self.changeset = state class QueryDict(MultiValueDict): """ A specialized MultiValueDict that takes a query string when initialized. This is immutable unless you create a copy of it. Values retrieved from this class are converted from the given encoding (DEFAULT_CHARSET by default) to unicode. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string, mutable=False, encoding=None): MultiValueDict.__init__(self) if not encoding: # *Important*: do not import settings any earlier because of note # in core.handlers.modpython. from django.conf import settings encoding = settings.DEFAULT_CHARSET self.encoding = encoding for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True self.appendlist(force_unicode(key, encoding, errors='replace'), force_unicode(value, encoding, errors='replace')) self._mutable = mutable def _get_encoding(self): if self._encoding is None: # *Important*: do not import settings at the module level because # of the note in core.handlers.modpython. 
from django.conf import settings self._encoding = settings.DEFAULT_CHARSET return self._encoding def _set_encoding(self, value): self._encoding = value encoding = property(_get_encoding, _set_encoding) def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) MultiValueDict.__setitem__(self, key, value) def __delitem__(self, key): self._assert_mutable() super(QueryDict, self).__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in dict.items(self): dict.__setitem__(result, key, value) return result def __deepcopy__(self, memo): import django.utils.copycompat as copy result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in dict.items(self): dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = str_to_unicode(key, self.encoding) list_ = [str_to_unicode(elt, self.encoding) for elt in list_] MultiValueDict.setlist(self, key, list_) def setlistdefault(self, key, default_list=()): self._assert_mutable() if key not in self: self.setlist(key, default_list) return MultiValueDict.getlist(self, key) def appendlist(self, key, value): self._assert_mutable() key = str_to_unicode(key, self.encoding) value = str_to_unicode(value, self.encoding) MultiValueDict.appendlist(self, key, value) def update(self, other_dict): self._assert_mutable() f = lambda s: str_to_unicode(s, self.encoding) if hasattr(other_dict, 'lists'): for key, valuelist in other_dict.lists(): for value in valuelist: MultiValueDict.update(self, {f(key): f(value)}) else: d = dict([(f(k), f(v)) for k, v in other_dict.items()]) MultiValueDict.update(self, d) def pop(self, key, *args): self._assert_mutable() return MultiValueDict.pop(self, key, *args) def popitem(self): self._assert_mutable() return MultiValueDict.popitem(self) def clear(self): self._assert_mutable() MultiValueDict.clear(self) def setdefault(self, key, default=None): self._assert_mutable() key = str_to_unicode(key, self.encoding) default = str_to_unicode(default, self.encoding) return MultiValueDict.setdefault(self, key, default) def copy(self): """Returns a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Returns an encoded string of all query string arguments. 
:arg safe: Used to specify characters which do not require quoting, for example:: >>> q = QueryDict('', mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe))) else: encode = lambda k, v: urlencode({k: v}) for k, list_ in self.lists(): k = smart_str(k, self.encoding) output.extend([encode(k, smart_str(v, self.encoding)) for v in list_]) return '&'.join(output) def parse_cookie(cookie): if cookie == '': return {} if not isinstance(cookie, Cookie.BaseCookie): try: c = SimpleCookie() c.load(cookie, ignore_parse_errors=True) except Cookie.CookieError: # Invalid cookie return {} else: c = cookie cookiedict = {} for key in c.keys(): cookiedict[key] = c.get(key).value return cookiedict class BadHeaderError(ValueError): pass class HttpResponse(object): """A basic HTTP response, with content and dictionary-accessed headers.""" status_code = 200 def __init__(self, content='', mimetype=None, status=None, content_type=None): # _headers is a mapping of the lower-case name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. self._headers = {} self._charset = settings.DEFAULT_CHARSET if mimetype: content_type = mimetype # For backwards compatibility if not content_type: content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE, self._charset) if not isinstance(content, basestring) and hasattr(content, '__iter__'): self._container = content self._is_string = False else: self._container = [content] self._is_string = True self.cookies = SimpleCookie() if status: self.status_code = status self['Content-Type'] = content_type def __str__(self): """Full HTTP message, including headers.""" return '\n'.join(['%s: %s' % (key, value) for key, value in self._headers.values()]) \ + '\n\n' + self.content def _convert_to_ascii(self, *values): """Converts all values to ascii strings.""" for value in values: if isinstance(value, unicode): try: value = value.encode('us-ascii') except UnicodeError, e: e.reason += ', HTTP response headers must be in US-ASCII format' raise else: value = str(value) if '\n' in value or '\r' in value: raise BadHeaderError("Header values can't contain newlines (got %r)" % (value)) yield value def __setitem__(self, header, value): header, value = self._convert_to_ascii(header, value) self._headers[header.lower()] = (header, value) def __delitem__(self, header): try: del self._headers[header.lower()] except KeyError: pass def __getitem__(self, header): return self._headers[header.lower()][1] def has_header(self, header): """Case-insensitive check for a header.""" return self._headers.has_key(header.lower()) __contains__ = has_header def items(self): return self._headers.values() def get(self, header, alternate): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ Sets a cookie. ``expires`` can be a string in the correct format or a ``datetime.datetime`` object in UTC. If ``expires`` is a datetime object then ``max_age`` will be calculated. 
""" self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]['expires'] = cookie_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True def delete_cookie(self, key, path='/', domain=None): self.set_cookie(key, max_age=0, path=path, domain=domain, expires='Thu, 01-Jan-1970 00:00:00 GMT') def _get_content(self): if self.has_header('Content-Encoding'): return ''.join(self._container) return smart_str(''.join(self._container), self._charset) def _set_content(self, value): self._container = [value] self._is_string = True content = property(_get_content, _set_content) def __iter__(self): self._iterator = iter(self._container) return self def next(self): chunk = self._iterator.next() if isinstance(chunk, unicode): chunk = chunk.encode(self._charset) return str(chunk) def close(self): if hasattr(self._container, 'close'): self._container.close() # The remaining methods partially implement the file-like object interface. # See http://docs.python.org/lib/bltin-file-objects.html def write(self, content): if not self._is_string: raise Exception("This %s instance is not writable" % self.__class__) self._container.append(content) def flush(self): pass def tell(self): if not self._is_string: raise Exception("This %s instance cannot tell its position" % self.__class__) return sum([len(chunk) for chunk in self._container]) class HttpResponseRedirect(HttpResponse): status_code = 302 def __init__(self, redirect_to): super(HttpResponseRedirect, self).__init__() self['Location'] = iri_to_uri(redirect_to) class HttpResponsePermanentRedirect(HttpResponse): status_code = 301 def __init__(self, redirect_to): super(HttpResponsePermanentRedirect, self).__init__() self['Location'] = iri_to_uri(redirect_to) class HttpResponseNotModified(HttpResponse): status_code = 304 class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods): super(HttpResponseNotAllowed, self).__init__() self['Allow'] = ', '.join(permitted_methods) class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 # A backwards compatible alias for HttpRequest.get_host. def get_host(request): return request.get_host() # It's neither necessary nor appropriate to use # django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus, # this slightly more restricted function. def str_to_unicode(s, encoding): """ Converts basestring objects to unicode, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Returns any non-basestring objects without change. 
""" if isinstance(s, str): return unicode(s, encoding, 'replace') else: return s
bsd-3-clause
649,093,225,685,276,500
36.705251
134
0.576352
false
4.209566
false
false
false
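A short, hedged illustration of two helpers from the record above, parse_cookie() and HttpResponse.set_cookie(); it assumes a configured Django settings module is importable, and the cookie values are made up.

cookies = parse_cookie("sessionid=abc123; csrftoken=xyz")
print(cookies)                                  # {'sessionid': 'abc123', 'csrftoken': 'xyz'}

resp = HttpResponse("hello", content_type="text/plain")
resp.set_cookie("sessionid", "abc123", max_age=3600, httponly=True)
print(resp["Content-Type"])                     # text/plain
print(resp.cookies["sessionid"]["max-age"])     # 3600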
rchav/vinerack
saleor/userprofile/models.py
2
5462
from __future__ import unicode_literals from django.contrib.auth.models import ( AbstractBaseUser, BaseUserManager, PermissionsMixin) from django.db import models from django.forms.models import model_to_dict from django.utils import timezone from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import pgettext_lazy from django_countries.fields import Country, CountryField class AddressManager(models.Manager): def as_data(self, address): data = model_to_dict(address, exclude=['id', 'user']) if isinstance(data['country'], Country): data['country'] = data['country'].code return data def are_identical(self, addr1, addr2): data1 = self.as_data(addr1) data2 = self.as_data(addr2) return data1 == data2 def store_address(self, user, address): data = self.as_data(address) address, dummy_created = user.addresses.get_or_create(**data) return address @python_2_unicode_compatible class Address(models.Model): first_name = models.CharField( pgettext_lazy('Address field', 'first name'), max_length=256) last_name = models.CharField( pgettext_lazy('Address field', 'last name'), max_length=256) company_name = models.CharField( pgettext_lazy('Address field', 'company or organization'), max_length=256, blank=True) street_address_1 = models.CharField( pgettext_lazy('Address field', 'address'), max_length=256, blank=True) street_address_2 = models.CharField( pgettext_lazy('Address field', 'address'), max_length=256, blank=True) city = models.CharField( pgettext_lazy('Address field', 'city'), max_length=256, blank=True) city_area = models.CharField( pgettext_lazy('Address field', 'district'), max_length=128, blank=True) postal_code = models.CharField( pgettext_lazy('Address field', 'postal code'), max_length=20, blank=True) country = CountryField( pgettext_lazy('Address field', 'country')) country_area = models.CharField( pgettext_lazy('Address field', 'state or province'), max_length=128, blank=True) phone = models.CharField( pgettext_lazy('Address field', 'phone number'), max_length=30, blank=True) objects = AddressManager() @property def full_name(self): return '%s %s' % (self.first_name, self.last_name) def __str__(self): if self.company_name: return '%s - %s' % (self.company_name, self.full_name) return self.full_name def __repr__(self): return ( 'Address(first_name=%r, last_name=%r, company_name=%r, ' 'street_address_1=%r, street_address_2=%r, city=%r, ' 'postal_code=%r, country=%r, country_area=%r, phone=%r)' % ( self.first_name, self.last_name, self.company_name, self.street_address_1, self.street_address_2, self.city, self.postal_code, self.country, self.country_area, self.phone)) class UserManager(BaseUserManager): def create_user(self, email, password=None, is_staff=False, is_active=True, **extra_fields): 'Creates a User with the given username, email and password' email = UserManager.normalize_email(email) user = self.model(email=email, is_active=is_active, is_staff=is_staff, **extra_fields) if password: user.set_password(password) user.save() return user def create_superuser(self, email, password=None, **extra_fields): return self.create_user(email, password, is_staff=True, is_superuser=True, **extra_fields) def store_address(self, user, address, billing=False, shipping=False): entry = Address.objects.store_address(user, address) changed = False if billing and not user.default_billing_address_id: user.default_billing_address = entry changed = True if shipping and not user.default_shipping_address_id: user.default_shipping_address = entry changed = True if 
changed: user.save() return entry class User(PermissionsMixin, AbstractBaseUser): email = models.EmailField(unique=True) addresses = models.ManyToManyField(Address, blank=True) is_staff = models.BooleanField( pgettext_lazy('User field', 'staff status'), default=False) is_active = models.BooleanField( pgettext_lazy('User field', 'active'), default=False) date_joined = models.DateTimeField( pgettext_lazy('User field', 'date joined'), default=timezone.now, editable=False) default_shipping_address = models.ForeignKey( Address, related_name='+', null=True, blank=True, on_delete=models.SET_NULL, verbose_name=pgettext_lazy('User field', 'default shipping address')) default_billing_address = models.ForeignKey( Address, related_name='+', null=True, blank=True, on_delete=models.SET_NULL, verbose_name=pgettext_lazy('User field', 'default billing address')) USERNAME_FIELD = 'email' objects = UserManager() def get_full_name(self): return self.email def get_short_name(self): return self.email
bsd-3-clause
-8,794,741,306,931,394,000
35.657718
77
0.631637
false
3.952243
false
false
false
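A hedged sketch of the manager API in the record above; it requires a configured Django project with these models migrated, and the e-mail, names and address fields are placeholders.

user = User.objects.create_user(email="shopper@example.com", password="s3cret")

addr = Address(first_name="Jane", last_name="Doe", city="Gdansk", country="PL")
entry = User.objects.store_address(user, addr, shipping=True, billing=True)
# store_address() deduplicates through Address.objects.store_address() and only fills
# default_shipping_address / default_billing_address while they are still unset.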
lmazuel/azure-sdk-for-python
azure-servicefabric/azure/servicefabric/models/file_info.py
1
1684
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class FileInfo(Model):
    """Information about an image store file.

    :param file_size: The size of file in bytes.
    :type file_size: str
    :param file_version: Information about the version of image store file.
    :type file_version: ~azure.servicefabric.models.FileVersion
    :param modified_date: The date and time when the image store file was last
     modified.
    :type modified_date: datetime
    :param store_relative_path: The file path relative to the image store root
     path.
    :type store_relative_path: str
    """

    _attribute_map = {
        'file_size': {'key': 'FileSize', 'type': 'str'},
        'file_version': {'key': 'FileVersion', 'type': 'FileVersion'},
        'modified_date': {'key': 'ModifiedDate', 'type': 'iso-8601'},
        'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'},
    }

    def __init__(self, file_size=None, file_version=None, modified_date=None,
                 store_relative_path=None):
        super(FileInfo, self).__init__()
        self.file_size = file_size
        self.file_version = file_version
        self.modified_date = modified_date
        self.store_relative_path = store_relative_path
mit
-8,983,340,538,904,316,000
39.095238
104
0.612827
false
4.17866
false
false
false
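Constructing the generated FileInfo model above is straightforward; the field values below are invented for illustration.

from datetime import datetime

info = FileInfo(
    file_size="2048",
    modified_date=datetime(2018, 1, 15, 12, 30),
    store_relative_path="Store\\MyApp\\Code.zip",
)
print(info.file_size, info.store_relative_path)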
ScreamingUdder/mantid
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSBeamSpreaderTransmission.py
1
13137
#pylint: disable=no-init,invalid-name from __future__ import (absolute_import, division, print_function) import mantid.simpleapi as api from mantid.api import * from mantid.kernel import * import os from reduction_workflow.find_data import find_data class SANSBeamSpreaderTransmission(PythonAlgorithm): def category(self): return "Workflow\\SANS\\UsesPropertyManager" def name(self): return "SANSBeamSpreaderTransmission" def summary(self): return "Compute transmission using the beam spreader method" def PyInit(self): self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "", direction=Direction.Input)) self.declareProperty(FileProperty("SampleSpreaderFilename", "", action=FileAction.Load, extensions=['xml', 'nxs', 'nxs.h5'])) self.declareProperty(FileProperty("DirectSpreaderFilename", "", action=FileAction.Load, extensions=['xml', 'nxs', 'nxs.h5'])) self.declareProperty(FileProperty("SampleScatteringFilename", "", action=FileAction.Load, extensions=['xml', 'nxs', 'nxs.h5'])) self.declareProperty(FileProperty("DirectScatteringFilename", "", action=FileAction.Load, extensions=['xml', 'nxs', 'nxs.h5'])) self.declareProperty("SpreaderTransmissionValue", 1.0, "Transmission of the beam spreader") self.declareProperty("SpreaderTransmissionError", 0.0, "Error on the transmission of the beam spreader") self.declareProperty("ThetaDependent", True, "If true, a theta-dependent correction will be applied") self.declareProperty(FileProperty("DarkCurrentFilename", "", action=FileAction.OptionalLoad, extensions=['xml', 'nxs', 'nxs.h5'])) self.declareProperty("UseSampleDarkCurrent", False, "If true, the sample dark current will be used") self.declareProperty("ReductionProperties", "__sans_reduction_properties", validator=StringMandatoryValidator(), doc="Property manager name for the reduction") self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "", direction = Direction.Output)) self.declareProperty("MeasuredTransmission", 0.0, direction=Direction.Output) self.declareProperty("MeasuredError", 0.0, direction=Direction.Output) self.declareProperty("OutputMessage", "", direction=Direction.Output, doc = "Output message") def PyExec(self): # noqa: C901 # Get the reduction property manager property_manager_name = self.getProperty("ReductionProperties").value property_manager = PropertyManagerDataService.retrieve(property_manager_name) # Build the name we are going to give the transmission workspace sample_scatt = self.getPropertyValue("SampleScatteringFilename") sample_basename = os.path.basename(sample_scatt) entry_name = "TransmissionSpreader%s" % sample_scatt trans_ws_name = "__transmission_fit_%s" % sample_basename trans_ws = None # If we have already computed the transmission, used the # previously computed workspace if property_manager.existsProperty(entry_name): trans_ws_name = property_manager.getProperty(entry_name) if AnalysisDataService.doesExist(trans_ws_name): trans_ws = AnalysisDataService.retrieve(trans_ws_name) # Get instrument to use with FileFinder instrument = '' if property_manager.existsProperty("InstrumentName"): instrument = property_manager.getProperty("InstrumentName").value # Get the data loader def _load_data(filename, output_ws): if not property_manager.existsProperty("LoadAlgorithm"): Logger("SANSBeamSpreaderTransmission").error("SANS reduction not set up properly: missing load algorithm") raise RuntimeError("SANS reduction not set up properly: missing load algorithm") p=property_manager.getProperty("LoadAlgorithm") alg=Algorithm.fromString(p.valueAsStr) 
alg.setProperty("Filename", filename) alg.setProperty("OutputWorkspace", output_ws) if alg.existsProperty("ReductionProperties"): alg.setProperty("ReductionProperties", property_manager_name) alg.execute() msg = '' if alg.existsProperty("OutputMessage"): msg = alg.getProperty("OutputMessage").value return msg # Compute the transmission if we don't already have it if trans_ws is None: # Load data files sample_spreader_ws = "__trans_sample_spreader" direct_spreader_ws = "__trans_direct_spreader" sample_scatt_ws = "__trans_sample_scatt" direct_scatt_ws = "__trans_direct_scatt" sample_spread = self.getPropertyValue("SampleSpreaderFilename") direct_spread = self.getPropertyValue("DirectSpreaderFilename") direct_scatt = self.getPropertyValue("DirectScatteringFilename") ws_names = [[sample_spread, sample_spreader_ws], [direct_spread, direct_spreader_ws], [sample_scatt, sample_scatt_ws], [direct_scatt, direct_scatt_ws]] for f in ws_names: filepath = find_data(f[0], instrument=instrument) _load_data(filepath, f[1]) self._subtract_dark_current(f[1], property_manager) # Get normalization for transmission calculation monitor_det_ID = None if property_manager.existsProperty("TransmissionNormalisation"): sample_ws = AnalysisDataService.retrieve(sample_scatt_ws) if property_manager.getProperty("TransmissionNormalisation").value=="Monitor": monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-monitor-spectrum")[0]) else: monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-timer-spectrum")[0]) elif property_manager.existsProperty("NormaliseAlgorithm"): def _normalise(workspace): p=property_manager.getProperty("NormaliseAlgorithm") alg=Algorithm.fromString(p.valueAsStr) alg.setProperty("InputWorkspace", workspace) alg.setProperty("OutputWorkspace", workspace) if alg.existsProperty("ReductionProperties"): alg.setProperty("ReductionProperties", property_manager_name) alg.execute() msg = '' if alg.existsProperty("OutputMessage"): msg += alg.getProperty("OutputMessage").value+'\n' return msg for f in ws_names: _normalise(f[1]) # Calculate transmission. Use the reduction method's normalization channel (time or beam monitor) # as the monitor channel. 
spreader_t_value = self.getPropertyValue("SpreaderTransmissionValue") spreader_t_error = self.getPropertyValue("SpreaderTransmissionError") alg = AlgorithmManager.createUnmanaged('CalculateTransmissionBeamSpreader') alg.initialize() alg.setProperty("SampleSpreaderRunWorkspace", sample_spreader_ws) alg.setProperty("DirectSpreaderRunWorkspace", direct_spreader_ws) alg.setProperty("SampleScatterRunWorkspace", sample_scatt_ws) alg.setProperty("DirectScatterRunWorkspace", direct_scatt_ws) alg.setProperty("IncidentBeamMonitor", monitor_det_ID) alg.setProperty("OutputWorkspace",trans_ws_name) alg.setProperty("SpreaderTransmissionValue",spreader_t_value) alg.setProperty("SpreaderTransmissionError",spreader_t_error) alg.execute() trans_ws = AnalysisDataService.retrieve(trans_ws_name) for f in ws_names: if AnalysisDataService.doesExist(f[1]): AnalysisDataService.remove(f[1]) # 2- Apply correction (Note: Apply2DTransCorr) input_ws_name = self.getPropertyValue("InputWorkspace") if not AnalysisDataService.doesExist(input_ws_name): Logger("SANSBeamSpreaderTransmission").error("Could not find input workspace") workspace = AnalysisDataService.retrieve(input_ws_name).name() # Clone workspace to make boost-python happy api.CloneWorkspace(InputWorkspace=workspace, OutputWorkspace='__'+workspace) workspace = '__'+workspace self._apply_transmission(workspace, trans_ws_name) trans = trans_ws.dataY(0)[0] error = trans_ws.dataE(0)[0] output_str = '' if len(trans_ws.dataY(0))==1: self.setProperty("MeasuredTransmission", trans) self.setProperty("MeasuredError", error) output_str = "\n%s T = %6.2g += %6.2g\n" % (output_str, trans, error) output_msg = "Transmission correction applied [%s]%s\n" % (trans_ws_name, output_str) output_ws = AnalysisDataService.retrieve(workspace) self.setProperty("OutputWorkspace", output_ws) self.setPropertyValue("OutputMessage", output_msg) def _apply_transmission(self, workspace, trans_workspace): """ Apply transmission correction @param workspace: workspace to apply correction to @param trans_workspace: workspace name for of the transmission """ # Make sure the binning is compatible api.RebinToWorkspace(WorkspaceToRebin=trans_workspace, WorkspaceToMatch=workspace, OutputWorkspace=trans_workspace+'_rebin', PreserveEvents=False) # Apply angle-dependent transmission correction using the zero-angle transmission theta_dependent = self.getProperty("ThetaDependent").value api.ApplyTransmissionCorrection(InputWorkspace=workspace, TransmissionWorkspace=trans_workspace+'_rebin', OutputWorkspace=workspace, ThetaDependent=theta_dependent) if AnalysisDataService.doesExist(trans_workspace+'_rebin'): AnalysisDataService.remove(trans_workspace+'_rebin') def _subtract_dark_current(self, workspace_name, property_manager): """ Subtract the dark current @param workspace_name: name of the workspace to subtract from @param property_manager: property manager object """ # Subtract dark current use_sample_dc = self.getProperty("UseSampleDarkCurrent").value dark_current_data = self.getPropertyValue("DarkCurrentFilename") property_manager_name = self.getProperty("ReductionProperties").value def _dark(workspace, dark_current_property): if property_manager.existsProperty(dark_current_property): p=property_manager.getProperty(dark_current_property) # Dark current subtraction for sample data alg=Algorithm.fromString(p.valueAsStr) alg.setProperty("InputWorkspace", workspace) alg.setProperty("OutputWorkspace", workspace) alg.setProperty("Filename", dark_current_data) if alg.existsProperty("PersistentCorrection"): 
alg.setProperty("PersistentCorrection", False) if alg.existsProperty("ReductionProperties"): alg.setProperty("ReductionProperties", property_manager_name) alg.execute() msg = "Dark current subtracted" if alg.existsProperty("OutputMessage"): msg += alg.getProperty("OutputMessage").value return msg if use_sample_dc is True: _dark(workspace_name, "DarkCurrentAlgorithm") elif len(dark_current_data.strip())>0: _dark(workspace_name, "DefaultDarkCurrentAlgorithm") ############################################################################################# AlgorithmFactory.subscribe(SANSBeamSpreaderTransmission)
gpl-3.0
-2,155,445,629,212,738,800
49.722008
126
0.599528
false
4.792776
false
false
false
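The `_apply_transmission` helper in the record above boils down to a rebin-then-correct pair of Mantid algorithm calls. A minimal standalone sketch of that pattern, assuming a running Mantid session; the `mantid.simpleapi` import path is my assumption about where the record's `api.*` calls come from, and the workspace names are placeholders:

from mantid.simpleapi import RebinToWorkspace, ApplyTransmissionCorrection

def apply_transmission(workspace, trans_workspace, theta_dependent=True):
    # Match the transmission binning to the data it will correct
    rebinned = trans_workspace + '_rebin'
    RebinToWorkspace(WorkspaceToRebin=trans_workspace,
                     WorkspaceToMatch=workspace,
                     OutputWorkspace=rebinned,
                     PreserveEvents=False)
    # Divide out the (optionally theta-dependent) zero-angle transmission
    ApplyTransmissionCorrection(InputWorkspace=workspace,
                                TransmissionWorkspace=rebinned,
                                OutputWorkspace=workspace,
                                ThetaDependent=theta_dependent)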
sebleier/python-redis
pyredis/hash.py
1
2365
from collections import defaultdict class Hash(object): def __init__(self): self._data = defaultdict(int) def hset(self, key, value): """ Set ``key`` to ``value`` within hash ``name`` Returns 1 if HSET created a new field, otherwise 0 """ if key in self._data: created = 0 else: created = 1 self._data[key] = value return created def hget(self, key): "Return the value of ``key``" return self._data.get(key, None) def hdel(self, *keys): "Delete ``keys``" deleted = 0 for key in keys: if key in self._data: deleted += 1 del self._data[key] return deleted def hexists(self, key): "Returns a boolean indicating if ``key`` exists within hash ``name``" return key in self._data def hgetall(self): "Return a Python dict of the hash's name/value pairs" return self._data def hincrby(self, key, amount=1): "Increment the value of ``key`` in hash by ``amount``" self._data[key] += amount return self._data[key] def hincrbyfloat(self, key, amount=1.0): """ Increment the value of ``key`` in hash by floating ``amount`` """ return self.hincrby(key, amount) def hkeys(self): "Return the list of keys within hash" return self._data.keys() def hlen(self): "Return the number of elements in hash" return len(self._data) def hsetnx(self, key, value): """ Set ``key`` to ``value`` within hash if ``key`` does not exist. Returns 1 if HSETNX created a field, otherwise 0. """ if key in self._data: return 0 self._data[key] = value return 1 def hmset(self, mapping): """ Sets each key in the ``mapping`` dict to its corresponding value in the hash """ return self._data.update(mapping) def hmget(self, keys): "Returns a list of values ordered identically to ``keys``" values = [] for key in keys: values.append(self._data.get(key, None)) return values def hvals(self): "Return the list of values within hash" return self._data.values()
bsd-3-clause
7,353,101,178,696,724,000
26.183908
77
0.542072
false
4.063574
false
false
false
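Illustrative usage of the in-memory Hash class defined above (not part of the original module); it shows the Redis-like return conventions for hset, hincrby, hmget and hdel:

h = Hash()
assert h.hset('clicks', 5) == 1        # new field -> 1
assert h.hset('clicks', 5) == 0        # existing field -> 0
h.hincrby('clicks', 2)                 # 5 + 2 -> 7
assert h.hget('clicks') == 7
assert h.hmget(['clicks', 'missing']) == [7, None]
assert h.hdel('clicks', 'missing') == 1   # only 'clicks' existed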
stepanovsh/project_template
{{cookiecutter.repo_name}}/config/settings/local.py
1
2101
# -*- coding: utf-8 -*- ''' Local settings - Run in Debug mode - Use console backend for emails - Add Django Debug Toolbar - Add django-extensions as app ''' from .common import * # noqa # DEBUG # ------------------------------------------------------------------------------ DEBUG = env.bool('DJANGO_DEBUG', default=True) TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Note: This key only used for development and testing. SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!') # Mail settings # ------------------------------------------------------------------------------ EMAIL_HOST = 'localhost' EMAIL_PORT = 1025 EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') # CACHING # ------------------------------------------------------------------------------ CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': '' } } # django-debug-toolbar # ------------------------------------------------------------------------------ MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',) INSTALLED_APPS += ('debug_toolbar', ) INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',) DEBUG_TOOLBAR_CONFIG = { 'DISABLE_PANELS': [ 'debug_toolbar.panels.redirects.RedirectsPanel', ], 'SHOW_TEMPLATE_CONTEXT': True, } # django-extensions # ------------------------------------------------------------------------------ INSTALLED_APPS += ('django_extensions', ) # TESTING # ------------------------------------------------------------------------------ TEST_RUNNER = 'django.test.runner.DiscoverRunner' {%- if cookiecutter.use_celery == "y" -%} ########## CELERY # In development, all tasks will be executed locally by blocking until the task returns CELERY_ALWAYS_EAGER = True ########## END CELERY {%- endif %} # Your local stuff: Below this line define 3rd party library settings
bsd-3-clause
1,962,637,750,829,820,200
30.358209
101
0.502142
false
4.235887
false
false
false
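Once the cookiecutter template above has been rendered, the local settings module is selected the usual Django way via DJANGO_SETTINGS_MODULE. A minimal sketch, assuming the template's `config.settings.local` module path:

import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
django.setup()   # loads DEBUG, CACHES, debug-toolbar settings defined above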
jmeppley/py-metagenomics
sample_records.py
1
3684
#!/usr/bin/env python """ This script takes any file that can be divided into records and returns N randomly selected records Records can be fasta, fastq, genbank, or something described by a simple RegExp """ from os import path from edl.util import * from edl.batch import * import re import sys import argparse def main(): # set up CLI description = """ This script takes any file that can be divided into records and returns N randomly selected records. NOTE: By default, all sampled records are stored in memory. This requires a good amount of RAM (depending on record size and sample size). To avoid this, specify the number of records or request a count using the -n (population_size) option. Records can be fasta, fastq, genbank, or something described by a simple RegExp """ parser = argparse.ArgumentParser(description=description) add_IO_arguments(parser) add_record_parsing_arguments(parser) parser.add_argument( "-s", "--sample_size", default=1000000, type=int, metavar="SAMPLE_SIZE", help="Number of records to pull out. Defaults to 1 million.") parser.add_argument("-n", "--population_size", type=int, default=0, metavar="POPULATION_SIZE", help="Number of records in file. An integer, should " "be greater than the SAMPLE_SIZE, except: 0 " "(default)=> do a separate pass to count records " "first; -1 => reservoir sample to RAM on the fly") add_universal_arguments(parser) arguments = parser.parse_args() setup_logging(arguments) # check arguments if arguments.input_files == [sys.stdin, ] and arguments.population_size == 0: parser.error("We cannot count records from STDIN, please specify a" "positive population size or use reservoir sampling " "(-n -1)") if arguments.population_size > 0 and \ arguments.population_size < arguments.sample_size: parser.error("We cannot sample more records then " "there are in the file!") for inhandle, outhandle in inputIterator(arguments): # We need the file name to ge the type, get from handle (if not stdin) infilename = inhandle.name fileType = getFileType(arguments, infilename) record_iterator = fileType.recordStreamer(inhandle) logging.debug("Looking for %d records in %s" % (arguments.sample_size, infilename)) # if arguments.population_size<0: # indexed_sample_generator will only read file once # using reservoir sampling # count records if asked to if arguments.population_size == 0: record_count, total_size = get_total_size(inhandle.name, fileType) arguments.population_size = record_count logging.debug("setting population size to: {}" .format(arguments.population_size)) # get sampled record generator (will use reservoir if P is <0) sampled_records = indexed_sample_generator(record_iterator, N=arguments.sample_size, P=arguments.population_size) # print out sampled records count = 0 for record in sampled_records: outhandle.writelines(record) count += 1 logging.debug("Sampled %d records" % (count)) if __name__ == '__main__': main()
mit
-5,833,227,940,174,607,000
34.76699
79
0.602334
false
4.525799
false
false
false
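The `-n -1` mode described above relies on reservoir sampling so the input only has to be read once; the script delegates that to `edl.util.indexed_sample_generator`. As a rough illustration of the underlying idea (standard Algorithm R, not the edl implementation):

import random

def reservoir_sample(records, sample_size, rng=random):
    # Keep the first sample_size records, then replace with decreasing probability
    reservoir = []
    for i, record in enumerate(records):
        if i < sample_size:
            reservoir.append(record)
        else:
            j = rng.randint(0, i)          # uniform over [0, i]
            if j < sample_size:
                reservoir[j] = record
    return reservoir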
Nichol4s/PyHead
tests/unreader.py
1
1888
import os try: from cStringIO import StringIO except ImportError: from StringIO import StringIO # Classes that can undo reading data from # a given type of data source. class Unreader(object): def __init__(self): self.buf = StringIO() def chunk(self): raise NotImplementedError() def read(self, size=None): if size is not None and not isinstance(size, (int, long)): raise TypeError("size parameter must be an int or long.") if size == 0: return "" if size < 0: size = None self.buf.seek(0, os.SEEK_END) if size is None and self.buf.tell(): ret = self.buf.getvalue() self.buf.truncate(0) return ret if size is None: return self.chunk() while self.buf.tell() < size: chunk = self.chunk() if not len(chunk): ret = self.buf.getvalue() self.buf.truncate(0) return ret self.buf.write(chunk) data = self.buf.getvalue() self.buf.truncate(0) self.buf.write(data[size:]) return data[:size] def unread(self, data): self.buf.seek(0, os.SEEK_END) self.buf.write(data) class SocketUnreader(Unreader): def __init__(self, sock, max_chunk=8192): super(SocketUnreader, self).__init__() self.sock = sock self.mxchunk = max_chunk def chunk(self): return self.sock.recv(self.mxchunk) class IterUnreader(Unreader): def __init__(self, iterable): super(IterUnreader, self).__init__() self.iter = iter(iterable) def chunk(self): if not self.iter: return "" try: return self.iter.next() except StopIteration: self.iter = None return ""
mit
-3,738,559,382,780,167,700
24.863014
69
0.544492
false
4.068966
false
false
false
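A short example (not in the original module) of the read/unread contract using the IterUnreader defined above; the chunk strings are arbitrary:

u = IterUnreader(["hello", "world"])
first = u.read(5)          # "hello" -- buffer is empty afterwards
u.unread(first)            # push it back
assert u.read(5) == "hello"
assert u.read(5) == "world"
assert u.read(5) == ""     # iterator exhausted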
jzitelli/poolvr.py
poolvr/billboards.py
1
4370
import pkgutil import os.path from ctypes import c_void_p import numpy as np import OpenGL.GL as gl import OpenGL.error from .gl_rendering import Node, Technique, Material, Program, DTYPE_COMPONENT_TYPE, Texture from .gl_primitives import PlanePrimitive NULL_PTR = c_void_p(0) # TODO: pkgutils way TEXTURES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, 'textures') class BillboardParticles(Node): technique = Technique(Program(pkgutil.get_data('poolvr', 'shaders/bb_particles_vs.glsl').decode(), pkgutil.get_data('poolvr', 'shaders/bb_particles_fs.glsl').decode())) _modelview = np.eye(4, dtype=np.float32) def __init__(self, texture=Texture(os.path.join(TEXTURES_DIR, 'sphere_bb_alpha.png')), normal_map=Texture(os.path.join(TEXTURES_DIR, 'sphere_bb_normal.png')), num_particles=1, scale=1.0, color=None, translate=None): Node.__init__(self) self.texture = texture self.normal_map = normal_map self.material = Material(self.technique, textures={'map': texture, 'u_normal': normal_map}) self.num_particles = num_particles if color is None: color = np.array([num_particles*[1.0, 1.0, 1.0]], dtype=np.float32) if translate is None: translate = np.array([[1.1*scale*i, 0.2, 0.0] for i in range(num_particles)], dtype=np.float32) self.primitive = PlanePrimitive(width=scale, height=scale, color=color, translate=translate, attribute_usage={'color': gl.GL_STATIC_DRAW, 'translate': gl.GL_DYNAMIC_DRAW}) self.primitive.attributes['position'] = self.primitive.attributes['vertices'] self.primitive.attributes['uv'] = self.primitive.attributes['uvs'] self._initialized = False def init_gl(self, force=False): if self._initialized and not force: return self.material.init_gl(force=force) self.primitive.init_gl(force=force) self._initialized = True def update_gl(self): if not self._initialized: self.init_gl() translate = self.primitive.attributes['translate'] values = translate.tobytes() try: gl.glNamedBufferSubData(self.primitive.buffers['translate'], 0, len(values), values) except OpenGL.error.NullFunctionError as e: gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.primitive.buffers['translate']) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, len(values), values) def draw(self, view=None, projection=None, frame_data=None): self.material.use() if view is not None: self.world_matrix.dot(view, out=self._modelview) gl.glUniformMatrix4fv(self.technique.uniform_locations['u_modelview'], 1, False, self._modelview) if projection is not None: gl.glUniformMatrix4fv(self.technique.uniform_locations['u_projection'], 1, False, projection) for attribute_name, location in self.technique.attribute_locations.items(): attribute = self.primitive.attributes[attribute_name] gl.glEnableVertexAttribArray(location) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.primitive.buffers[attribute_name]) gl.glVertexAttribPointer(location, attribute.shape[-1], DTYPE_COMPONENT_TYPE[attribute.dtype], False, attribute.dtype.itemsize * attribute.shape[-1], NULL_PTR) if attribute_name == 'translate' or attribute_name == 'color': gl.glVertexAttribDivisor(location, 1) else: gl.glVertexAttribDivisor(location, 0) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.primitive.index_buffer) gl.glDrawElementsInstanced(self.primitive.mode, self.primitive.indices.size, DTYPE_COMPONENT_TYPE[self.primitive.indices.dtype], NULL_PTR, self.num_particles) # for location in self.technique.attribute_locations.values(): # gl.glDisableVertexAttribArray(location) self.material.release()
mit
-6,383,165,777,984,583,000
50.411765
116
0.613501
false
3.933393
false
false
false
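Because the `translate` attribute above is created with GL_DYNAMIC_DRAW, per-frame motion only has to re-upload that one buffer via `update_gl()`. A hypothetical per-frame update, assuming an active OpenGL context, an initialized BillboardParticles instance `bb`, and caller-supplied `velocities`, `dt`, `view_matrix`, and `projection_matrix`:

bb.primitive.attributes['translate'][:] += velocities * dt   # move particles CPU-side
bb.update_gl()                                                # re-upload only 'translate'
bb.draw(view=view_matrix, projection=projection_matrix)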
JioCloud/glance
glance/api/middleware/cache.py
1
12967
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Transparent image file caching middleware, designed to live on Glance API nodes. When images are requested from the API node, this middleware caches the returned image file to local filesystem. When subsequent requests for the same image file are received, the local cached copy of the image file is returned. """ import re from oslo_log import log as logging import webob from glance.api.common import size_checked_iter from glance.api import policy from glance.api.v1 import images from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db from glance import i18n from glance import image_cache from glance import notifier import glance.registry.client.v1.api as registry LOG = logging.getLogger(__name__) _LI = i18n._LI _LE = i18n._LE _LW = i18n._LW PATTERNS = { ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'), ('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'), ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'), ('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$') } class CacheFilter(wsgi.Middleware): def __init__(self, app): self.cache = image_cache.ImageCache() self.serializer = images.ImageSerializer() self.policy = policy.Enforcer() LOG.info(_LI("Initialized image cache middleware")) super(CacheFilter, self).__init__(app) def _verify_metadata(self, image_meta): """ Sanity check the 'deleted' and 'size' metadata values. """ # NOTE: admins can see image metadata in the v1 API, but shouldn't # be able to download the actual image data. if image_meta['status'] == 'deleted' and image_meta['deleted']: raise exception.NotFound() if not image_meta['size']: # override image size metadata with the actual cached # file size, see LP Bug #900959 image_meta['size'] = self.cache.get_image_size(image_meta['id']) @staticmethod def _match_request(request): """Determine the version of the url and extract the image id :returns tuple of version and image id if the url is a cacheable, otherwise None """ for ((version, method), pattern) in PATTERNS.items(): if request.method != method: continue match = pattern.match(request.path_info) if match is None: continue image_id = match.group(1) # Ensure the image id we got looks like an image id to filter # out a URI like /images/detail. See LP Bug #879136 if image_id != 'detail': return (version, method, image_id) def _enforce(self, req, action, target=None): """Authorize an action against our policies""" if target is None: target = {} try: self.policy.enforce(req.context, action, target) except exception.Forbidden as e: LOG.debug("User not permitted to perform '%s' action", action) raise webob.exc.HTTPForbidden(explanation=e.msg, request=req) def _get_v1_image_metadata(self, request, image_id): """ Retrieves image metadata using registry for v1 api and creates dictionary-like mash-up of image core and custom properties. 
""" try: image_metadata = registry.get_image_metadata(request.context, image_id) return utils.create_mashup_dict(image_metadata) except exception.NotFound as e: LOG.debug("No metadata found for image '%s'", image_id) raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) def _get_v2_image_metadata(self, request, image_id): """ Retrieves image and for v2 api and creates adapter like object to access image core or custom properties on request. """ db_api = glance.db.get_api() image_repo = glance.db.ImageRepo(request.context, db_api) try: image = image_repo.get(image_id) # Storing image object in request as it is required in # _process_v2_request call. request.environ['api.cache.image'] = image return policy.ImageTarget(image) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) def process_request(self, request): """ For requests for an image file, we check the local image cache. If present, we return the image file, appending the image metadata in headers. If not present, we pass the request on to the next application in the pipeline. """ match = self._match_request(request) try: (version, method, image_id) = match except TypeError: # Trying to unpack None raises this exception return None self._stash_request_info(request, image_id, method, version) if request.method != 'GET' or not self.cache.is_cached(image_id): return None method = getattr(self, '_get_%s_image_metadata' % version) image_metadata = method(request, image_id) # Deactivated images shall not be served from cache if image_metadata['status'] == 'deactivated': return None try: self._enforce(request, 'download_image', target=image_metadata) except exception.Forbidden: return None LOG.debug("Cache hit for image '%s'", image_id) image_iterator = self.get_from_cache(image_id) method = getattr(self, '_process_%s_request' % version) try: return method(request, image_id, image_iterator, image_metadata) except exception.NotFound: msg = _LE("Image cache contained image file for image '%s', " "however the registry did not contain metadata for " "that image!") % image_id LOG.error(msg) self.cache.delete_cached_image(image_id) @staticmethod def _stash_request_info(request, image_id, method, version): """ Preserve the image id, version and request method for later retrieval """ request.environ['api.cache.image_id'] = image_id request.environ['api.cache.method'] = method request.environ['api.cache.version'] = version @staticmethod def _fetch_request_info(request): """ Preserve the cached image id, version for consumption by the process_response method of this middleware """ try: image_id = request.environ['api.cache.image_id'] method = request.environ['api.cache.method'] version = request.environ['api.cache.version'] except KeyError: return None else: return (image_id, method, version) def _process_v1_request(self, request, image_id, image_iterator, image_meta): # Don't display location if 'location' in image_meta: del image_meta['location'] image_meta.pop('location_data', None) self._verify_metadata(image_meta) response = webob.Response(request=request) raw_response = { 'image_iterator': image_iterator, 'image_meta': image_meta, } return self.serializer.show(response, raw_response) def _process_v2_request(self, request, image_id, image_iterator, image_meta): # We do some contortions to get the image_metadata so # that we can provide it to 'size_checked_iter' which # will generate a notification. 
# TODO(mclaren): Make notification happen more # naturally once caching is part of the domain model. image = request.environ['api.cache.image'] self._verify_metadata(image_meta) response = webob.Response(request=request) response.app_iter = size_checked_iter(response, image_meta, image_meta['size'], image_iterator, notifier.Notifier()) # NOTE (flwang): Set the content-type, content-md5 and content-length # explicitly to be consistent with the non-cache scenario. # Besides, it's not worth the candle to invoke the "download" method # of ResponseSerializer under image_data. Because method "download" # will reset the app_iter. Then we have to call method # "size_checked_iter" to avoid missing any notification. But after # call "size_checked_iter", we will lose the content-md5 and # content-length got by the method "download" because of this issue: # https://github.com/Pylons/webob/issues/86 response.headers['Content-Type'] = 'application/octet-stream' response.headers['Content-MD5'] = image.checksum response.headers['Content-Length'] = str(image.size) return response def process_response(self, resp): """ We intercept the response coming back from the main images Resource, removing image file from the cache if necessary """ status_code = self.get_status_code(resp) if not 200 <= status_code < 300: return resp try: (image_id, method, version) = self._fetch_request_info( resp.request) except TypeError: return resp if method == 'GET' and status_code == 204: # Bugfix:1251055 - Don't cache non-existent image files. # NOTE: Both GET for an image without locations and DELETE return # 204 but DELETE should be processed. return resp method_str = '_process_%s_response' % method try: process_response_method = getattr(self, method_str) except AttributeError: LOG.error(_LE('could not find %s') % method_str) # Nothing to do here, move along return resp else: return process_response_method(resp, image_id, version=version) def _process_DELETE_response(self, resp, image_id, version=None): if self.cache.is_cached(image_id): LOG.debug("Removing image %s from cache", image_id) self.cache.delete_cached_image(image_id) return resp def _process_GET_response(self, resp, image_id, version=None): image_checksum = resp.headers.get('Content-MD5') if not image_checksum: # API V1 stores the checksum in a different header: image_checksum = resp.headers.get('x-image-meta-checksum') if not image_checksum: LOG.error(_LE("Checksum header is missing.")) # fetch image_meta on the basis of version image_metadata = None if version: method = getattr(self, '_get_%s_image_metadata' % version) image_metadata = method(resp.request, image_id) # NOTE(zhiyan): image_cache return a generator object and set to # response.app_iter, it will be called by eventlet.wsgi later. # So we need enforce policy firstly but do it by application # since eventlet.wsgi could not catch webob.exc.HTTPForbidden and # return 403 error to client then. self._enforce(resp.request, 'download_image', target=image_metadata) resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum, resp.app_iter) return resp def get_status_code(self, response): """ Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): return response.status_int return response.status def get_from_cache(self, image_id): """Called if cache hit""" with self.cache.open_for_read(image_id) as cache_file: chunks = utils.chunkiter(cache_file) for chunk in chunks: yield chunk
apache-2.0
-4,818,873,228,218,001,000
39.021605
78
0.61001
false
4.273896
false
false
false
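A standalone illustration of how the PATTERNS table above maps a request path to a cacheable image id; the UUID is a made-up example:

import re

pattern = re.compile(r'^/v2/images/([^\/]+)/file$')   # same regex as PATTERNS[('v2', 'GET')]
match = pattern.match('/v2/images/3fc2c87e-1111-2222-3333-444455556666/file')
assert match.group(1) == '3fc2c87e-1111-2222-3333-444455556666'
assert pattern.match('/v2/images/detail/file') is not None   # why _match_request filters 'detail'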
zseder/hunmisc
hunmisc/corpustools/20ng_to_conll.py
1
4033
""" Copyright 2011-13 Attila Zseder Email: [email protected] This file is part of hunmisc project url: https://github.com/zseder/hunmisc hunmisc is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ """Converts a newsgroup file (as in the 20 Newsgroups collection) to the conll2 format.""" import os.path import re from langtools.nltk.nltktools import NltkTools from langtools.utils import cmd_utils from langtools.utils.file_utils import * from langtools.io.conll2.conll_iter import FieldedDocument re_pat = re.compile(r"^[\s>]+", re.UNICODE) # Decoding is not required as NltkTools.tag_raw() handles that for utf-8. def read_stream(ins): """Reads a stream. Returns a {field:raw text} map, with a Body field. The title is the content of the subject header field.""" fields = {} for line in ins: line = line.strip() if len(line) == 0: break if line.startswith("Subject:"): fields['Title'] = line[8:] fields['Body'] = u' '.join(re_pat.sub("", line.strip().replace(u'\ufffd', ' ')) for line in ins) return fields def read_file(infile): """Reads a file. Returns a {field:raw text} map, with a Body field. If title is true, a Title field will be added too.""" with FileReader(infile, replace=True).open() as ins: return read_stream(ins) def write_doc(doc, outs): """Writes the document to outs. A header line is written, then the Title field (if any), then the body.""" outs.write(u"%%#PAGE\t{0}\n".format(doc.title)) if 'Title' in doc.fields: outs.write(u"%%#Field\tTitle\n") write_text(doc.fields['Title'], outs) outs.write(u"%%#Field\tBody\n") write_text(doc.fields['Body'], outs) def write_text(text, outs): for token in text: outs.write(u"\t".join(token)) outs.write("\n") if __name__ == '__main__': import sys try: params, args = cmd_utils.get_params_sing(sys.argv[1:], 'i:o:m:ta', 'i', 0) if not os.path.isdir(params['i']): raise ValueError('Input must be a directory of files.') except ValueError as err: print('Error: {0}'.format(err)) print(('Usage: {0} -i input_dir [-o output_file] -m [hunpos_model] ' + '[-a]').format(sys.argv[0])) print(' input_dir: the directory with the input text files.') print(' hunpos_model: the hunpos model file.') print(' output_file: the conll2 output file. If omitted, the result will') print(' be written to stdout.') print(' hunpos_model: the hunpos model file.') print(' -a: the output is appended to output_file, instead of overwriting it.') sys.exit() if 'o' in params: output_mode = 'a' if 'a' in params else 'w' out = FileWriter(params['o'], output_mode).open() else: out = StreamWriter(sys.stdout) nt = NltkTools(pos=True, stem=True, tok=True, pos_model=params.get('m')) for infile in (os.path.join(d, f) for d, _, fs in os.walk(params['i']) for f in fs): print "File " + infile doc = FieldedDocument(infile) doc.fields = {} for field, raw_text in read_file(infile).iteritems(): doc.fields[field] = nt.tag_raw(raw_text) write_doc(doc, out) if 'o' in params: out.close()
gpl-3.0
898,430,724,181,993,000
36.691589
100
0.634763
false
3.516129
false
false
false
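A small illustration (not in the original script) of what `re_pat` above removes: leading whitespace and ">" quote markers at the start of newsgroup body lines:

import re

re_pat = re.compile(r"^[\s>]+", re.UNICODE)
assert re_pat.sub("", "> > quoted reply text") == "quoted reply text"
assert re_pat.sub("", "   indented line") == "indented line"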
OneDrive/onedrive-sdk-python
src/python2/request/item_copy.py
1
3729
# -*- coding: utf-8 -*- ''' # Copyright (c) 2015 Microsoft Corporation # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # This file was generated and any changes will be overwritten. ''' from ..model.item import Item from ..request_base import RequestBase from ..request_builder_base import RequestBuilderBase from ..async_operation_monitor import AsyncOperationMonitor from ..options import * import json class ItemCopyRequest(RequestBase): def __init__(self, request_url, client, options, name=None, parent_reference=None): super(ItemCopyRequest, self).__init__(request_url, client, options) self.method = "POST" self.body_options={} if name: self.body_options["name"] = name if parent_reference: self.body_options["parentReference"] = parent_reference @property def body_options(self): return self._body_options @body_options.setter def body_options(self, value): self._body_options=value def post(self): """Sends the POST request Returns: :class:`AsyncOperationMonitor<onedrivesdk.async_operation_monitor.AsyncOperationMonitor>`: The resulting entity from the operation """ self.content_type = "application/json" self.append_option(HeaderOption("Prefer", "respond-async")) response = self.send(self.body_options) entity = AsyncOperationMonitor(response.headers["Location"], self._client, None) return entity class ItemCopyRequestBuilder(RequestBuilderBase): def __init__(self, request_url, client, name=None, parent_reference=None): super(ItemCopyRequestBuilder, self).__init__(request_url, client) self._method_options = {} self._method_options["name"] = name self._method_options["parentReference"] = parent_reference._prop_dict def request(self, options=None): """Builds the request for the ItemCopy Args: options (list of :class:`Option<onedrivesdk.options.Option>`): Default to None, list of options to include in the request Returns: :class:`ItemCopyRequest<onedrivesdk.request.item_copy.ItemCopyRequest>`: The request """ req = ItemCopyRequest(self._request_url, self._client, options, name=self._method_options["name"], parent_reference=self._method_options["parentReference"]) return req def post(self): """Sends the POST request Returns: :class:`Item<onedrivesdk.model.item.Item>`: The resulting Item from the operation """ return self.request().post()
mit
-2,817,140,909,154,993,000
36.29
164
0.679807
false
4.361404
false
false
false
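For reference, a hedged sketch of driving the generated builder above directly; `client`, `item_url`, and `parent_ref` (an ItemReference-like object exposing `_prop_dict`) are placeholders the caller must supply, and the '/copy' action segment appended to the URL is an assumption:

builder = ItemCopyRequestBuilder(item_url + '/copy', client,
                                 name='copy_of_file.txt',
                                 parent_reference=parent_ref)
monitor = builder.request().post()   # POST carries "Prefer: respond-async"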
thehackercat/aha-memo
serverApp/common/aha.py
1
13263
# -*- coding:utf-8 -*- __author__ = 'LexusLee' import time import json import tornado import tornado.gen from tornado.web import HTTPError from tornado.escape import json_decode from foundation.log import logger from foundation import const from serverAppConfig import DEVICE_TYPE from serverAppConfig import TOKEN_ROLE, TOKEN_DEADLINE_TIME, TOKEN_USER_ID from cacheTool import _get_redis_connection class ArgumentTypeError(HTTPError): """Exception raised by `IntLongRequestHandler.add_query_argument`. This is a subclass of `HTTPError`, so if it is uncaught a 400 response code will be used instead of 500 (and a stack trace will not be logged). """ def __init__(self, arg_name): super(ArgumentTypeError, self).__init__( 400, 'Type of argument %s must be string type' % arg_name) self.arg_name = arg_name class RequestHandlerAha(tornado.web.RequestHandler): """   根据需要,定制tornado.web.RequestHandler """ def __init__(self, application, request, auto_init=True, **kwargs): """ 构造函数 :param write_to_client: 如果是后台调用,该值必须是True,否则是False,默认为False """ super(RequestHandlerAha, self).__init__(application, request, **kwargs) self.auto_init = auto_init self.decoded_secure_cookie = {} self.redis_client = None def on_finish(self): if self.redis_client: logger.debug('存在redis连接,所以关闭连接') self.redis_client.disconnect() def get_secure_cookie(self, name, value=None, max_age_days=31, min_version=None): """ 重写重写tornado.web.RequestHandler中的get_secure_cookie方法,用于在多次调用get_secure_cookie 不重复去解密 :param name: :return: """ if name in self.decoded_secure_cookie.keys(): return self.decoded_secure_cookie[name] else: value = super(RequestHandlerAha, self).get_secure_cookie(name, value, max_age_days, min_version) self.decoded_secure_cookie[name] = value return value def get_current_user_role(self): """ 返回当前用户的角色名字,角色名字参考configueFiles文件夹下面的authority文件注释 :return: 角色名字 """ tokenstr = self.get_secure_cookie("token") if not tokenstr: logger.debug("cookie中没有token,因此是游客访问") return "visitor" token = json_decode(tokenstr) role = token.get(TOKEN_ROLE, "visitor") return role def get_current_user(self): """ 重写tornado.web.RequestHandler中的get_current_user方法,用于在调用self.current_user能正确返回 直接调用该函数可以返回相应的用户id,也可以使用self.current_user来返回用户的id。 :return: 如果有相应的token,则返回对应的id,否则返回None """ tokenstr = self.get_secure_cookie("token") if not tokenstr: logger.debug("cookie中没有token,因此是游客访问,因此没有用户的id") return None token = json_decode(tokenstr) user_id = token.get(TOKEN_USER_ID) return user_id def write(self, chunk): """ 向调用者返回数据,如果是客户端直接请求的,则向客户端返回对应写的数据,函数返回None;如果是后台自己调用, 则返回相应的python对象数据,函数返回对应python对象数据 :param chunk: 待返回的数据 :return: 如果是后台自己调用,返回对应数据的python对象;否则返回None """ # self.set_header("Content-Type", "application/json; charset=UTF-8") if self.auto_init: super(RequestHandlerAha, self).write(chunk) else: return chunk def __add_arg(self, source, name, *args): """ 用来底层实现增加请求参数 :param source: 增加参数到指定的source上 :param name: 参数的名字,必须是字符串 :param args: 参数的值,可以是多个参数,但是必须是字符串 :return:None :exception ArgumentTypeError """ if not isinstance(name, basestring): raise ArgumentTypeError(name) for v in args: if not isinstance(v, basestring): raise ArgumentTypeError(name) addvalue = list(args) if name in self.request.query_arguments.keys(): addvalue.extend(source.get(name, [])) self.request.query_arguments[name] = addvalue def add_query_argument(self, name, *args): """ 增加query的参数,形如URL后面的参数 :param name: 参数的名字,必须是字符串 :param args: 参数的值,可以是多个参数,但是必须是字符串 :return:None """ self.__add_arg(self.request.query_arguments, name, *args) def add_body_argument(self, name, *args): """ 
增加body的参数,形如提交表单里面的数据 :param name: 参数的名字,必须是字符串 :param args: 参数的值,可以是多个参数,但是必须是字符串 :return:None """ self.__add_arg(self.request.body_arguments, name, *args) def add_argument(self, name, *args): """ 增加全局参数 :param name: 参数的名字,必须是字符串 :param args: 参数的值,可以是多个参数,但是必须是字符串 :return:None """ self.__add_arg(self.request.arguments, name, *args) def get_redis_conn(self): """ 得到一个redis的连接 """ if not self.redis_client: self.redis_client = _get_redis_connection() return self.redis_client @property def device_type(self): """ 得到设备类型,返回的模拟枚举类型: DEVICE_TYPE :return: """ if not hasattr(self, "_device_type"): userAgent = self.request.headers.get('User-Agent', "") via = self.request.headers.get("Via", "") self._device_type = self._distinguishDevice(via, userAgent) return self._device_type def _distinguishDevice(self, via, userAgent): """ 验证设备是什么类型设备 :param via: :param userAgent: :return: 0代表手机,1表示pc """ pcHeaders = ["Windows 98", "Windows ME", "Windows 2000", "Windows XP", "Windows NT", "Ubuntu"] mobileGateWayHeaders = [ "ZXWAP", "chinamobile.com", "monternet.com", "infoX", "wap.lizongbo.com","Bytemobile"] mobileUserAgents = [ "Nokia", "SAMSUNG", "MIDP-2", "CLDC1.1", "SymbianOS", "MAUI", "UNTRUSTED/1.0", "Windows CE", "iPhone", "iPad", "Android", "BlackBerry", "UCWEB", "ucweb", "BREW", "J2ME", "YULONG", "YuLong", "COOLPAD","TIANYU","TY-", "K-Touch", "Haier", "DOPOD","Lenovo","LENOVO", "HUAQIN", "AIGO-", "CTC/1.0", "CTC/2.0","CMCC","DAXIAN","MOT-","SonyEricsson","GIONEE","HTC","ZTE", "HUAWEI", "webOS","GoBrowser","IEMobile", "WAP2.0"] pcFlag = False mobileFlag = False for pcHeader in pcHeaders: if pcFlag: break if userAgent.find(pcHeader) != -1: pcFlag = True break for mobileGateWayHeader in mobileGateWayHeaders: if mobileFlag: break if via.find(mobileGateWayHeader) != -1: mobileFlag = True break for mobileUserAgent in mobileUserAgents: if mobileFlag: break if userAgent.find(mobileUserAgent) != -1: mobileFlag = True break if mobileFlag==True and mobileFlag!=pcFlag: return DEVICE_TYPE.MOBILE else: return DEVICE_TYPE.PC class ResponseJSON: """ 处理返回给客户端的json对象 """ def __init__(self, code, data=None, description=None, status=None): """ :param code: 返回的code,数字类型 :param description: code相关描述 :param data: 具体的data数据 """ self.code = code self.description = description self.data = data self.status = status def resultDict(self): """ 返回一个dict对象。如果code不是数字,则认为系统内部错误,code置为500。如果 description为空,则没有description在dict中。如果data为一个json对象字符串,则会把对应 的字符串转换成dict :return:返回一个dict对象 """ if isinstance(self.code, int): meta = {"code": self.code} else: meta = {"code": 500} if const.basic.get('send_description') == 'True' and self.description: meta["description"] = self.description if self.status: if isinstance(self.status, int): meta['status'] = self.status else: meta['status'] = -9999 rdict = {"meta": meta} if isinstance(self.data, basestring): try: rdict["data"] = json.loads(self.data, encoding="utf-8") except ValueError: logger.warning("ResponseJSON:data数据格式错误") elif isinstance(self.data, dict) or isinstance(self.data, list): rdict["data"] = self.data return rdict def resultStr(self): """ 返回的是结果json字符串 """ return json.dumps(self.resultDict(), ensure_ascii=False) def _auth_user_token(token): """ 通过token去验证用户是否已经登陆成功 :param token:字典格式,token: CT: create_time,该token创建时间 DT: deadline_time,该token的有效日期 :return: 验证成功返回True,验证失败返回False """ if token is None: return False else: token = json_decode(token) deadline_time = token[TOKEN_DEADLINE_TIME] now_time = get_system_time(pretty=False) if now_time < deadline_time: return True else: return False def 
authenticated(method):
    """
    Decorate methods with this to require that the user be logged in.
    """
    def wrapper(self, *args, **kwargs):
        try:
            if not self.request.loginSuccess:  # the first login raises AttributeError; if no exception is raised, login has already been checked
                return self.write(ResponseJSON(401, description="not login.").resultDict())  # return 'login already checked, but verification failed'
        except AttributeError:
            resp = _auth_user_token(self.get_secure_cookie("token"))
            if resp:
                self.request.loginSuccess = True
                return method(self, *args, **kwargs)
            else:
                self.request.loginSuccess = False
                return self.write(ResponseJSON(401, description="not login").resultDict())  # return 'verification failed'
        else:
            return method(self, *args, **kwargs)
    return wrapper


def _auth_user_authority(code, role):
    """
    Use ``code`` to check whether the user has the corresponding permission
    :param code: feature identification code
    :return: True if the check succeeds, otherwise False
    """
    logger.debug(role)
    rolelist = const.authority.get(str(code))
    logger.debug(rolelist)
    if role in rolelist:
        return True
    else:
        return False


def authorized(code):
    """
    A decorator that checks whether the user is allowed to use this feature.
    If the user has permission for the module, the wrapped function is called;
    otherwise execution does not continue.
    :param code: identifier of the module
    """
    def _deco(method):
        def wrappers(self, *args, **kwargs):
            role = self.get_current_user_role()
            resp = _auth_user_authority(code, role)
            if resp:
                return method(self, *args, **kwargs)
            else:
                logger.debug("The user has no permission for this feature")
                return self.write(ResponseJSON(403, description="No authority for the function").resultDict())  # the user does not have this permission
        return wrappers
    return _deco


def get_system_time(pretty=True):
    """
    Return the current system time
    :return: the current system time
    """
    if pretty:
        ISOTIMEFORMAT = "%Y-%m-%d-%X"
        current_time = time.strftime(ISOTIMEFORMAT, time.localtime(time.time()))
    else:
        current_time = time.time()
    return current_time
gpl-3.0
1,503,458,384,952,282,600
30.134771
122
0.565492
false
2.960277
false
false
false
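An illustrative Tornado handler (not part of the module above) showing how the decorators and ResponseJSON are meant to compose; the authority code 1001 is a made-up entry that would need a matching role list in the authority config:

class PingHandler(RequestHandlerAha):

    @authenticated
    @authorized(1001)
    def get(self):
        data = {'server_time': get_system_time()}
        return self.write(ResponseJSON(200, data=data).resultDict())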
par2/lamana
lamana/models/fixtures/fixture_model_class.py
1
10351
#------------------------------------------------------------------------------ '''Class-style Model This fixture is used to test the importing of models, handled by the `theories.handshake()` module. As of 0.4.11, models can: - be located in the `lamana.models` folder - module and classes can have any pythonic name; hard-coding removed - any sub-package can be accessed by the "model" keyword in `Case.apply()` - search for the hook-containing class and it's hook method This module is here to test these aspects as the module is imported. The Wilson_LT model was adapted. No functions are expected in this module; there are tests against this. ''' import math import collections as ct import pandas as pd from lamana.input_ import BaseDefaults from lamana.theories import BaseModel from lamana.lt_exceptions import IndeterminateError # This class lacks a hook method; theories should skip it. class DummyModel(): pass # The class containing the hook method can have any name. class RandomName(BaseModel): '''A modified laminate theory for circular biaxial flexure disks, loaded with a flat piston punch on 3-ball support having two distinct materials (polymer and ceramic).''' '''Accept extra args and kwds here''' def __init__(self): self.Laminate = None self.FeatureInput = None self.LaminateModel = None # TODO: eventually abstract into BaseModel and deprecate direct coding # TODO: accept kwargs from Case -> handshake def _use_model_(self, Laminate, adjusted_z=False): '''Return updated DataFrame and FeatureInput Return None if exceptions raised. Parameters ---------- df : DataFrame LaminateModel with IDs and Dimensional Variables. FeatureInut : dict Geometry, laminate parameters and more. Updates Globals dict for parameters in the dashboard output. adjusted_z: bool; default=False If True, uses z(m)* values instead; different assumption for internal calc. Raises ------ ZeroDivisionError If zero `r` or `a` in the log term are zero. ValueError If negative numbers are in the log term or the support radius exceeds the sample radius. Returns ------- tuple The updated calculations and parameters stored in a tuple `(LaminateModel, FeatureInput)``. ''' self.Laminate = Laminate df = Laminate.LFrame.copy() FeatureInput = Laminate.FeatureInput # Author-defined Exception Handling if (FeatureInput['Parameters']['r'] == 0): raise ZeroDivisionError('r=0 is invalid for the log term in the moment eqn.') elif (FeatureInput['Parameters']['a'] == 0): raise ZeroDivisionError('a=0 is invalid for the log term in the moment eqn.') elif (FeatureInput['Parameters']['r'] < 0) | (FeatureInput['Parameters']['a'] < 0): raise ValueError('Negative numbers are invalid for the log term ' 'in moment eqn.') elif FeatureInput['Parameters']['a'] > FeatureInput['Parameters']['R']: raise ValueError('Support radius is larger than sample radius.') elif df['side'].str.contains('INDET').any(): print('INDET value found. Rolling back...') raise IndeterminateError('INDET value found. Unable to accurately calculate stress.') #raise AssertionError('Indeterminate value found. 
Unable to accurately calculate stress.') # Calling functions to calculate Qs and Ds df.loc[:, 'Q_11'] = self.calc_stiffness(df, FeatureInput['Properties']).q_11 df.loc[:, 'Q_12'] = self.calc_stiffness(df, FeatureInput['Properties']).q_12 df.loc[:, 'D_11'] = self.calc_bending(df, adj_z=adjusted_z).d_11 df.loc[:, 'D_12'] = self.calc_bending(df, adj_z=adjusted_z).d_12 # Global Variable Update if (FeatureInput['Parameters']['p'] == 1) & (Laminate.nplies%2 == 0): D_11T = sum(df['D_11']) D_12T = sum(df['D_12']) else: D_11T = sum(df.loc[df['label'] == 'interface', 'D_11']) # total D11 D_12T = sum(df.loc[df['label'] == 'interface', 'D_12']) #print(FeatureInput['Geometric']['p']) D_11p = (1./((D_11T**2 - D_12T**2)) * D_11T) # D_12n = -(1./((D_11T**2 - D_12T**2)) *D_12T) # v_eq = D_12T/D_11T # equiv. Poisson's ratio M_r = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_r M_t = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_t K_r = (D_11p*M_r) + (D_12n*M_t) # curvatures K_t = (D_12n*M_r) + (D_11p*M_t) # Update FeatureInput global_params = { 'D_11T': D_11T, 'D_12T': D_12T, 'D_11p': D_11p, 'D_12n': D_12n, 'v_eq ': v_eq, 'M_r': M_r, 'M_t': M_t, 'K_r': K_r, 'K_t:': K_t, } FeatureInput['Globals'] = global_params self.FeatureInput = FeatureInput # update with Globals #print(FeatureInput) # Calculate Strains and Stresses and Update DataFrame df.loc[:,'strain_r'] = K_r * df.loc[:, 'Z(m)'] df.loc[:,'strain_t'] = K_t * df.loc[:, 'Z(m)'] df.loc[:, 'stress_r (Pa/N)'] = (df.loc[:, 'strain_r'] * df.loc[:, 'Q_11'] ) + (df.loc[:, 'strain_t'] * df.loc[:, 'Q_12']) df.loc[:,'stress_t (Pa/N)'] = (df.loc[:, 'strain_t'] * df.loc[:, 'Q_11'] ) + (df.loc[:, 'strain_r'] * df.loc[:, 'Q_12']) df.loc[:,'stress_f (MPa/N)'] = df.loc[:, 'stress_t (Pa/N)']/1e6 del df['Modulus'] del df['Poissons'] self.LaminateModel = df return (df, FeatureInput) #------------------------------------------------------------------------------ '''Prefer staticmethods here. Add formulas to doc strings.''' def calc_stiffness(self, df, mat_props): '''Return tuple of Series of (Q11, Q12) floats per lamina.''' # Iterate to Apply Modulus and Poisson's to correct Material # TODO: Prefer cleaner ways to parse materials from mat_props df_mat_props = pd.DataFrame(mat_props) # df easier to munge df_mat_props.index.name = 'materials' ##for material in mat_props.index: for material in df_mat_props.index: mat_idx = df['matl'] == material df.loc[mat_idx, 'Modulus'] = df_mat_props.loc[material, 'Modulus'] df.loc[mat_idx, 'Poissons'] = df_mat_props.loc[material, 'Poissons'] E = df['Modulus'] # series of moduli v = df['Poissons'] stiffness = ct.namedtuple('stiffness', ['q_11', 'q_12']) q_11 = E / (1 - (v**2)) q_12 = (v*E) / (1 - (v**2)) return stiffness(q_11, q_12) def calc_bending(self, df, adj_z=False): '''Return tuple of Series of (D11, D12) floats.''' q_11 = df['Q_11'] q_12 = df['Q_12'] h = df['h(m)'] # TODO: need to fix kwargs passing first; tabled since affects many modules. if not adj_z: z = df['z(m)'] else: z = df['z(m)*'] bending = ct.namedtuple('bending', ['d_11', 'd_12']) d_11 = ((q_11*(h**3)) / 12.) + (q_11*h*(z**2)) d_12 = ((q_12*(h**3)) / 12.) + (q_12*h*(z**2)) return bending(d_11, d_12) def calc_moment(self, df, load_params, v_eq): '''Return tuple of moments (radial and tangential); floats. See Timishenko-Woinowsky: Eq. 
91; default''' P_a = load_params['P_a'] a = load_params['a'] r = load_params['r'] moments = ct.namedtuple('moments', ['m_r', 'm_t']) m_r = ((P_a/(4*math.pi)) * ((1 + v_eq)*math.log10(a/r))) m_t = ((P_a/(4*math.pi)) * (((1 + v_eq)*math.log10(a/r)) + (1 - v_eq))) return moments(m_r, m_t) class Defaults(BaseDefaults): '''Return parameters for building distributions cases. Useful for consistent testing. Dimensional defaults are inherited from utils.BaseDefaults(). Material-specific parameters are defined here by he user. - Default geometric parameters - Default material properties - Default FeatureInput Examples -------- >>> dft = Defaults() >>> dft.load_params {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,} >>> dft.mat_props {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} >>> dft.FeatureInput {'Geometry' : '400-[200]-800', 'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}, 'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],}, 'Custom' : None, 'Model' : Wilson_LT} Returns ------- class Updated attributes inherited from the `BaseDefaults` class. ''' def __init__(self): BaseDefaults.__init__(self) '''DEV: Add defaults first. Then adjust attributes.''' # DEFAULTS ------------------------------------------------------------ # Build dicts of geometric and material parameters self.load_params = { 'R': 12e-3, # specimen radius 'a': 7.5e-3, # support ring radius 'p': 5, # points/layer 'P_a': 1, # applied load 'r': 2e-4, # radial distance from center loading } self.mat_props = { 'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33} } # ATTRIBUTES ---------------------------------------------------------- # FeatureInput self.FeatureInput = self.get_FeatureInput( self.Geo_objects['standard'][0], load_params=self.load_params, mat_props=self.mat_props, ##custom_matls=None, model='Wilson_LT', global_vars=None )
bsd-3-clause
8,944,103,067,520,001,000
39.120155
103
0.526906
false
3.524345
false
false
false
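A hedged sketch of exercising this fixture the way its module docstring describes, i.e. letting `theories.handshake()` locate `RandomName._use_model_` through the `model` keyword in `Case.apply()`; the Case call signature and the module name passed to `model` are assumptions based on the docstrings above:

import lamana as la

dft = Defaults()
case = la.distributions.Case(dft.load_params, dft.mat_props)
case.apply(['400-[200]-800'], model='fixture_model_class')   # hook class is found by name search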
mglidden/git-analysis
analysis/word_frequencies.py
1
1325
import fix_paths import common import config from load_samples import load_samples_from_file from models.commit import Commit from collections import Counter import json import string def get_words_from_message(commit_message): #TODO: clean up this method cleaned_message = str(commit_message.encode('ascii', 'ignore').replace('\n', ' ')).translate(string.maketrans('', ''), string.punctuation + '\t').lower() return set(cleaned_message.split(' ')) def create_word_frequencies(): session = common.Session() training_samples = load_samples_from_file(config.TRAINING_DATA_PATH) word_frequencies = Counter() for _, commit_id in training_samples: commit = session.query(Commit).filter(Commit.id == commit_id).first() for word in get_words_from_message(commit.message): word_frequencies[word] += 1 all_words = [word for word, _ in word_frequencies.most_common(800)] word_frequency_file = open(config.WORD_FREQUENCY_PATH, 'w') word_frequency_file.write(json.dumps(all_words)) word_frequency_file.close() def load_word_frequencies(): # TODO: Cache this file word_frequency_file = open(config.WORD_FREQUENCY_PATH, 'r') word_frequency = json.loads(word_frequency_file.read()) word_frequency_file.close() return word_frequency if __name__ == '__main__': create_word_frequencies()
mit
-2,114,682,968,862,255,400
32.974359
155
0.723019
false
3.450521
false
false
false
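A quick illustration (not in the original script) of the message normalisation done by `get_words_from_message` above: punctuation stripped, lower-cased, split into a set of words:

words = get_words_from_message(u"Fix NPE in parser.\nCloses #42!")
assert words == {'fix', 'npe', 'in', 'parser', 'closes', '42'}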
chebee7i/dit
dit/algorithms/optutil.py
1
8288
""" Various utilities that can be helpful for optimization problems. """ from __future__ import division, print_function from collections import defaultdict import itertools import numpy as np import dit from .frankwolfe import frank_wolfe def as_full_rank(A, b): """ From a linear system Ax = b, return Bx = c such that B has full rank. In CVXOPT, linear constraints are denoted as: Ax = b. A has shape (p, n) and must have full rank. x has shape (n, 1), and so b has shape (p, 1). Let's assume that we have: rank(A) = q <= n This is a typical situation if you are doing optimization, where you have an under-determined system and are using some criterion for selecting out a particular solution. Now, it may happen that q < p, which means that some of your constraint equations are not independent. Since CVXOPT requires that A have full rank, we must isolate an equivalent system Bx = c which does have full rank. We use SVD for this. So A = U \Sigma V^*, where U is (p, p), \Sigma is (p, n) and V^* is (n, n). Then: \Sigma V^* x = U^{-1} b We take B = \Sigma V^* and c = U^T b, where we use U^T instead of U^{-1} for computational efficiency (and since U is orthogonal). But note, we take only the cols of U (which are rows in U^{-1}) and rows of \Sigma that have nonzero singular values. Parameters ---------- A : array-like, shape (p, n) The LHS for the linear constraints. b : array-like, shape (p,) or (p, 1) The RHS for the linear constraints. Returns ------- B : array-like, shape (q, n) The LHS for the linear constraints. c : array-like, shape (q,) or (q, 1) The RHS for the linear constraints. rank : int The rank of B. """ try: from scipy.linalg import svd except ImportError: from numpy.linalg import svd import scipy.linalg as splinalg A = np.atleast_2d(A) b = np.asarray(b) U, S, Vh = svd(A) Smat = splinalg.diagsvd(S, A.shape[0], A.shape[1]) # See np.linalg.matrix_rank tol = S.max() * max(A.shape) * np.finfo(S.dtype).eps rank = np.sum(S > tol) B = np.dot(Smat, Vh)[:rank] c = np.dot(U.transpose(), b)[:rank] return B, c, rank class CVXOPT_Template(object): """ Template for convex minimization on probability distributions. """ def __init__(self, dist, tol=None, prng=None): """ Initialize optimizer. Parameters ---------- dist : distribution The distribution that is used during optimization. tol : float | None The desired convergence tolerance. prng : RandomState A NumPy-compatible pseudorandom number generator. """ dist = prepare_dist(dist) self.dist = dist self.pmf = dist.pmf self.n_variables = dist.outcome_length() self.n_symbols = len(dist.alphabet[0]) self.n_elements = len(self.pmf) if prng is None: prng = np.random.RandomState() self.prng = prng if tol is None: tol = {} self.tol = tol self.init() def init(self): # Dimension of optimization variable self.n = len(self.pmf) # Number of nonlinear constraints self.m = 0 self.prep() self.build_function() self.build_gradient_hessian() self.build_linear_inequality_constraints() self.build_linear_equality_constraints() self.build_F() def prep(self): pass def build_function(self): self.func = lambda x: x.sum() def build_gradient_hessian(self): import numdifftools self.gradient = numdifftools.Gradient(self.func) self.hessian = numdifftools.Hessian(self.func) def build_linear_inequality_constraints(self): from cvxopt import matrix # Dimension of optimization variable n = self.n # Nonnegativity constraint # # We have M = N = 0 (no 2nd order cones or positive semidefinite cones) # So, K = l where l is the dimension of the nonnegative orthant. Thus, # we have l = n. 
G = matrix(-1 * np.eye(n)) # G should have shape: (K,n) = (n,n) h = matrix(np.zeros((n,1))) # h should have shape: (K,1) = (n,1) self.G = G self.h = h def build_linear_equality_constraints(self): from cvxopt import matrix # Normalization constraint only A = [np.ones(self.n_elements)] b = [1] A = np.asarray(A, dtype=float) b = np.asarray(b, dtype=float) self.A = matrix(A) self.b = matrix(b) # now a column vector def initial_dist(self): return self.prng.dirichlet([1] * self.n) def build_F(self): from cvxopt import matrix n = self.n m = self.m def F(x=None, z=None): # x has shape: (n,1) and is the distribution # z has shape: (m+1,1) and is the Hessian of f_0 if x is None and z is None: d = self.initial_dist() return (m, matrix(d)) xarr = np.array(x)[:, 0] # Verify that x is in domain. # Does G,h and A,b take care of this? # if np.any(xarr > 1) or np.any(xarr < 0): return None if not np.allclose(np.sum(xarr), 1, **self.tol): return None # Derivatives f = self.func(xarr) Df = self.gradient(xarr) Df = matrix(Df.reshape((1, n))) if z is None: return (f, Df) else: # Hessian H = self.hessian(xarr) H = matrix(H) return (f, Df, z[0] * H) self.F = F def optimize(self, **kwargs): """ Options: show_progress=False, maxiters=100, abstol=1e-7, reltol=1e-6, feastol=1e-7, refinement=0 if m=0 else 1 """ from cvxopt.solvers import cp, options old_options = options.copy() out = None try: options.clear() options.update(kwargs) with np.errstate(divide='ignore', invalid='ignore'): result = cp(F=self.F, G=self.G, h=self.h, dims={'l':self.G.size[0], 'q':[], 's':[]}, A=self.A, b=self.b) except: raise else: self.result = result out = np.asarray(result['x']) finally: options.clear() options.update(old_options) return out class Bunch: def __init__(self, **kwds): self.__dict__.update(kwds) def prepare_dist(dist): if not isinstance(dist._sample_space, dit.samplespace.CartesianProduct): dist = dit.expanded_samplespace(dist, union=True) if not dist.is_dense(): if len(dist._sample_space) > 1e4: import warnings msg = "Sample space has more than 10k elements." msg += " This could be slow." warnings.warn(msg) dist.make_dense() # We also need linear probabilities. dist.set_base('linear') return dist def op_runner(objective, constraints, **kwargs): """ Minimize the objective specified by the constraints. This safely let's you pass options to the solver and restores their values once the optimization process has completed. The objective must be linear in the variables. This uses cvxopt.modeling. """ from cvxopt.solvers import options from cvxopt.modeling import variable, op old_options = options.copy() opt = op(objective, constraints) try: options.clear() options.update(kwargs) # Ignore 0 log 0 warnings. with np.errstate(divide='ignore', invalid='ignore'): opt.solve() except: raise finally: options.clear() options.update(old_options) return opt
bsd-3-clause
3,453,613,929,888,165,000
25.14511
79
0.552847
false
3.781022
false
false
false
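A small numeric check (not part of the module above) of `as_full_rank`: a redundant constraint row is dropped and the reduced system keeps full rank. Assumes SciPy is available:

import numpy as np

A = np.array([[1., 0.], [0., 1.], [1., 1.]])   # third row = row1 + row2
b = np.array([1., 2., 3.])
B, c, rank = as_full_rank(A, b)
assert rank == 2 and B.shape == (2, 2)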
monetaproject/moneta
qa/rpc-tests/util.py
1
5291
# Copyright (c) 2014 The Bitcoin Core developers # Copyright (c) 2014-2015 The Moneta developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # # Add python-bitcoinrpc to module search path: import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc")) from decimal import Decimal import json import shutil import subprocess import time from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException from util import * START_P2P_PORT=11000 START_RPC_PORT=11100 def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def sync_blocks(rpc_connections): """ Wait until everybody has the same block count """ while True: counts = [ x.getblockcount() for x in rpc_connections ] if counts == [ counts[0] ]*len(counts): break time.sleep(1) def sync_mempools(rpc_connections): """ Wait until everybody has the same transactions in their memory pools """ while True: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match+1 if num_match == len(rpc_connections): break time.sleep(1) bitcoind_processes = [] def initialize_chain(test_dir): """ Create (or copy from cache) a 200-block-long chain and 4 wallets. monetad and moneta-cli must be in search path. """ if not os.path.isdir(os.path.join("cache", "node0")): devnull = open("/dev/null", "w+") # Create cache directories, run monetads: for i in range(4): datadir = os.path.join("cache", "node"+str(i)) os.makedirs(datadir) with open(os.path.join(datadir, "moneta.conf"), 'w') as f: f.write("regtest=1\n"); f.write("rpcuser=rt\n"); f.write("rpcpassword=rt\n"); f.write("port="+str(START_P2P_PORT+i)+"\n"); f.write("rpcport="+str(START_RPC_PORT+i)+"\n"); args = [ "monetad", "-keypool=1", "-datadir="+datadir ] if i > 0: args.append("-connect=127.0.0.1:"+str(START_P2P_PORT)) bitcoind_processes.append(subprocess.Popen(args)) subprocess.check_call([ "moneta-cli", "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) devnull.close() rpcs = [] for i in range(4): try: url = "http://rt:[email protected]:%d"%(START_RPC_PORT+i,) rpcs.append(AuthServiceProxy(url)) except: sys.stderr.write("Error connecting to "+url+"\n") sys.exit(1) # Create a 200-block-long chain; each of the 4 nodes # gets 25 mature blocks and 25 immature. 
for i in range(4): rpcs[i].setgenerate(True, 25) sync_blocks(rpcs) for i in range(4): rpcs[i].setgenerate(True, 25) sync_blocks(rpcs) # Shut them down, and remove debug.logs: stop_nodes(rpcs) wait_bitcoinds() for i in range(4): os.remove(debug_log("cache", i)) for i in range(4): from_dir = os.path.join("cache", "node"+str(i)) to_dir = os.path.join(test_dir, "node"+str(i)) shutil.copytree(from_dir, to_dir) def start_nodes(num_nodes, dir): # Start monetads, and wait for RPC interface to be up and running: devnull = open("/dev/null", "w+") for i in range(num_nodes): datadir = os.path.join(dir, "node"+str(i)) args = [ "monetad", "-datadir="+datadir ] bitcoind_processes.append(subprocess.Popen(args)) subprocess.check_call([ "moneta-cli", "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) devnull.close() # Create&return JSON-RPC connections rpc_connections = [] for i in range(num_nodes): url = "http://rt:[email protected]:%d"%(START_RPC_PORT+i,) rpc_connections.append(AuthServiceProxy(url)) return rpc_connections def debug_log(dir, n_node): return os.path.join(dir, "node"+str(n_node), "regtest", "debug.log") def stop_nodes(nodes): for i in range(len(nodes)): nodes[i].stop() del nodes[:] # Emptying array closes connections as a side effect def wait_bitcoinds(): # Wait for all bitcoinds to cleanly exit for bitcoind in bitcoind_processes: bitcoind.wait() del bitcoind_processes[:] def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:"+str(START_P2P_PORT+node_num) from_connection.addnode(ip_port, "onetry") def assert_equal(thing1, thing2): if thing1 != thing2: raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
mit
-734,973,261,075,377,800
33.581699
94
0.600832
false
3.499339
false
false
false
wright-group/WrightData
2015-12 Czech/workup.py
1
3041
''' First Created 2016/05/05 by Blaise Thompson Last Edited 2016/08/08 by Blaise Thompson Contributors: Blaise Thompson, Kyle Czech ''' ### import #################################################################### import os import sys import importlib import collections import WrightTools as wt ### define #################################################################### # paths directory = os.path.dirname(__file__) key = os.path.basename(directory) package_folder = os.path.dirname(directory) # shared module spec = importlib.util.spec_from_file_location('shared', os.path.join(package_folder, 'shared.py')) shared_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(shared_module) # dictionaries to fill raw_dictionary = collections.OrderedDict() processed_dictionary = collections.OrderedDict() ### download ################################################################## bypass_download = False if __name__ == '__main__' and not bypass_download: shared_module.download(key, directory) ### movie ##################################################################### raw_pickle_path = os.path.join(directory, 'raw_movie.p') processed_pickle_path = os.path.join(directory, 'processed_movie.p') def workup(): # raw data_paths = wt.kit.glob_handler('.dat', folder=os.path.join(directory, 'movie')) raw_movie = wt.data.from_COLORS(data_paths, name='MoS2 TrEE Movie') raw_movie.save(raw_pickle_path) # processed processed_movie = raw_movie.copy() processed_movie.level('ai0', 'd2', -3) processed_movie.smooth([2, 2, 0], channel='ai0') processed_movie.scale(channel='ai0', kind='amplitude') processed_movie.normalize(channel='ai0') processed_movie.save(processed_pickle_path) # finish return raw_movie, processed_movie # force workup if False: workup() # automatically process shared_module.process(key='movie', workup_method=workup, raw_pickle_path=raw_pickle_path, processed_pickle_path=processed_pickle_path, raw_dictionary=raw_dictionary, processed_dictionary=processed_dictionary) ### absorbance ################################################################ raw_pickle_path = os.path.join(directory, 'absorbance_data.p') processed_pickle_path = raw_pickle_path def workup(): absorbance_path = os.path.join(directory, 'MoS2_TF_III_ebeam_1nm_Mo_onQuartz_T=300K__corrected.txt') absorbance_data = wt.data.from_shimadzu(absorbance_path, name='MoS2 thin film absorbance') absorbance_data.save(raw_pickle_path) return absorbance_data, absorbance_data # force workup if False: workup() # automatically process shared_module.process(key='absorbance', workup_method=workup, raw_pickle_path=raw_pickle_path, processed_pickle_path=processed_pickle_path, raw_dictionary=raw_dictionary, processed_dictionary=processed_dictionary)
cc0-1.0
7,105,389,251,518,506,000
28.813725
104
0.610654
false
3.695018
false
false
false
lacion/forge
forge/forge.py
1
5804
#!/usr/bin/env python """ forge.forge ~~~~~ :copyright: (c) 2010-2013 by Luis Morales :license: BSD, see LICENSE for more details. """ #heavely based on diamond https://github.com/BrightcoveOS/Diamond import os import sys import argparse import logging import traceback import inspect from util import load_class_from_name from module import Module class Forge(object): """ Forge class loads and starts modules """ pass def __init__(self, user, path, modules): # Initialize Logging self.log = logging.getLogger('forge') # Initialize Members self.modules = modules self.user = user self.path = path def load_include_path(self, path): """ Scan for and add paths to the include path """ # Verify the path is valid if not os.path.isdir(path): return # Add path to the system path sys.path.append(path) # Load all the files in path for f in os.listdir(path): # Are we a directory? If so process down the tree fpath = os.path.join(path, f) if os.path.isdir(fpath): self.load_include_path(fpath) def load_module(self, fqcn): """ Load Module class named fqcn """ # Load class cls = load_class_from_name(fqcn) # Check if cls is subclass of Module if cls == Module or not issubclass(cls, Module): raise TypeError("%s is not a valid Module" % fqcn) # Log self.log.debug("Loaded Module: %s", fqcn) return cls def load_modules(self, path): """ Scan for collectors to load from path """ # Initialize return value modules = {} # Get a list of files in the directory, if the directory exists if not os.path.exists(path): raise OSError("Directory does not exist: %s" % path) if path.endswith('tests') or path.endswith('fixtures'): return modules # Log self.log.debug("Loading Modules from: %s", path) # Load all the files in path for f in os.listdir(path): # Are we a directory? If so process down the tree fpath = os.path.join(path, f) if os.path.isdir(fpath): submodules = self.load_modules(fpath) for key in submodules: modules[key] = submodules[key] # Ignore anything that isn't a .py file elif (os.path.isfile(fpath) and len(f) > 3 and f[-3:] == '.py' and f[0:4] != 'test' and f[0] != '.'): modname = f[:-3] try: # Import the module mod = __import__(modname, globals(), locals(), ['*']) except ImportError: # Log error self.log.error("Failed to import module: %s. %s", modname, traceback.format_exc()) continue # Log self.log.debug("Loaded Module: %s", modname) # Find all classes defined in the module for attrname in dir(mod): attr = getattr(mod, attrname) # Only attempt to load classes that are infact classes # are Collectors but are not the base Collector class if (inspect.isclass(attr) and issubclass(attr, Module) and attr != Module): # Get class name fqcn = '.'.join([modname, attrname]) try: # Load Collector class cls = self.load_module(fqcn) # Add Collector class modules[cls.__name__] = cls except Exception: # Log error self.log.error("Failed to load Module: %s. %s", fqcn, traceback.format_exc()) continue # Return Collector classes return modules def init_module(self, cls): """ Initialize module """ module = None try: # Initialize module module = cls(self.user) # Log self.log.debug("Initialized Module: %s", cls.__name__) except Exception: # Log error self.log.error("Failed to initialize Module: %s. 
%s", cls.__name__, traceback.format_exc()) # Return module return module def run(self): """ Load module classes and run them """ # Load collectors modules_path = self.path self.load_include_path(modules_path) modules = self.load_modules(modules_path) for module in self.modules: c = self.init_module(modules[module.capitalize()]) c.execute() def run(): """ executes the recipe list to set the system """ parser = argparse.ArgumentParser( prog='forge', description='forge is a command line tool that allows to execute modules to configure a linux system.', epilog='this epilog whose whitespace will be cleaned up and whose words will be wrapped across a couple lines' ) parser.add_argument('-u', '--user', help='Destination user', type=str, required=True) parser.add_argument('-m', '--modules', help='List of modules to execute', nargs='+', type=str, required=True) parser.add_argument('-p', '--path', help='path to find modules', type=str, required=True) args = parser.parse_args() init = Forge(args.user, args.path, args.modules) init.run()
bsd-3-clause
-454,126,331,821,404,100
30.372973
118
0.52929
false
4.506211
false
false
false
edgedb/edgedb
edb/testbase/protocol/render_utils.py
1
1595
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


from __future__ import annotations
from typing import *

import contextlib
import textwrap


class RenderBuffer:

    ilevel: int
    buf: List[str]

    def __init__(self):
        self.ilevel = 0
        self.buf = []

    def write(self, line: str) -> None:
        self.buf.append(' ' * (self.ilevel * 2) + line)

    def newline(self) -> None:
        self.buf.append('')

    def lastline(self) -> Optional[str]:
        return self.buf[-1] if len(self.buf) else None

    def popline(self) -> str:
        return self.buf.pop()

    def write_comment(self, comment: str) -> None:
        lines = textwrap.wrap(comment, width=40)
        for line in lines:
            self.write(f'// {line}')

    def __str__(self):
        return '\n'.join(self.buf)

    @contextlib.contextmanager
    def indent(self):
        self.ilevel += 1
        try:
            yield
        finally:
            self.ilevel -= 1
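# --- Illustrative usage sketch (editor's addition, not part of the EdgeDB
# --- sources); it only exercises the RenderBuffer methods defined above.
if __name__ == '__main__':
    buf = RenderBuffer()
    buf.write('message Header {')
    with buf.indent():
        buf.write_comment('a long explanatory comment that write_comment() wraps at 40 columns')
        buf.write('auth: bytes')
    buf.write('}')
    print(buf)  # renders the buffered lines joined by newlines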
apache-2.0
-8,708,220,317,205,776
25.147541
74
0.641379
false
3.815789
false
false
false
AnhellO/DAS_Sistemas
Ene-Jun-2019/juanalmaguer/Extraordinario/Ejercicio 4.py
1
1725
import peewee
import sqlite3

file = 'countries.db'
db = peewee.SqliteDatabase(file)


class Pais(peewee.Model):
    nombre = peewee.TextField()
    lenguajes = peewee.TextField()
    continente = peewee.TextField()
    capital = peewee.TextField()
    zona = peewee.TextField()

    class Meta:
        database = db
        db_table = 'Country'


def count_paises():
    db.connect()
    total = Pais.select().count()
    db.close()
    return total


def data_countries(pais = 'Mexico'):
    conexion = sqlite3.connect(file)
    cursor = conexion.cursor()
    datos = cursor.execute('select * from Paises where Nombre = "{}"'.format(pais)).fetchall()
    conexion.close()
    return datos[0]


def latinos():
    conexion = sqlite3.connect(file)
    cursor = conexion.cursor()
    paises = cursor.execute('select Nombre, Lenguajes from Paises').fetchall()
    hispanohablantes = []
    for pais in paises:
        # Skip rows whose language column is NULL before splitting; the original
        # check compared a type object against the string 'NoneType', which is
        # always true and therefore never filtered anything.
        if pais[1] is not None:
            lenguajes = pais[1].split(',')
            if 'spa' in lenguajes:
                hispanohablantes.append(pais[0])
    conexion.close()
    return hispanohablantes


def europeos():
    conexion = sqlite3.connect(file)
    cursor = conexion.cursor()
    paises = cursor.execute('select Nombre from Paises where Continente = "Europe"').fetchall()
    conexion.close()
    return paises


def main():
    print('Total de países: {}'.format(count_paises()))
    print('\nDatos de México: {}'.format(data_countries()))
    paises = latinos()
    print('\nPaíses hispanohablantes: ')
    for pais in paises:
        print(pais)
    paises_europeos = europeos()
    print('\nPaíses de Europa: ')
    for pais in paises_europeos:
        print(pais[0])


if __name__ == '__main__':
    main()
mit
7,194,455,465,097,568,000
24.323529
95
0.632772
false
3.056838
false
false
false
TuftsBCB/Walker
run_walker.py
1
2820
""" Main script for running tissue-specific graph walk experiments, to convergence. """ import sys import argparse from walker import Walker def generate_seed_list(seed_file): """ Read seed file into a list. """ seed_list = [] try: fp = open(seed_file, "r") except IOError: sys.exit("Error opening file {}".format(seed_file)) for line in fp.readlines(): info = line.rstrip().split() if len(info) > 1: seed_list.append(info[1]) else: seed_list.append(info[0]) fp.close() return seed_list def get_node_list(node_file): node_list = [] try: fp = open(node_file, 'r') except IOError: sys.exit('Could not open file: {}'.format(node_file)) # read the first (i.e. largest) connected component cur_line = fp.readline() while cur_line and not cur_line.isspace(): if cur_line: node_list.append(cur_line.rstrip()) cur_line = fp.readline() fp.close() return node_list def main(argv): # set up argument parsing parser = argparse.ArgumentParser() parser.add_argument('input_graph', help='Original graph input file, in\ edge list format') parser.add_argument('seed', help='Seed file, to pull start nodes from') parser.add_argument('-e', '--restart_prob', type=float, default=0.7, help='Restart probability for random walk') parser.add_argument('-l', '--low_list', nargs='?', default=None, help='<Optional> List of genes expressed and\ unexpressed in the current tissue, if applicable') parser.add_argument('-n', '--node_list', nargs='?', default=None, help='<Optional> Order of output probs') parser.add_argument('-o', '--original_graph_prob', type=float, default=0.1, help='Probability of walking on the original (non-\ tissue specific) graph, if applicable') parser.add_argument('-r', '--remove', nargs='+', help='<Optional> Nodes to remove from the graph, if any') opts = parser.parse_args() seed_list = generate_seed_list(opts.seed) node_list = get_node_list(opts.node_list) if opts.node_list else [] # filter nodes we want to remove out of the starting seed, if any remove_list = opts.remove if opts.remove else [] if remove_list: seed_list = [s for s in seed_list if s not in remove_list] # run the experiments, and write a rank list to stdout wk = Walker(opts.input_graph, opts.low_list, remove_list) wk.run_exp(seed_list, opts.restart_prob, opts.original_graph_prob, node_list) if __name__ == '__main__': main(sys.argv)
mit
-6,129,807,627,736,509,000
33.390244
81
0.588652
false
3.852459
false
false
false
brunoabud/ic
ic/queue.py
1
4887
# coding: utf-8 # Copyright (C) 2016 Bruno Abude Cardoso # # Imagem Cinemática is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Imagem Cinemática is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from collections import deque from PyQt4.QtCore import QMutex, QThread, QWaitCondition, QElapsedTimer __all__ = ['Empty', 'Full', 'Queue'] class Empty(Exception): pass class Full(Exception): pass class Locked(Exception): pass class Queue(object): """Create a queue object with a given maximum size. """ def __init__(self, maxsize=0): self.maxsize = maxsize self.queue = deque() # Mutex using for accessing the deque self.mutex = QMutex() # Condition that will be held when the queue is empty and the consumer # needs to wait for a new item self.item_added = QWaitCondition() # Condition that will be held when the queue is full and the producer # needs to wait for a new place to insert the item self.item_removed = QWaitCondition() def put(self, item, block=True, timeout=None): """Put an item into the queue. Parameters ---------- block : bool If True(default), the caller thread will block until the queue has a free space available for putting an new item. If False, the `Full` exception will be raised if there is no free space in the queue timeout : int The max time to wait for a new space to be avaible, in milliseconds. """ self.mutex.lock() try: # Check if the queue has a limit (0 means not) if self.maxsize > 0: # Raise Full if block is False and the queue is at max cap. if not block: if self._qsize() == self.maxsize: raise Full # If a timeout is not provided, wait indefinitely elif timeout is None: while self._qsize() == self.maxsize: self.item_removed.wait(self.mutex) elif timeout < 0: raise ValueError("'timeout' must be a non-negative number") else: timer = QElapsedTimer() timer.start() while self._qsize() == self.maxsize: remaining = timeout - timer.elapsed() if remaining <= 0.0: raise Full self.item_removed.wait(self.mutex, remaining) self._put(item) self.item_added.wakeOne() finally: self.mutex.unlock() def get(self, block=True, timeout=None): """Remove and return an item from the queue. Parameters ---------- block : bool If True(default), the caller thread will block until the queue has an item available for putting an new item. If False, the `Empty` exception will be raised if there is no item in the queue timeout : int The max time to wait for a new item to be avaible, in milliseconds. 
""" self.mutex.lock() try: if not block: if not self._qsize(): raise Empty elif timeout is None: while not self._qsize(): self.item_added.wait(self.mutex) elif timeout < 0: raise ValueError("'timeout' must be a non-negative number") else: timer = QElapsedTimer() timer.start() while not self._qsize(): remaining = timeout - timer.elapsed() if remaining <= 0.0: raise Empty self.item_added.wait(self.mutex, remaining) item = self._get() self.item_removed.wakeOne() return item finally: self.mutex.unlock() def _qsize(self, len=len): return len(self.queue) # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft() def _clear(self): self.queue.clear() def clear(self): self._clear()
gpl-3.0
6,604,661,603,224,259,000
32.689655
80
0.561515
false
4.518964
false
false
false
WPI-ARC/constrained_path_generator
scripts/demo.py
1
2723
#!/usr/bin/python import math import rospy import random from sensor_msgs.msg import * from geometry_msgs.msg import * from constrained_path_generator.msg import * from constrained_path_generator.srv import * def make_pose((px, py, pz), (rx, ry, rz, rw)): new_pose = Pose() new_pose.position.x = px new_pose.position.y = py new_pose.position.z = pz new_pose.orientation.x = rx new_pose.orientation.y = ry new_pose.orientation.z = rz new_pose.orientation.w = rw return new_pose def make_pose_stamped((px, py, pz), (rx, ry, rz, rw), frame): pose_stamped = PoseStamped() pose_stamped.pose = make_pose((px, py, pz), (rx, ry, rz, rw)) pose_stamped.header.frame_id = frame return pose_stamped def make_quaternion(w, x, y, z): new_quat = Quaternion() new_quat.w = w new_quat.x = x new_quat.y = y new_quat.z = z return new_quat def make_vector(x, y, z): new_vector = Vector3() new_vector.x = x new_vector.y = y new_vector.z = z return new_vector _joint_state = None def joint_state_cb(msg): global _joint_state _joint_state = msg def test(): test_node = rospy.init_node("test_planner") js_sub = rospy.Subscriber("joint_states", JointState, joint_state_cb) planner_client = rospy.ServiceProxy("plan_constrained_path", PlanConstrainedPath) # Wait for a joint state while _joint_state is None and not rospy.is_shutdown(): rospy.sleep(0.1) print "got robot state" # Make the waypoints pose_1 = make_pose_stamped((0.585, 0.15, 1.250), (0.0, 0.888, 0.0, -0.460), "base_link") waypoints = [pose_1] # Make the request query = PlanConstrainedPathQuery() query.path_type = PlanConstrainedPathQuery.CHECK_ENVIRONMENT_COLLISIONS | PlanConstrainedPathQuery.CARTESIAN_IK | PlanConstrainedPathQuery.PLAN query.waypoints = waypoints query.group_name = "left_arm" query.target_link = "l_wrist_roll_link" query.planning_time = 5.0 query.max_cspace_jump = 0.05 query.task_space_step_size = 0.025 query.initial_state.joint_state = _joint_state query.path_orientation_constraint = make_quaternion(0.0, 0.888, 0.0, -0.460) query.path_angle_tolerance = make_vector(0.01, 0.01, 0.01) query.path_position_tolerance = make_vector(0.02, 0.02, 0.02) query.goal_angle_tolerance = make_vector(0.01, 0.01, 0.01) query.goal_position_tolerance = make_vector(0.01, 0.01, 0.01) full_req = PlanConstrainedPathRequest() full_req.query = query full_res = planner_client.call(full_req) print full_res # Make some collision_planes raw_input("Press ENTER to close...") print "Done" if __name__ == '__main__': test()
bsd-2-clause
-177,377,274,693,844,160
28.608696
147
0.658832
false
2.921674
false
false
false
roderickmackenzie/gpvdm
gpvdm_gui/gui/license_key.py
1
2732
# # General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall # model for 1st, 2nd and 3rd generation solar cells. # Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com # # https://www.gpvdm.com # Room B86 Coates, University Park, Nottingham, NG7 2RD, UK # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License v2.0, as published by # the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # ## @package register # Registration window # import os #qt from PyQt5.QtCore import QSize, Qt from PyQt5.QtWidgets import QWidget,QLineEdit,QComboBox,QHBoxLayout,QPushButton,QLabel,QDialog,QVBoxLayout,QSizePolicy from PyQt5.QtGui import QPainter,QIcon,QImage from PyQt5.QtGui import QFont from icon_lib import icon_get from PyQt5.QtCore import QSize, Qt from inp import inp_load_file import re from error_dlg import error_dlg from lock import get_lock class license_key(QDialog): def callback_ok(self): print("boom") #get_lock().register(email=self.email0.text(),name=self.name.text()) #get_lock().get_license() self.accept() def __init__(self): QWidget.__init__(self) self.setWindowIcon(icon_get("icon")) self.setWindowTitle(_("Registration window (www.gpvdm.com)")) self.setWindowFlags(Qt.WindowStaysOnTopHint) vbox=QVBoxLayout() l=QLabel(_("Enter the license key below:")) l.setFont(QFont('SansSerif', 14)) vbox.addWidget(l) hbox_widget=QWidget() hbox=QHBoxLayout() hbox_widget.setLayout(hbox) l=QLabel("<b>"+_("Key")+"</b>:") l.setFont(QFont('SansSerif', 14)) hbox.addWidget(l) self.name = QLineEdit() hbox.addWidget(self.name) vbox.addWidget(hbox_widget) button_box=QHBoxLayout() spacer = QWidget() spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) button_box.addWidget(spacer) self.register=QPushButton("Register", self) self.register.clicked.connect(self.callback_ok) button_box.addWidget(self.register) button_box_widget=QWidget() button_box_widget.setLayout(button_box) vbox.addWidget(button_box_widget) self.setLayout(vbox) self.setMinimumWidth(400) self.name.setText("key") def run(self): return self.exec_()
gpl-2.0
-5,252,635,026,861,043,000
25.524272
118
0.732064
false
3.151096
false
false
false
lneuhaus/pyrpl
pyrpl/widgets/module_widgets/pid_widget.py
1
1109
""" A widget for pid modules. """ from .base_module_widget import ModuleWidget from qtpy import QtCore, QtWidgets class PidWidget(ModuleWidget): """ Widget for a single PID. """ def init_gui(self): self.init_main_layout(orientation="vertical") #self.main_layout = QtWidgets.QVBoxLayout() #self.setLayout(self.main_layout) self.init_attribute_layout() input_filter_widget = self.attribute_widgets["inputfilter"] self.attribute_layout.removeWidget(input_filter_widget) self.main_layout.addWidget(input_filter_widget) for prop in ['p', 'i']: #, 'd']: self.attribute_widgets[prop].widget.set_log_increment() # can't avoid timer to update ival # self.timer_ival = QtCore.QTimer() # self.timer_ival.setInterval(1000) # self.timer_ival.timeout.connect(self.update_ival) # self.timer_ival.start() def update_ival(self): widget = self.attribute_widgets['ival'] if self.isVisible() and not widget.editing(): widget.write_attribute_value_to_widget()
gpl-3.0
1,500,945,310,355,024,100
31.617647
67
0.64202
false
3.746622
false
false
false
mozillazg/bild.me-cli
setup.py
1
1880
#!/usr/bin/env python # -*- coding: utf-8 -*- from codecs import open import sys import os try: from setuptools import setup except ImportError: from distutils.core import setup import bild if sys.argv[-1] == 'publish': os.system('python setup.py sdist upload') sys.exit() requirements = [ 'requests>=2.0.1', 'argparse', ] packages = [ 'bild', ] def long_description(): readme = open('README.rst', encoding='utf8').read() text = readme + '\n\n' + open('CHANGELOG.rst', encoding='utf8').read() return text setup( name='bild.me-cli', version=bild.__version__, description=bild.__doc__, long_description=long_description(), url='https://github.com/mozillazg/bild.me-cli', download_url='https://github.com/mozillazg/bild.me-cli/archive/master.zip', author=bild.__author__, author_email='[email protected]', license=bild.__license__, packages=packages, package_data={'': ['LICENSE.txt']}, package_dir={'bild': 'bild'}, entry_points={ 'console_scripts': [ 'bild = bild.bild:main', ], }, include_package_data=True, install_requires=requirements, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Environment :: Console', 'Topic :: Utilities', 'Topic :: Terminals', ], keywords='bild.me, CLI', )
mit
1,008,756,644,684,967,000
25.478873
79
0.598936
false
3.686275
false
false
false
kamailio/kamcli
kamcli/commands/cmd_shv.py
1
1435
import click

from kamcli.cli import pass_context
from kamcli.iorpc import command_ctl


@click.group(
    "shv",
    help="Manage $shv(name) variables",
    short_help="Manage $shv(name) variables",
)
@pass_context
def cli(ctx):
    pass


@cli.command("get", short_help="Get the value for $shv(name)")
@click.argument("name", nargs=-1, metavar="<name>")
@pass_context
def shv_get(ctx, name):
    """Get the value for $shv(name)

    \b
    Parameters:
        <name> - the name of shv variable
    """
    if not name:
        command_ctl(ctx, "pv.shvGet")
    else:
        for n in name:
            command_ctl(ctx, "pv.shvGet", [n])


@cli.command("sets", short_help="Set $shv(name) to string value")
@click.argument("name", metavar="<name>")
@click.argument("sval", metavar="<sval>")
@pass_context
def shv_sets(ctx, name, sval):
    """Set $shv(name) to string value

    \b
    Parameters:
        <name> - the name of shv variable
        <sval> - the string value
    """
    command_ctl(ctx, "pv.shvSet", [name, "str", sval])


@cli.command("seti", short_help="Set $shv(name) to int value")
@click.argument("name", metavar="<name>")
@click.argument("ival", metavar="<ival>", type=int)
@pass_context
def srv_seti(ctx, name, ival):
    """Set $shv(name) to int value

    \b
    Parameters:
        <name> - the name of shv variable
        <ival> - the int value
    """
    command_ctl(ctx, "pv.shvSet", [name, "int", ival])
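# --- Illustrative invocations (editor's addition): the subcommands and argument
# --- order come from the click definitions above; the "kamcli" entry-point name
# --- is assumed from the surrounding project.
#   kamcli shv get                  # dump all $shv(...) variables
#   kamcli shv get counter          # fetch a single variable
#   kamcli shv sets greeting hello  # set a string value
#   kamcli shv seti counter 10      # set an integer value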
gpl-2.0
-3,036,620,697,451,798,500
22.916667
65
0.609059
false
3.027426
false
false
false
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/io/fits/hdu/nonstandard.py
1
4066
# Licensed under a 3-clause BSD style license - see PYFITS.rst import gzip import io from ..file import _File from .base import NonstandardExtHDU from .hdulist import HDUList from ..header import Header, _pad_length from ..util import fileobj_name from ....extern.six import string_types from ....utils import lazyproperty class FitsHDU(NonstandardExtHDU): """ A non-standard extension HDU for encapsulating entire FITS files within a single HDU of a container FITS file. These HDUs have an extension (that is an XTENSION keyword) of FITS. The FITS file contained in the HDU's data can be accessed by the `hdulist` attribute which returns the contained FITS file as an `HDUList` object. """ _extension = 'FITS' @lazyproperty def hdulist(self): self._file.seek(self._data_offset) fileobj = io.BytesIO() # Read the data into a BytesIO--reading directly from the file # won't work (at least for gzipped files) due to problems deep # within the gzip module that make it difficult to read gzip files # embedded in another file fileobj.write(self._file.read(self.size)) fileobj.seek(0) if self._header['COMPRESS']: fileobj = gzip.GzipFile(fileobj=fileobj) return HDUList.fromfile(fileobj, mode='readonly') @classmethod def fromfile(cls, filename, compress=False): """ Like `FitsHDU.fromhdulist()`, but creates a FitsHDU from a file on disk. Parameters ---------- filename : str The path to the file to read into a FitsHDU compress : bool, optional Gzip compress the FITS file """ return cls.fromhdulist(HDUList.fromfile(filename), compress=compress) @classmethod def fromhdulist(cls, hdulist, compress=False): """ Creates a new FitsHDU from a given HDUList object. Parameters ---------- hdulist : HDUList A valid Headerlet object. compress : bool, optional Gzip compress the FITS file """ fileobj = bs = io.BytesIO() if compress: if hasattr(hdulist, '_file'): name = fileobj_name(hdulist._file) else: name = None fileobj = gzip.GzipFile(name, mode='wb', fileobj=bs) hdulist.writeto(fileobj) if compress: fileobj.close() # A proper HDUList should still be padded out to a multiple of 2880 # technically speaking padding = (_pad_length(bs.tell()) * cls._padding_byte).encode('ascii') bs.write(padding) bs.seek(0) cards = [ ('XTENSION', cls._extension, 'FITS extension'), ('BITPIX', 8, 'array data type'), ('NAXIS', 1, 'number of array dimensions'), ('NAXIS1', len(bs.getvalue()), 'Axis length'), ('PCOUNT', 0, 'number of parameters'), ('GCOUNT', 1, 'number of groups'), ] # Add the XINDn keywords proposed by Perry, though nothing is done with # these at the moment if len(hdulist) > 1: for idx, hdu in enumerate(hdulist[1:]): cards.append(('XIND' + str(idx + 1), hdu._header_offset, 'byte offset of extension %d' % (idx + 1))) cards.append(('COMPRESS', compress, 'Uses gzip compression')) header = Header(cards) return cls._readfrom_internal(_File(bs), header=header) @classmethod def match_header(cls, header): card = header.cards[0] if card.keyword != 'XTENSION': return False xtension = card.value if isinstance(xtension, string_types): xtension = xtension.rstrip() return xtension == cls._extension # TODO: Add header verification def _summary(self): # TODO: Perhaps make this more descriptive... return (self.name, self.__class__.__name__, len(self._header))
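# --- Illustrative usage sketch (editor's addition): embeds a small HDUList in a
# --- FitsHDU and reads it back; `fits` stands for a user-side `astropy.io.fits`
# --- import and is not defined in this module.
#   hdul = fits.HDUList([fits.PrimaryHDU()])
#   fhdu = FitsHDU.fromhdulist(hdul, compress=True)
#   inner = fhdu.hdulist   # -> HDUList reconstructed from the embedded bytes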
mit
3,870,601,165,106,004,500
31.528
79
0.591982
false
4.029732
false
false
false
sh-ft/mudwyrm_users
mudwyrm_users/admin/achaea/scripts/brain/combat.py
1
8663
from mudwyrm_users.admin.achaea import ScriptState from mudwyrm_users.admin.achaea.action import Action, Outcome, EventOutcome from mudwyrm_users.admin.achaea.trigger import Trigger, Alias, OnEvent, TriggerPack from mudwyrm_users.admin.achaea.common import not_, traverse_scripts, AttrDict, partition_action from mudwyrm_users.admin.achaea.database import Base from mudwyrm_users.admin.achaea.scripts import char from mudwyrm_users.admin.achaea.scripts.actions import all_actions as actions import sqlalchemy as sa p = None s = ScriptState() def init(processor): assert processor is not None global p p = processor s.loot = [] s.info_here = {} s.state = 'inactive' s.target = None def think(): if s.state == 'inactive': return if not s.target: return combat_echo("No target to fight.") if isinstance(s.target, int) and s.target not in room['objects']: s.state = 'inactive' return combat_echo("Target has been lost. Given up on fighting.") cure() if s.state == 'attacking': attack() elif s.state == 'defending': defend() elif s.state == 'looting': loot() ################## def combat_echo(text): p.echo("[Combat] %s" % text) def choose_offensive_action(target): if char.race == 'Dragon': return (actions.gut, target) elif char.class_ == 'Sylvan': return (actions.thornrend if char.status('viridian') else actions.firelash, target) elif char.class_ == 'Serpent': if char.skill_available('garrote'): return (actions.garrote, target) else: assert char.skill_available('bite') venom = 'sumac' if not char.skill_available('camus', 'venom') else 'camus' return (actions.venom_bite, venom, target) elif char.class_ == 'Shaman': return (actions.curse, 'bleed', target) elif char.class_ == 'Blademaster': return (actions.drawslash, target) elif char.class_ == 'Alchemist': return (actions.educe_iron, target) return None def choose_defensive_action(): if char.skill_available('reflection'): return (actions.reflection, 'me') return None def offensive_mode(): if s.state == 'defending': s.state = 'attacking' combat_echo("Switched to offensive mode.") def defensive_mode(): if s.state == 'attacking': s.state = 'defending' combat_echo("Switched to defensive mode.") def attack(): if char.health < char.defensive_health_level: defensive_mode() return action, args = partition_action(choose_offensive_action(s.target)) if not action: return combat_echo("No offensive action was set for this character, not attacking.") if action.possible(*args) and not p.action_already_active(action, *args): p.act(action, *args) def defend(): action, args = partition_action(choose_defensive_action()) if not action: offensive_mode() return combat_echo("No defensive action was set for this character, not defending.") if action.possible(*args) and not p.action_already_active(action, *args): p.act(action, *args) if char.health > char.offensive_health_level: offensive_mode() def loot(): if char.balance('balance') and char.balance('equilibrium'): for item in s.loot: p.send("get %s" % item) s.loot = [] s.state = 'inactive' combat_echo("Finished fighting.") def cure(): if char.status('loki'): if actions.diagnose.possible(): p.act(actions.diagnose) ########################## @Alias(r'^(?:kill|k) (.+)$') def combat_start(match): target = match.group(1) if s.state not in ['inactive', 'looting']: return combat_echo("Already fighting someone.") s.target = target s.state = 'attacking' combat_echo("Fighting %s" % s.target) think() @Alias(r'^(?:autokill|ak|k)$') def autotarget_combat_start(match): if s.state not in ['inactive', 'looting']: return combat_echo("Already fighting 
someone.") def find_target(): target_list = p.db.query(Target).all() for obj in char.room_objects.itervalues(): for t in target_list: if obj['name'].find(t.name) >= 0: return obj return None target = find_target() if not target: return combat_echo("No target found.") s.target = target['id'] s.state = 'attacking' combat_echo("Target found: %s" % target['name']) think() @Alias(r'^(?:stopkill|sk)$') def combat_stop(match): if s.state not in ['inactive', 'looting']: s.state = 'inactive' combat_echo("Given up on fighting.") else: combat_echo("Already not fighting") ######################## @OnEvent('TargetNotFound') def on_target_not_found(): if s.state in ['attacking', 'defending']: s.state = 'inactive' combat_echo("Target has been lost. Given up on fighting.") p.notification("Combat", "Target has been lost.") @OnEvent('CreatureSlain') def on_creature_slain(name): # TODO: check if a creature was the target. if s.state in ['attacking', 'defending']: combat_echo("Target has been slain.") s.state = 'looting' p.notification("Combat", "Target has been slain.") @OnEvent('LootDropped') def on_loot_dropped(name): s.loot.append(name) @Trigger(r'^You have slain (.+), retrieving the corpse\.$') def creature_slain(match): p.raise_event('CreatureSlain', name=match.group(1)) @Trigger(r'^A few golden sovereigns spill from the corpse\.$', r'^A small quantity of sovereigns spills from the corpse\.$', r'^A (?:tiny|small|large) pile of sovereigns spills from the corpse\.$', r'^(?:Many|Numerous) golden sovereigns spill from the corpse\.$') def gold_loot(match): p.raise_event('LootDropped', name='money') @Trigger(r'^A glistening iridescent pearl tumbles out of the corpse of a barnacle encrusted oyster\.$', r'^A gleaming black pearl tumbles out of the corpse of a marsh ooze\.$') def pearl_loot(match): p.raise_event('LootDropped', name='pearl') @Trigger(r'^A (?:chipped|jagged|smooth) iconic shard appears and clatters to the ground\.$') def shard_loot(match): p.raise_event('LootDropped', name='shard') @Trigger(r'^The Mask of the Beast tumbles out of the corpse of a mysterious cloaked figure\.$') def mask_of_the_beast_loot(match): p.raise_event('LootDropped', name='mask') class info_here(Action): def start(action): s.info_here.clear() p.send("info here") @Alias(r'^(ih|info here)$') def aliases(match): p.act(info_here) @Trigger(r'^(\D+\d+)\s+(.*)$') def info_here_line(match, action): s.info_here[match.group(1)] = match.group(2) @Outcome(r'^Number of objects: (\d+)$') def info_here_end_line(match, action): n = int(match.group(1)) p.raise_event('InfoHereUpdated') if len(s.info_here) != n: raise ScriptError("Warning: Number of objects captured from " "'info here' doesn't match the actual number of objects.") @OnEvent('InfoHereUpdated') def ih_updated(): pass class Target(Base): __tablename__ = 'targets' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String, nullable=False, unique=True) def __init__(self, name): self.name = name @Alias(r'^target_list$') def target_list(match): targets = p.db.query(Target).all() if not targets: p.echo("Target list is empty.") else: p.echo("Target list: %s." % ", ".join(t.name for t in targets)) @Alias(r'^target_add (.*)$') def target_add(match): target = Target(match.group(1)) p.db.add(target) p.db.commit() p.echo("%s has been added to the target list." % target.name) @Alias(r'^target_remove (.*)$') def target_remove(match): name = match.group(1) target = p.db.query(Target).filter(Target.name == name).first() if not target: return p.echo("Target list doesn't contain %s." 
% name) p.db.delete(target) p.db.commit() p.echo("%s has been removed from the target list." % name)
mit
-8,037,533,459,721,834,000
31.430769
103
0.594482
false
3.370817
false
false
false
GoogleCloudPlatform/PerfKitBenchmarker
perfkitbenchmarker/windows_benchmarks/diskspd_benchmark.py
1
1623
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run DiskSpd in a single VM.""" from absl import flags from perfkitbenchmarker import configs from perfkitbenchmarker.windows_packages import diskspd FLAGS = flags.FLAGS BENCHMARK_NAME = 'diskspd' BENCHMARK_CONFIG = """ diskspd: description: Run diskspd on a single machine vm_groups: default: vm_spec: *default_single_core vm_count: 1 disk_spec: *default_500_gb """ def GetConfig(user_config): return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) def Prepare(benchmark_spec): vm = benchmark_spec.vms[0] vm.Install('diskspd') def Run(benchmark_spec): """Measure the disk performance in one VM. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. Returns: A list of sample.Sample objects with the benchmark results. """ vm = benchmark_spec.vms[0] results = [] results.extend(diskspd.RunDiskSpd(vm)) return results def Cleanup(unused_benchmark_spec): pass
apache-2.0
-5,473,095,602,450,988,000
25.177419
74
0.736907
false
3.663657
true
false
false
garnertb/fire-risk
fire_risk/backends/__init__.py
1
2548
import psycopg2 from .queries import ALL_RESIDENTIAL_FIRES from psycopg2.extras import DictCursor class Backend(object): """ Backend mixin that should be used to implement APIs to read data. """ def connect(self): """ Connect to the backend. """ raise NotImplementedError def close_connection(self): """ Close the connection to the backend. """ raise NotImplementedError def query(self): """ Query the backend. """ raise NotImplementedError class FileBackend(Backend): """ Parse a set of NFIRS incident flat files for structure fires. Args: flatfiles (list): a list of file pathnames for files to be parsed. Returns: changes the values of the firespread_count attributes to calculated values """ pass class PostgresBackend(Backend): """ The Postgres Backend. """ def __init__(self, connection_params): self.connection_params = connection_params def __enter__(self): self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): self.close_connection() def connect(self): self.connection = psycopg2.connect(**self.connection_params) return self.connection def get_cursor(self): return self.connection.cursor(cursor_factory=DictCursor) def close_connection(self): self.connection.close() def query(self, query, query_params=()): cursor = self.get_cursor() cursor.execute(query, query_params) return cursor def get_firespread_counts(self, query=ALL_RESIDENTIAL_FIRES, query_params=()): results = self.query(query=query, query_params=query_params).fetchall() counts = dict(object_of_origin=0, room_of_origin=0, floor_of_origin=0, building_of_origin=0, beyond=0) for result in results: if result['fire_sprd'] == '1': counts['object_of_origin'] += result['count'] if result['fire_sprd'] == '2': counts['room_of_origin'] += result['count'] if result['fire_sprd'] == '3': counts['floor_of_origin'] += result['count'] if result['fire_sprd'] == '4': counts['building_of_origin'] += result['count'] if result['fire_sprd'] == '5': counts['beyond'] += result['count'] return counts if __name__ == '__main__': import doctest doctest.testmod()
mit
-6,737,203,091,711,348,000
24.737374
110
0.588305
false
4.177049
false
false
false
gardir/Devilry_sort
sort_deliveries.py
1
18439
#!/usr/bin/python # -*- coding: utf-8 -*- import os import sys import subprocess import time import shutil import glob from rettescript import print_failed class Devilry_Sort: def __init__(self, rootDir, execute=True, delete=False, log=False, rename=True, unzip="false", javacFlag=False, verbose=False): """ Initializes the class Parameters ---------- self : this This class rootDir : String A string describing the path to root directory execute : boolean Execute means the primary function will be executed (default=True) delete : boolean If true it will delete all older deliveries (default=False) log : boolean If log is true a seperate log-file for what was done is created (default False) rename : boolean If renaming is false, the user-id directories will not be renamed to contain only user-id (default=True) unzip : boolean If true program is to unzip a .zip file containing the deliveries before execute (default=False) verbose : boolean Be loud about what to do """ self.rootDir = rootDir self.execute = execute self.delete = delete self.log = log self.rename = rename self.unzip = unzip self.javacFlag = javacFlag self.verbose = verbose self.failed_javac = [] self.my_out = sys.stdout self.my_err = sys.stderr if log: log_filename = os.path.join(rootDir, "log.txt") self.log_file = open(log_filename, 'w') self.log_file.close() self.log_file = open(log_filename, 'a') self.write_to_log("Log created") self.my_out = self.log_file self.my_err = self.log_file elif not verbose: self.null_out = open(os.devnull, 'w') self.my_out = self.null_out self.my_err = subprocess.STDOUT def attempt_javac(self, path): """ Function inspired by rettescript.py written by Henrik Hillestad Løvold """ command = format("javac %s" % os.path.join(path, "*.java")) if self.verbose: print("%s:" % (command)) elif self.log: self.write_to_log(format("%s:" % command)) try: subprocess.check_call(command, shell=True, stdout=self.my_out, stderr=self.my_err) except subprocess.CalledProcessError: return 1 # No problem return 0 def dive_delete(self, root_depth): """ """ for dirpath, subdirList, fileList in os.walk(rootDir, topdown=False): depthList = dirpath.split(os.path.sep) depth = len(depthList) - root_depth if depth == 1: for subdir in subdirList: path = os.path.join(dirpath, subdir).replace(" ", "\ ") command = ["rm", "-r", path] if self.verbose: print("Recursive removing '%s'" % path) elif self.log: self.write_to_log(format("Recursive removing '%s'" % path)) #subprocess.call(command, stdout = self.my_out, stderr = self.my_err) shutil.rmtree(path) def dive_delete_dir(self, root_depth): for dirpath, subdirList, fileList in os.walk(rootDir, topdown = False): depth = len(dirpath.split(os.path.sep)) - root_depth created = False for subdir in subdirList: folder = os.path.join(dirpath, subdir) command = ['rm', '-d', folder] try: if self.verbose: print("Trying to remove empty folder: %s" % folder) elif self.log: self.write_to_log(format("Trying to remove empty folder: %s" % folder)) #subprocess.check_call(command, stdout = self.my_out, stderr = self.my_err) os.rmdir(folder) #except subprocess.CalledProcessError: except OSError: if self.verbose: print("Removing empty folder failed: %s" % folder) elif self.log: self.write_to_log(format("Removing empty folder failed: %s" % folder)) if depth == 1: self.move(dirpath, subdir) java_files_present = len(glob.glob(dirpath+os.path.sep+'*.java')) > 0 if java_files_present and self.attempt_javac(dirpath) != 0: if self.verbose: print("%s failed javac" % dirpath) elif self.log: self.write_to_log(format("%s 
failed javac" % dirpath)) self.failed_javac.append(dirpath) def dive_move(self, root_depth): for dirpath, subdirList, fileList in os.walk(rootDir, topdown=True): depthList = dirpath.split(os.path.sep) depth = len(depthList) - root_depth # We only want last deadline and last delivery if depth == 1 or depth == 2: if (len(subdirList) > 1): last = sorted(subdirList)[-1] i = 0 max = len(subdirList) while (i < max): if (last != subdirList[i]): del subdirList[i] i-=1 max-=1 i+=1 #subdirList = sorted(subdirList)[-1:] elif depth == 3: from_path = dirpath to_path = os.path.join(*from_path.split(os.path.sep)[:-2]) if self.verbose: print("Moving all files in '%s' to '%s'" % (from_path, to_path)) elif self.log: self.write_to_log(format( "Moving all files in '%s' to '%s'" % (from_path, to_path))) for work_file in fileList: file_path = os.path.join(from_path, work_file) new_file_path = os.path.join(to_path, work_file) if self.verbose: print("Renaming '%s' to '%s'" % (file_path, new_file_path)) elif self.log: self.write_to_log(format("Moved '%s' to '%s'" % (file_path, new_file_path))) #shutil.move(file_path, new_file_path) os.rename(file_path, new_file_path) def move(self, root_path, folder): from_path = os.path.join(root_path, folder) to_path = os.path.join(root_path, "older") command = ['mv', from_path, to_path] if self.verbose: print("Moving older files '%s' into '%s'" % (from_path, to_path)) elif self.log: self.write_to_log(format("Moving older files '%s' into '%s'" % (from_path, to_path))) #subprocess.call(command, stdout = self.my_out, stderr = self.my_err) try: shutil.move(from_path, to_path) except IOError as e: if self.verbose: print("ERROR: Could not move '%s' to '%s'" % (from_path, to_path)) print(e) elif self.log: self.write_to_log("ERROR: Could not move '%s' to '%s'\n%s" % (from_path, to_path, e)) def run(self): root_depth = len(self.rootDir.split(os.path.sep)) if self.unzip != "false": self.execute = self.unzip_execute(root_depth) if self.execute: if self.rename: self.user_rename() self.dive_move(root_depth) self.dive_delete_dir(root_depth) if self.delete: self.dive_delete(root_depth) if self.log: self.log_file.close() elif not verbose: self.null_out.close() def unzip_execute(self, root_depth): zipfile = self.unzip if self.unzip == "true": zipfile = self.find_zip_file(root_depth) # Return if _one_ zip file only not found. 
if self.execute: self.unzip_file(zipfile) self.unzip_clean(root_depth, zipfile) return execute def find_zip_file(self, root_depth): files = "" zipfiles = [] for dirpath, subdirs, filenames in os.walk(self.rootDir): depth = len(dirpath.split(os.path.sep)) - root_depth if depth == 0: if self.verbose: print("Looking for zip files.") files = filenames; for afile in files: if afile[-4:] == ".zip": if self.verbose: print("Found zip-file: %s" % afile) elif self.log: self.write_to_log(format("Found zip-file: %s" % afile)) zipfiles.append(afile) if len(zipfiles) > 1: print("Please have only the zipfile from Devilry in folder") self.execute = False elif len(zipfiles) == 0: print("No zipfiles were found in '%s%s'" % (rootDir, os.path.sep)) self.execute = False break # out from os.walk() as only files from root needed if len(zipfiles) > 0: return zipfiles[0] return "" def unzip_file(self, zipfile): # Unzip command from_path = format("%s" % (zipfile)) to_path = self.rootDir command = ['unzip', from_path, "-d", to_path] if self.verbose: print("Unzipping file: %s" % from_path) elif self.log: self.write_to_log(format("Unzipping file '%s'" % (from_path))) subprocess.call(command, stdout = self.my_out, stderr = self.my_err) def unzip_clean(self, root_depth, unzip_file): for dirpath, subdirs, filenames in os.walk(self.rootDir): # Finding current depth if (dirpath[-1] == os.path.sep): depth = len(dirpath[:-1].split(os.path.sep)) - root_depth else: depth = len(dirpath.split(os.path.sep)) - root_depth # After unzipping, depth 1 is inside unzipped folder (based on Devilry) if depth == 1: if self.verbose: print("Going through folders within '%s'" % dirpath) elif self.log: self.write_to_log(format("Going through folders within '%s'" % (dirpath))) # Move all users/groups one directory down/back for subdir in subdirs: from_path = os.path.join(dirpath, subdir) to_path = os.path.join(*dirpath.split(os.path.sep)[:-1]) if self.verbose: print("Moving '%s' down to '%s'" % (from_path, to_path)) elif self.log: self.write_to_log(format("Moving '%s' down to '%s'" % (from_path, to_path))) shutil.move(from_path, to_path) break # out from sub-folder created after zip. 
only these files needed moving # Remove the now empty folder unzipped_folder = unzip_file[unzip_file.rfind("/")+1:-4] from_path = os.path.join(self.rootDir, unzipped_folder) command = ["rm", "-d", from_path] if self.verbose: print("Removing empty folder: %s" % from_path) elif self.log: self.write_to_log(format("Removing empty folder: %s" % (from_path))) #subprocess.call(command, stdout = self.my_out, stderr = self.my_err) shutil.rmtree(from_path) def user_rename(self): for dirpath, subdirList, fileList in os.walk(rootDir): for subdir in subdirList: filepath = os.path.join(dirpath, subdir) new_filepath = os.path.join(dirpath, (subdir[0:subdir.find('(')]).replace(" ", "")) if self.verbose: print("Renaming '%s' to '%s'" % (filepath, new_filepath)) elif self.log: self.write_to_log(format("Renaming '%s' to '%s'" % (filepath, new_filepath))) os.rename(filepath, new_filepath) break def write_to_log(self, text): self.log_file.write( format("%s-%s: %s\n" % (time.strftime("%H:%M"), time.strftime("%d/%m/%Y"), text))) def print_usage(): print("Usage: python sort_deliveries.py [options] path") print("Mandatory: path") print("%10s -- %-s" % ("path", "the mandatory argument which is the output folder to have all user directories within when script is done")) print("Options: -b -c -d -D -h -l -v -z [zipfile]") print("%10s -- %-s" % ("-b", "bare move, no rename of user folder")) print("%10s -- %-s" % ("-c", "runs javac on each user, and prints those that fail")) print("%10s -- %-s" % ("-d", "delete the other files and folders")) print("%10s -- %-s" % ("-D", "DEBUG mode, program will not execute")) print("%10s -- %-s" % ("-h", "shows this menu")) print("%10s -- %-s" % ("-l", "creates a log file for what happens")) print("%10s -- %-s" % ("-v", "loud about what happens")) print("%10s -- %-s" % ("-z", "unzips the .zip file in path first (if only 1 is present)")) print("%10s -- %-s" % ("-z zipfile", "unzipz the specified zip file in path first")) print("Example usages") print("python sort_deliveries -z ~/Downloads/deliveries.zip .") print("Above command will first unzip the 'deliveries.zip' into current folder, and then sort all files") print("--") print("python sort_deliveries -z ~/Downloads/deliveries.zip ~/assignments/assignment1") print("Above command will first unzip the 'deliveries.zip' into the folder at '$HOME/assignments/assignment1/' before sorting said directory") print("--") print("python sort_deliveries .") print("Above command will sort deliveries from current directory - it should contain ALL the users folders - so it is NOT enough to just unzip the zip file and then run the sort script on subdirectory. 
It should be run on directory.") print("Command executions example") print("unzip ~/Downloads/deliveries.zip ## This will create a folder with the same name as zip-file in current working directory") print("python sort_deliveries deliveries ## Assuming the name of folder is equal to the zip file, it should be included as 'path'") if __name__=='__main__': """ TO BE DONE # Argument Parser parser = argparse.ArgumentParser(description="Usage:\npython sort_deliveries.py [options] pathProgram preprocesses a latex-file ('infile') and produces a new latex-file ('outfile') with additional functionality") parser.add_argument("infile", help="Name of the latex-file you want preprocessed") parser.add_argument("-o", "--outfile", nargs=1, help="Name of the new file (cannot be equal to infile)") parser.add_argument("-f", "--fancy_verbatim", help="produces more fancy verbatim", action="store_true") parser.add_argument("-v", "--verbosity", help="increase output verbosity", action="store_true") args = parser.parse_args() verbose = args.verbosity fancy = args.fancy_verbatim if len(sys.argv) < 2 or sys.argv[-1][0] == '-': print_usage() sys.exit() # Quits """ rootDir = "." execute = True delete = False rename = True log = False unzip = "false" verbose = False javacFlag = False # Find correct path according to arguments argc = 1 # 0 would be programname argl = len(sys.argv)-1 # .py -> program not the only argument # '-' -> last argument not an option # .zip -> last argument not the zip-file if argl < 1 or \ sys.argv[argl].find(".py") >= 0 or \ sys.argv[argl][0] == '-' or \ sys.argv[argl].find(".zip") >= 0: print_usage() sys.exit() rootDir = os.path.join(rootDir, sys.argv[-1])[2:] if (rootDir[-1] == os.path.sep): rootDir = rootDir[:-1] # Handle arguments while argc < argl: arg = sys.argv[argc] options = list(arg) for letter in options[1]: if letter == 'z': unzip = "true" if argc+1 < argl and sys.argv[argc+1].find(".zip", len(sys.argv[argc+1])-4) != -1: argc += 1 unzip = sys.argv[argc] elif letter == "h": print_usage() execute = False break elif letter == "l": log = True elif letter == "v": verbose = True elif letter == "d": delete = True elif letter == "b": rename = False elif letter == "D": execute = False elif letter == "c": javacFlag = True argc += 1 # Execute if executable if execute: sorter = Devilry_Sort(rootDir, execute, delete, log, rename, unzip, javacFlag, verbose) sorter.run() if javacFlag and len(sorter.failed_javac) > 0: print_failed(sorter.failed_javac) elif javacFlag: print("All students compiled")
gpl-2.0
3,814,187,668,424,409,600
39.169935
238
0.507485
false
4.13965
false
false
false
gurnec/HashCheck
UnitTests/get-sha-test-vectors.py
1
3573
#!/usr/bin/python3 # # SHA test vector downloader & builder # Copyright (C) 2016 Christopher Gurnee. All rights reserved. # # Please refer to readme.md for information about this source code. # Please refer to license.txt for details about distribution and modification. # # Downloads/builds SHA1-3 test vectors from the NIST Cryptographic Algorithm Validation Program import os, os.path, urllib.request, io, zipfile, glob, re # Determine and if necessary create the output directory test_vectors_dir = os.path.join(os.path.dirname(__file__), 'vectors\\') if not os.path.isdir(test_vectors_dir): os.mkdir(test_vectors_dir) # Download and unzip the two NIST test vector "response" files for sha_url in ('http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip', 'http://csrc.nist.gov/groups/STM/cavp/documents/sha3/sha-3bytetestvectors.zip'): print('downloading and extracting', sha_url) with urllib.request.urlopen(sha_url) as sha_downloading: # open connection to the download url; with io.BytesIO(sha_downloading.read()) as sha_downloaded_zip: # download entirely into ram; with zipfile.ZipFile(sha_downloaded_zip) as sha_zipcontents: # open the zip file from ram; sha_zipcontents.extractall(test_vectors_dir) # extract the zip file into the output dir # Convert each response file into a set of test vector files and a single expected .sha* file print('creating test vector files and expected .sha* files from NIST response files') rsp_filename_re = re.compile(r'\bSHA([\d_]+)(?:Short|Long)Msg.rsp$', re.IGNORECASE) for rsp_filename in glob.iglob(test_vectors_dir + '*.rsp'): rsp_filename_match = rsp_filename_re.search(rsp_filename) if not rsp_filename_match: # ignore the Monte Carlo simulation files continue print(' processing', rsp_filename_match.group(0)) with open(rsp_filename) as rsp_file: # Create the expected .sha file which covers this set of test vector files with open(rsp_filename + '.sha' + rsp_filename_match.group(1).replace('_', '-'), 'w', encoding='utf8') as sha_file: dat_filenum = 0 for line in rsp_file: # The "Len" line, specifies the length of the following test vector in bits if line.startswith('Len ='): dat_filelen = int(line[5:].strip()) dat_filelen, dat_filelenmod = divmod(dat_filelen, 8) if dat_filelenmod != 0: raise ValueError('unexpected bit length encountered (not divisible by 8)') # The "Msg" line, specifies the test vector encoded in hex elif line.startswith('Msg ='): dat_filename = rsp_filename + '-{:04}.dat'.format(dat_filenum) dat_filenum += 1 # Create the test vector file with open(dat_filename, 'wb') as dat_file: dat_file.write(bytes.fromhex(line[5:].strip()[:2*dat_filelen])) del dat_filelen # The "MD" line, specifies the expected hash encoded in hex elif line.startswith('MD ='): # Write the expected hash to the .sha file which covers this test vector file print(line[4:].strip(), '*' + os.path.basename(dat_filename), file=sha_file) del dat_filename print("done")
bsd-3-clause
-4,253,490,971,588,409,000
46.283784
123
0.617128
false
3.965594
true
false
false
NINAnor/QGIS
python/plugins/processing/gui/AlgorithmDialogBase.py
1
6211
# -*- coding: utf-8 -*- """ *************************************************************************** AlgorithmDialogBase.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os import webbrowser from PyQt4 import uic from PyQt4.QtCore import QCoreApplication, QSettings, QByteArray, SIGNAL, QUrl from PyQt4.QtGui import QApplication, QDialogButtonBox, QDesktopWidget from qgis.utils import iface from qgis.core import * from processing.core.ProcessingConfig import ProcessingConfig from processing.gui import AlgorithmClassification pluginPath = os.path.split(os.path.dirname(__file__))[0] WIDGET, BASE = uic.loadUiType( os.path.join(pluginPath, 'ui', 'DlgAlgorithmBase.ui')) class AlgorithmDialogBase(BASE, WIDGET): class InvalidParameterValue(Exception): def __init__(self, param, widget): (self.parameter, self.widget) = (param, widget) def __init__(self, alg): super(AlgorithmDialogBase, self).__init__(iface.mainWindow()) self.setupUi(self) self.settings = QSettings() self.restoreGeometry(self.settings.value("/Processing/dialogBase", QByteArray())) self.executed = False self.mainWidget = None self.alg = alg # Rename OK button to Run self.btnRun = self.buttonBox.button(QDialogButtonBox.Ok) self.btnRun.setText(self.tr('Run')) self.btnClose = self.buttonBox.button(QDialogButtonBox.Close) self.setWindowTitle(AlgorithmClassification.getDisplayName(self.alg)) desktop = QDesktopWidget() if desktop.physicalDpiX() > 96: self.textHelp.setZoomFactor(desktop.physicalDpiX() / 96) algHelp = self.alg.shortHelp() if algHelp is None: self.textShortHelp.setVisible(False) else: self.textShortHelp.document().setDefaultStyleSheet('''.summary { margin-left: 10px; margin-right: 10px; } h2 { color: #555555; padding-bottom: 15px; } a { text-decoration: none; color: #3498db; font-weight: bold; } p { color: #666666; } b { color: #333333; } dl dd { margin-bottom: 5px; }''') self.textShortHelp.setHtml(algHelp) self.textShortHelp.setOpenLinks(False) def linkClicked(url): webbrowser.open(url.toString()) self.textShortHelp.connect(self.textShortHelp, SIGNAL("anchorClicked(const QUrl&)"), linkClicked) self.textHelp.page().setNetworkAccessManager(QgsNetworkAccessManager.instance()) isText, algHelp = self.alg.help() if algHelp is not None: algHelp = algHelp if isText else QUrl(algHelp) try: if isText: self.textHelp.setHtml(algHelp) else: self.textHelp.settings().clearMemoryCaches() self.textHelp.load(algHelp) except: self.tabWidget.removeTab(2) else: self.tabWidget.removeTab(2) self.showDebug = ProcessingConfig.getSetting( ProcessingConfig.SHOW_DEBUG_IN_DIALOG) def closeEvent(self, evt): self.settings.setValue("/Processing/dialogBase", self.saveGeometry()) super(AlgorithmDialogBase, self).closeEvent(evt) def setMainWidget(self): self.tabWidget.widget(0).layout().addWidget(self.mainWidget) def error(self, msg): QApplication.restoreOverrideCursor() self.setInfo(msg, 
True) self.resetGUI() self.tabWidget.setCurrentIndex(1) def resetGUI(self): QApplication.restoreOverrideCursor() self.lblProgress.setText('') self.progressBar.setMaximum(100) self.progressBar.setValue(0) self.btnRun.setEnabled(True) self.btnClose.setEnabled(True) def setInfo(self, msg, error=False): if error: self.txtLog.append('<span style="color:red"><br>%s<br></span>' % msg) else: self.txtLog.append(msg) QCoreApplication.processEvents() def setCommand(self, cmd): if self.showDebug: self.setInfo('<code>%s<code>' % cmd) QCoreApplication.processEvents() def setDebugInfo(self, msg): if self.showDebug: self.setInfo('<span style="color:blue">%s</span>' % msg) QCoreApplication.processEvents() def setConsoleInfo(self, msg): if self.showDebug: self.setCommand('<span style="color:darkgray">%s</span>' % msg) QCoreApplication.processEvents() def setPercentage(self, value): if self.progressBar.maximum() == 0: self.progressBar.setMaximum(100) self.progressBar.setValue(value) QCoreApplication.processEvents() def setText(self, text): self.lblProgress.setText(text) self.setInfo(text, False) QCoreApplication.processEvents() def setParamValues(self): pass def setParamValue(self, param, widget, alg=None): pass def accept(self): pass def finish(self): pass
gpl-2.0
-3,842,399,775,241,946,000
34.090395
117
0.566736
false
4.292329
false
false
false
nathanielvarona/airflow
airflow/providers/apache/sqoop/hooks/sqoop.py
1
15515
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """This module contains a sqoop 1.x hook""" import subprocess from copy import deepcopy from typing import Any, Dict, List, Optional from airflow.exceptions import AirflowException from airflow.hooks.base import BaseHook class SqoopHook(BaseHook): """ This hook is a wrapper around the sqoop 1 binary. To be able to use the hook it is required that "sqoop" is in the PATH. Additional arguments that can be passed via the 'extra' JSON field of the sqoop connection: * ``job_tracker``: Job tracker local|jobtracker:port. * ``namenode``: Namenode. * ``lib_jars``: Comma separated jar files to include in the classpath. * ``files``: Comma separated files to be copied to the map reduce cluster. * ``archives``: Comma separated archives to be unarchived on the compute machines. * ``password_file``: Path to file containing the password. :param conn_id: Reference to the sqoop connection. :type conn_id: str :param verbose: Set sqoop to verbose. :type verbose: bool :param num_mappers: Number of map tasks to import in parallel. 
:type num_mappers: int :param properties: Properties to set via the -D argument :type properties: dict """ conn_name_attr = 'conn_id' default_conn_name = 'sqoop_default' conn_type = 'sqoop' hook_name = 'Sqoop' def __init__( self, conn_id: str = default_conn_name, verbose: bool = False, num_mappers: Optional[int] = None, hcatalog_database: Optional[str] = None, hcatalog_table: Optional[str] = None, properties: Optional[Dict[str, Any]] = None, ) -> None: # No mutable types in the default parameters super().__init__() self.conn = self.get_connection(conn_id) connection_parameters = self.conn.extra_dejson self.job_tracker = connection_parameters.get('job_tracker', None) self.namenode = connection_parameters.get('namenode', None) self.libjars = connection_parameters.get('libjars', None) self.files = connection_parameters.get('files', None) self.archives = connection_parameters.get('archives', None) self.password_file = connection_parameters.get('password_file', None) self.hcatalog_database = hcatalog_database self.hcatalog_table = hcatalog_table self.verbose = verbose self.num_mappers = num_mappers self.properties = properties or {} self.log.info("Using connection to: %s:%s/%s", self.conn.host, self.conn.port, self.conn.schema) def get_conn(self) -> Any: return self.conn def cmd_mask_password(self, cmd_orig: List[str]) -> List[str]: """Mask command password for safety""" cmd = deepcopy(cmd_orig) try: password_index = cmd.index('--password') cmd[password_index + 1] = 'MASKED' except ValueError: self.log.debug("No password in sqoop cmd") return cmd def popen(self, cmd: List[str], **kwargs: Any) -> None: """ Remote Popen :param cmd: command to remotely execute :param kwargs: extra arguments to Popen (see subprocess.Popen) :return: handle to subprocess """ masked_cmd = ' '.join(self.cmd_mask_password(cmd)) self.log.info("Executing command: %s", masked_cmd) with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) as sub_process: for line in iter(sub_process.stdout): # type: ignore self.log.info(line.strip()) sub_process.wait() self.log.info("Command exited with return code %s", sub_process.returncode) if sub_process.returncode: raise AirflowException(f"Sqoop command failed: {masked_cmd}") def _prepare_command(self, export: bool = False) -> List[str]: sqoop_cmd_type = "export" if export else "import" connection_cmd = ["sqoop", sqoop_cmd_type] for key, value in self.properties.items(): connection_cmd += ["-D", f"{key}={value}"] if self.namenode: connection_cmd += ["-fs", self.namenode] if self.job_tracker: connection_cmd += ["-jt", self.job_tracker] if self.libjars: connection_cmd += ["-libjars", self.libjars] if self.files: connection_cmd += ["-files", self.files] if self.archives: connection_cmd += ["-archives", self.archives] if self.conn.login: connection_cmd += ["--username", self.conn.login] if self.conn.password: connection_cmd += ["--password", self.conn.password] if self.password_file: connection_cmd += ["--password-file", self.password_file] if self.verbose: connection_cmd += ["--verbose"] if self.num_mappers: connection_cmd += ["--num-mappers", str(self.num_mappers)] if self.hcatalog_database: connection_cmd += ["--hcatalog-database", self.hcatalog_database] if self.hcatalog_table: connection_cmd += ["--hcatalog-table", self.hcatalog_table] connect_str = self.conn.host if self.conn.port: connect_str += f":{self.conn.port}" if self.conn.schema: connect_str += f"/{self.conn.schema}" connection_cmd += ["--connect", connect_str] return connection_cmd @staticmethod 
def _get_export_format_argument(file_type: str = 'text') -> List[str]: if file_type == "avro": return ["--as-avrodatafile"] elif file_type == "sequence": return ["--as-sequencefile"] elif file_type == "parquet": return ["--as-parquetfile"] elif file_type == "text": return ["--as-textfile"] else: raise AirflowException("Argument file_type should be 'avro', 'sequence', 'parquet' or 'text'.") def _import_cmd( self, target_dir: Optional[str], append: bool, file_type: str, split_by: Optional[str], direct: Optional[bool], driver: Any, extra_import_options: Any, ) -> List[str]: cmd = self._prepare_command(export=False) if target_dir: cmd += ["--target-dir", target_dir] if append: cmd += ["--append"] cmd += self._get_export_format_argument(file_type) if split_by: cmd += ["--split-by", split_by] if direct: cmd += ["--direct"] if driver: cmd += ["--driver", driver] if extra_import_options: for key, value in extra_import_options.items(): cmd += [f'--{key}'] if value: cmd += [str(value)] return cmd # pylint: disable=too-many-arguments def import_table( self, table: str, target_dir: Optional[str] = None, append: bool = False, file_type: str = "text", columns: Optional[str] = None, split_by: Optional[str] = None, where: Optional[str] = None, direct: bool = False, driver: Any = None, extra_import_options: Optional[Dict[str, Any]] = None, ) -> Any: """ Imports table from remote location to target dir. Arguments are copies of direct sqoop command line arguments :param table: Table to read :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet". Imports data to into the specified format. Defaults to text. :param columns: <col,col,col…> Columns to import from table :param split_by: Column of the table used to split work units :param where: WHERE clause to use during import :param direct: Use direct connector if exists for the database :param driver: Manually specify JDBC driver class to use :param extra_import_options: Extra import options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. """ cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--table", table] if columns: cmd += ["--columns", columns] if where: cmd += ["--where", where] self.popen(cmd) def import_query( self, query: str, target_dir: Optional[str] = None, append: bool = False, file_type: str = "text", split_by: Optional[str] = None, direct: Optional[bool] = None, driver: Optional[Any] = None, extra_import_options: Optional[Dict[str, Any]] = None, ) -> Any: """ Imports a specific query from the rdbms to hdfs :param query: Free format query to run :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet" Imports data to hdfs into the specified format. Defaults to text. :param split_by: Column of the table used to split work units :param direct: Use direct import fast path :param driver: Manually specify JDBC driver class to use :param extra_import_options: Extra import options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. 
""" cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--query", query] self.popen(cmd) # pylint: disable=too-many-arguments def _export_cmd( self, table: str, export_dir: Optional[str] = None, input_null_string: Optional[str] = None, input_null_non_string: Optional[str] = None, staging_table: Optional[str] = None, clear_staging_table: bool = False, enclosed_by: Optional[str] = None, escaped_by: Optional[str] = None, input_fields_terminated_by: Optional[str] = None, input_lines_terminated_by: Optional[str] = None, input_optionally_enclosed_by: Optional[str] = None, batch: bool = False, relaxed_isolation: bool = False, extra_export_options: Optional[Dict[str, Any]] = None, ) -> List[str]: cmd = self._prepare_command(export=True) if input_null_string: cmd += ["--input-null-string", input_null_string] if input_null_non_string: cmd += ["--input-null-non-string", input_null_non_string] if staging_table: cmd += ["--staging-table", staging_table] if clear_staging_table: cmd += ["--clear-staging-table"] if enclosed_by: cmd += ["--enclosed-by", enclosed_by] if escaped_by: cmd += ["--escaped-by", escaped_by] if input_fields_terminated_by: cmd += ["--input-fields-terminated-by", input_fields_terminated_by] if input_lines_terminated_by: cmd += ["--input-lines-terminated-by", input_lines_terminated_by] if input_optionally_enclosed_by: cmd += ["--input-optionally-enclosed-by", input_optionally_enclosed_by] if batch: cmd += ["--batch"] if relaxed_isolation: cmd += ["--relaxed-isolation"] if export_dir: cmd += ["--export-dir", export_dir] if extra_export_options: for key, value in extra_export_options.items(): cmd += [f'--{key}'] if value: cmd += [str(value)] # The required option cmd += ["--table", table] return cmd # pylint: disable=too-many-arguments def export_table( self, table: str, export_dir: Optional[str] = None, input_null_string: Optional[str] = None, input_null_non_string: Optional[str] = None, staging_table: Optional[str] = None, clear_staging_table: bool = False, enclosed_by: Optional[str] = None, escaped_by: Optional[str] = None, input_fields_terminated_by: Optional[str] = None, input_lines_terminated_by: Optional[str] = None, input_optionally_enclosed_by: Optional[str] = None, batch: bool = False, relaxed_isolation: bool = False, extra_export_options: Optional[Dict[str, Any]] = None, ) -> None: """ Exports Hive table to remote location. Arguments are copies of direct sqoop command line Arguments :param table: Table remote destination :param export_dir: Hive table to export :param input_null_string: The string to be interpreted as null for string columns :param input_null_non_string: The string to be interpreted as null for non-string columns :param staging_table: The table in which data will be staged before being inserted into the destination table :param clear_staging_table: Indicate that any data present in the staging table can be deleted :param enclosed_by: Sets a required field enclosing character :param escaped_by: Sets the escape character :param input_fields_terminated_by: Sets the field separator character :param input_lines_terminated_by: Sets the end-of-line character :param input_optionally_enclosed_by: Sets a field enclosing character :param batch: Use batch mode for underlying statement execution :param relaxed_isolation: Transaction isolation to read uncommitted for the mappers :param extra_export_options: Extra export options to pass as dict. If a key doesn't have a value, just pass an empty string to it. 
Don't include prefix of -- for sqoop options. """ cmd = self._export_cmd( table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options, ) self.popen(cmd)
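# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal illustration of how the SqoopHook above is typically driven from an
# Airflow task. The connection id 'sqoop_default', the table name, and the HDFS
# path are hypothetical; the sketch assumes a configured Airflow connection and,
# as the hook's docstring requires, a working `sqoop` binary on the PATH.
from airflow.providers.apache.sqoop.hooks.sqoop import SqoopHook

def run_example_import():
    hook = SqoopHook(conn_id='sqoop_default', verbose=True, num_mappers=4)
    # Parameters mirror those documented on import_table() above.
    hook.import_table(
        table='customers',            # hypothetical source table
        target_dir='/tmp/customers',  # hypothetical HDFS destination dir
        file_type='parquet',          # one of avro/sequence/text/parquet
        split_by='id',
    )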
apache-2.0
389,434,711,630,519,940
36.652913
110
0.597499
false
4.073792
false
false
false
jpvanhal/cloudsizzle
cloudsizzle/scrapers/oodi/items.py
1
1589
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2010 CloudSizzle Team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.

"""
The models for scraped items.

See documentation in:
http://doc.scrapy.org/topics/items.html
"""
from scrapy.item import Item, Field

from cloudsizzle.scrapers.items import DateField


class CompletedCourseItem(Item):
    name = Field()
    code = Field()
    cr = Field()
    ocr = Field()
    grade = Field()
    date = DateField('%d.%m.%Y')
    teacher = Field()
    module = Field()


class ModuleItem(Item):
    name = Field()
    code = Field()
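# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how the CompletedCourseItem defined above behaves: Scrapy Items are
# dict-like containers whose allowed keys are the declared Fields. It assumes
# Scrapy and the cloudsizzle package (for DateField) are importable; the field
# values below are made up for illustration.
def build_example_item():
    item = CompletedCourseItem()
    item['name'] = 'Introduction to Programming'
    item['code'] = 'T-106.1208'
    item['cr'] = '5'
    item['grade'] = '4'
    return dict(item)   # plain dict view of the populated fields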
mit
7,193,697,621,543,712,000
30.156863
67
0.730648
false
4.063939
false
false
false
Jigsaw-Code/net-analysis
netanalysis/traffic/data/api_repository.py
1
3175
#!/usr/bin/python
#
# Copyright 2019 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Library to access Google's traffic data from its Transparency Report
"""
import datetime
import json
import ssl
import time
from urllib.request import urlopen, Request
from urllib.parse import urlencode, quote

import certifi
import pandas as pd

from netanalysis.traffic.data import model


def _to_timestamp(time_point: datetime.datetime):
    return time.mktime(time_point.timetuple())


_SSL_CONTEXT = ssl.create_default_context(cafile=certifi.where())


class ApiTrafficRepository(model.TrafficRepository):
    """TrafficRepository that reads the traffic data from Google's Transparency Report."""

    def _query_api(self, endpoint, params=None):
        query_url = "https://www.google.com/transparencyreport/api/v3/traffic/" + \
            quote(endpoint)
        if params:
            query_url = query_url + "?" + urlencode(params)
        try:
            request = Request(query_url)
            request.add_header("User-Agent", "Jigsaw-Code/netanalysis")
            with urlopen(request, context=_SSL_CONTEXT) as response:
                return json.loads(response.read()[6:].decode("utf8"))
        except Exception as error:
            raise Exception("Failed to query url %s" % query_url, error)

    def list_regions(self):
        response_proto = self._query_api("regionlist")
        return sorted([e[0] for e in response_proto[0][1]])

    def get_traffic(self, region_code: str, product_id: model.ProductId,
                    start: datetime.datetime = None,
                    end: datetime.datetime = None):
        DEFAULT_INTERVAL_DAYS = 2 * 365
        POINTS_PER_DAY = 48
        if not end:
            end = datetime.datetime.now()
        if not start:
            start = end - datetime.timedelta(days=DEFAULT_INTERVAL_DAYS)
        number_of_days = (end - start).days
        total_points = int(number_of_days * POINTS_PER_DAY)
        entries = []
        params = [
            ("start", int(_to_timestamp(start) * 1000)),
            ("end", int(_to_timestamp(end) * 1000)),
            ("width", total_points),
            ("product", product_id.value),
            ("region", region_code)]
        response_proto = self._query_api("fraction", params)
        entry_list_proto = response_proto[0][1]
        for entry_proto in entry_list_proto:
            timestamp = datetime.datetime.utcfromtimestamp(
                entry_proto[0] / 1000)
            value = entry_proto[1][0][1]
            entries.append((timestamp, value / POINTS_PER_DAY / 2))
        dates, traffic = zip(*entries)
        return pd.Series(traffic, index=dates)
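# --- Hedged illustration (not part of the original module) -------------------
# get_traffic() above returns a pandas Series indexed by timestamps, with one
# value per half-hourly point (POINTS_PER_DAY = 48). This standalone sketch
# mimics that final construction step with made-up numbers, so it runs without
# any network access or Transparency Report credentials.
import datetime
import pandas as pd

POINTS_PER_DAY = 48
entries = [
    (datetime.datetime(2019, 1, 1, 0, 0), 42.0 / POINTS_PER_DAY / 2),
    (datetime.datetime(2019, 1, 1, 0, 30), 40.0 / POINTS_PER_DAY / 2),
]
dates, traffic = zip(*entries)
example_series = pd.Series(traffic, index=dates)
print(example_series)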
apache-2.0
-1,836,266,151,497,937,000
36.352941
90
0.647874
false
3.953923
false
false
false
openstate/yournextrepresentative
candidates/diffs.py
1
8701
# The functions in this file are to help produce human readable diffs # between our JSON representation of candidates. import re from django.conf import settings from django.utils.translation import ugettext as _ import jsonpatch import jsonpointer def get_descriptive_value(election, attribute, value, leaf): """Get a sentence fragment describing someone's status in a particular year 'attribute' is either "standing_in" or "party_membership", 'election' is one of the keys from settings.ELECTIONS, and 'value' is what would be under that year in the 'standing_in' or 'party_memberships' dictionary (see the comment at the top of update.py).""" election_data = settings.ELECTIONS[election] current_election = election_data.get('current') election_name = election_data['name'] if attribute == 'party_memberships': if leaf: # In that case, there's only a particular value in the # dictionary that's changed: if leaf == 'name': if current_election: message = _(u"is known to be standing for the party '{party}' in the {election}") else: message = _(u"was known to be standing for the party '{party}' in the {election}") return message.format(party=value, election=election_name) elif leaf == 'id': if current_election: message = _(u'is known to be standing for the party with ID {party} in the {election}') else: message = _(u'was known to be standing for the party with ID {party} in the {election}') return message.format(party=value, election=election_name) else: message = _(u"Unexpected leaf {0} (attribute: {1}, election: {2}") raise Exception, message.format( leaf, attribute, election ) else: if current_election: message = _(u'is known to be standing for the party "{party}" in the {election}') else: message = _(u'was known to be standing for the party "{party}" in the {election}') return message.format(party=value['name'], election=election_name) elif attribute == 'standing_in': if value is None: if current_election: message = _(u'is known not to be standing in the {election}') else: message = _(u'was known not to be standing in the {election}') return message.format(election=election_name) else: if leaf: if leaf == 'post_id': if current_election: message = _("is known to be standing for the post with ID {party} in the {election}") else: message = _("was known to be standing for the post with ID {party} in the {election}") return message.format(party=value, election=election_name) elif leaf == 'mapit_url': if current_election: message = _("is known to be standing in the constituency with MapIt URL {party} in the {election}") else: message = _("was known to be standing in the constituency with MapIt URL {party} in the {election}") return message.format(party=value, election=election_name) elif leaf == 'name': if current_election: message = _("is known to be standing in {party} in the {election}") else: message = _("was known to be standing in {party} in the {election}") return message.format(party=value, election=election_name) elif leaf == 'elected': if value: return _("was elected in the {election}").format(election=election_name) else: return _("was not elected in the {election}").format(election=election_name) else: message = _(u"Unexpected leaf {0} (attribute: {1}, election: {2}") raise Exception, message.format( leaf, attribute, election ) else: if current_election: message = _(u'is known to be standing in {party} in the {election}') else: message = _(u'was known to be standing in {party} in the {election}') return message.format(party=value['name'], election=election_name) def 
explain_standing_in_and_party_memberships(operation, attribute, election, leaf): """Set 'value' and 'previous_value' in operation to a readable explanation 'attribute' is one of 'standing_in' or 'party_memberships'.""" for key in ('previous_value', 'value'): if key not in operation: continue if election: operation[key] = get_descriptive_value( election, attribute, operation[key], leaf, ) else: clauses = [] for election, value in (operation[key] or {}).items(): clauses.append(get_descriptive_value( election, attribute, value, leaf, )) operation[key] = _(u' and ').join(clauses) def get_version_diff(from_data, to_data): """Calculate the diff (a mangled JSON patch) between from_data and to_data""" basic_patch = jsonpatch.make_patch(from_data, to_data) result = [] for operation in basic_patch: op = operation['op'] # We deal with standing_in and party_memberships slightly # differently so they can be presented in human-readable form, # so match those cases first: m = re.search( r'(standing_in|party_memberships)(?:/([-_A-Za-z0-9]+))?(?:/(\w+))?', operation['path'], ) if op in ('replace', 'remove'): operation['previous_value'] = \ jsonpointer.resolve_pointer( from_data, operation['path'] ) attribute, election, leaf = m.groups() if m else (None, None, None) if attribute: explain_standing_in_and_party_memberships(operation, attribute, election, leaf) if op in ('replace', 'remove'): # Ignore replacing no data with no data: if op == 'replace' and \ not operation['previous_value'] and \ not operation['value']: continue if op == 'replace' and not operation['previous_value']: operation['op'] = 'add' elif op == 'add': # It's important that we don't skip the case where a # standing_in value is being set to None, because that's # saying 'we *know* they're not standing then' if (not operation['value']) and (attribute != 'standing_in'): continue operation['path'] = re.sub(r'^/', '', operation['path']) result.append(operation) result.sort(key=lambda o: (o['op'], o['path'])) return result def clean_version_data(data): # We're not interested in changes of these IDs: for i in data.get('identifiers', []): i.pop('id', None) for on in data.get('other_names', []): on.pop('id', None) data.pop('last_party', None) data.pop('proxy_image', None) data.pop('date_of_birth', None) def get_version_diffs(versions): """Add a diff to each of an array of version dicts The first version is the most recent; the last is the original version.""" result = [] n = len(versions) for i, v in enumerate(versions): # to_version_data = replace_empty_with_none( # versions[i]['data'] # ) to_version_data = versions[i]['data'] if i == (n - 1): from_version_data = {} else: # from_version_data = replace_empty_with_none( # versions[i + 1]['data'] # ) from_version_data = versions[i + 1]['data'] clean_version_data(to_version_data) clean_version_data(from_version_data) version_with_diff = versions[i].copy() version_with_diff['diff'] = \ get_version_diff(from_version_data, to_version_data) result.append(version_with_diff) return result
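# --- Hedged illustration (not part of the original module) -------------------
# get_version_diff() above starts from jsonpatch.make_patch() and then rewrites
# the raw operations into human-readable form. This standalone sketch shows the
# kind of operation dicts that function receives; the candidate data is invented.
import jsonpatch

old_version = {'name': 'A. Candidate', 'standing_in': {'2010': {'name': 'Old Seat'}}}
new_version = {'name': 'A. Candidate', 'standing_in': {'2010': {'name': 'New Seat'}}}

for operation in jsonpatch.make_patch(old_version, new_version):
    # Each operation is a dict such as:
    # {'op': 'replace', 'path': '/standing_in/2010/name', 'value': 'New Seat'}
    print(operation)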
agpl-3.0
-1,124,514,147,237,027,800
42.288557
124
0.54488
false
4.378963
false
false
false
iwm911/plaso
plaso/classifier/scanner.py
1
24473
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2013 The Plaso Project Authors. # Please see the AUTHORS file for details on individual authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This file contains the classes for a scan tree-based format scanner.""" import logging import os from plaso.classifier import patterns from plaso.classifier import range_list from plaso.classifier import scan_tree class _ScanMatch(object): """Class that implements a scan match.""" def __init__(self, total_data_offset, pattern): """Initializes the scan result. Args: total_data_offset: the offset of the resulting match relative to the start of the total data scanned. pattern: the pattern matched. """ super(_ScanMatch, self).__init__() self.total_data_offset = total_data_offset self.pattern = pattern @property def specification(self): """The specification.""" return self.pattern.specification class _ScanResult(object): """Class that implements a scan result.""" def __init__(self, specification): """Initializes the scan result. Args: scan_tree_node: the corresponding scan tree node or None. """ super(_ScanResult, self).__init__() self.specification = specification self.scan_matches = [] @property def identifier(self): """The specification identifier.""" return self.specification.identifier class ScanState(object): """Class that implements a scan state.""" # The state definitions. _SCAN_STATE_START = 1 _SCAN_STATE_SCANNING = 2 _SCAN_STATE_STOP = 3 def __init__(self, scan_tree_node, total_data_size=None): """Initializes the scan state. Args: scan_tree_node: the corresponding scan tree node or None. total_data_size: optional value to indicate the total data size. The default is None. """ super(ScanState, self).__init__() self._matches = [] self.remaining_data = None self.remaining_data_size = 0 self.scan_tree_node = scan_tree_node self.state = self._SCAN_STATE_START self.total_data_offset = 0 self.total_data_size = total_data_size def AddMatch(self, total_data_offset, pattern): """Adds a result to the state to scanning. Args: total_data_offset: the offset of the resulting match relative to the start total data scanned. pattern: the pattern matched. Raises: RuntimeError: when a unsupported state is encountered. """ if (self.state != self._SCAN_STATE_START and self.state != self._SCAN_STATE_SCANNING): raise RuntimeError(u'Unsupported scan state.') self._matches.append(_ScanMatch(total_data_offset, pattern)) def GetMatches(self): """Retrieves a list containing the results. Returns: A list of scan matches (instances of _ScanMatch). Raises: RuntimeError: when a unsupported state is encountered. """ if self.state != self._SCAN_STATE_STOP: raise RuntimeError(u'Unsupported scan state.') return self._matches def Reset(self, scan_tree_node): """Resets the state to start. This function will clear the remaining data. Args: scan_tree_node: the corresponding scan tree node or None. Raises: RuntimeError: when a unsupported state is encountered. 
""" if self.state != self._SCAN_STATE_STOP: raise RuntimeError(u'Unsupported scan state.') self.remaining_data = None self.remaining_data_size = 0 self.scan_tree_node = scan_tree_node self.state = self._SCAN_STATE_START def Scanning(self, scan_tree_node, total_data_offset): """Sets the state to scanning. Args: scan_tree_node: the active scan tree node. total_data_offset: the offset of the resulting match relative to the start of the total data scanned. Raises: RuntimeError: when a unsupported state is encountered. """ if (self.state != self._SCAN_STATE_START and self.state != self._SCAN_STATE_SCANNING): raise RuntimeError(u'Unsupported scan state.') self.scan_tree_node = scan_tree_node self.state = self._SCAN_STATE_SCANNING self.total_data_offset = total_data_offset def Stop(self): """Sets the state to stop. Raises: RuntimeError: when a unsupported state is encountered. """ if (self.state != self._SCAN_STATE_START and self.state != self._SCAN_STATE_SCANNING): raise RuntimeError(u'Unsupported scan state.') self.scan_tree_node = None self.state = self._SCAN_STATE_STOP class ScanTreeScannerBase(object): """Class that implements a scan tree-based scanner base.""" def __init__(self, specification_store): """Initializes the scanner. Args: specification_store: the specification store (instance of SpecificationStore) that contains the format specifications. """ super(ScanTreeScannerBase, self).__init__() self._scan_tree = None self._specification_store = specification_store def _ScanBufferScanState( self, scan_tree_object, scan_state, data, data_size, total_data_offset, total_data_size=None): """Scans a buffer using the scan tree. This function implements a Boyer–Moore–Horspool equivalent approach in combination with the scan tree. Args: scan_tree_object: the scan tree (instance of ScanTree). scan_state: the scan state (instance of ScanState). data: a buffer containing raw data. data_size: the size of the raw data in the buffer. total_data_offset: the offset of the data relative to the start of the total data scanned. total_data_size: optional value to indicate the total data size. The default is None. Raises: RuntimeError: if the total data offset, total data size or the last pattern offset value is out of bounds """ if total_data_size is not None and total_data_size < 0: raise RuntimeError(u'Invalid total data size, value out of bounds.') if total_data_offset < 0 or ( total_data_size is not None and total_data_offset >= total_data_size): raise RuntimeError(u'Invalid total data offset, value out of bounds.') data_offset = 0 scan_tree_node = scan_state.scan_tree_node if scan_state.remaining_data: # str.join() should be more efficient then concatenation by +. 
data = ''.join([scan_state.remaining_data, data]) data_size += scan_state.remaining_data_size scan_state.remaining_data = None scan_state.remaining_data_size = 0 if (total_data_size is not None and total_data_offset + data_size >= total_data_size): match_on_boundary = True else: match_on_boundary = False while data_offset < data_size: if (not match_on_boundary and data_offset + scan_tree_object.largest_length >= data_size): break found_match = False scan_done = False while not scan_done: scan_object = scan_tree_node.CompareByteValue( data, data_offset, data_size, total_data_offset, total_data_size=total_data_size) if isinstance(scan_object, scan_tree.ScanTreeNode): scan_tree_node = scan_object else: scan_done = True if isinstance(scan_object, patterns.Pattern): pattern_length = len(scan_object.signature.expression) data_last_offset = data_offset + pattern_length if cmp(scan_object.signature.expression, data[data_offset:data_last_offset]) == 0: if (not scan_object.signature.is_bound or scan_object.signature.offset == data_offset): found_match = True logging.debug( u'Signature match at data offset: 0x{0:08x}.'.format( data_offset)) scan_state.AddMatch(total_data_offset + data_offset, scan_object) if found_match: skip_value = len(scan_object.signature.expression) scan_tree_node = scan_tree_object.root_node else: last_pattern_offset = ( scan_tree_object.skip_table.skip_pattern_length - 1) if data_offset + last_pattern_offset >= data_size: raise RuntimeError( u'Invalid last pattern offset, value out of bounds.') skip_value = 0 while last_pattern_offset >= 0 and not skip_value: last_data_offset = data_offset + last_pattern_offset byte_value = ord(data[last_data_offset]) skip_value = scan_tree_object.skip_table[byte_value] last_pattern_offset -= 1 if not skip_value: skip_value = 1 scan_tree_node = scan_tree_object.root_node data_offset += skip_value if not match_on_boundary and data_offset < data_size: scan_state.remaining_data = data[data_offset:data_size] scan_state.remaining_data_size = data_size - data_offset scan_state.Scanning(scan_tree_node, total_data_offset + data_offset) def _ScanBufferScanStateFinal(self, scan_tree_object, scan_state): """Scans the remaining data in the scan state using the scan tree. Args: scan_tree_object: the scan tree (instance of ScanTree). scan_state: the scan state (instance of ScanState). """ if scan_state.remaining_data: data = scan_state.remaining_data data_size = scan_state.remaining_data_size scan_state.remaining_data = None scan_state.remaining_data_size = 0 # Setting the total data size will make boundary matches are returned # in this scanning pass. total_data_size = scan_state.total_data_size if total_data_size is None: total_data_size = scan_state.total_data_offset + data_size self._ScanBufferScanState( scan_tree_object, scan_state, data, data_size, scan_state.total_data_offset, total_data_size=total_data_size) scan_state.Stop() def GetScanResults(self, scan_state): """Retrieves the scan results. Args: scan_state: the scan state (instance of ScanState). Return: A list of scan results (instances of _ScanResult). 
""" scan_results = {} for scan_match in scan_state.GetMatches(): specification = scan_match.specification identifier = specification.identifier logging.debug( u'Scan match at offset: 0x{0:08x} specification: {1:s}'.format( scan_match.total_data_offset, identifier)) if identifier not in scan_results: scan_results[identifier] = _ScanResult(specification) scan_results[identifier].scan_matches.append(scan_match) return scan_results.values() class Scanner(ScanTreeScannerBase): """Class that implements a scan tree-based scanner.""" _READ_BUFFER_SIZE = 512 def __init__(self, specification_store): """Initializes the scanner. Args: specification_store: the specification store (instance of SpecificationStore) that contains the format specifications. """ super(Scanner, self).__init__(specification_store) def ScanBuffer(self, scan_state, data, data_size): """Scans a buffer. Args: scan_state: the scan state (instance of ScanState). data: a buffer containing raw data. data_size: the size of the raw data in the buffer. """ self._ScanBufferScanState( self._scan_tree, scan_state, data, data_size, scan_state.total_data_offset, total_data_size=scan_state.total_data_size) def ScanFileObject(self, file_object): """Scans a file-like object. Args: file_object: a file-like object. Returns: A list of scan results (instances of ScanResult). """ file_offset = 0 if hasattr(file_object, 'get_size'): file_size = file_object.get_size() else: file_object.seek(0, os.SEEK_END) file_size = file_object.tell() scan_state = self.StartScan(total_data_size=file_size) file_object.seek(file_offset, os.SEEK_SET) while file_offset < file_size: data = file_object.read(self._READ_BUFFER_SIZE) data_size = len(data) if data_size == 0: break self._ScanBufferScanState( self._scan_tree, scan_state, data, data_size, file_offset, total_data_size=file_size) file_offset += data_size self.StopScan(scan_state) return self.GetScanResults(scan_state) def StartScan(self, total_data_size=None): """Starts a scan. The function sets up the scanning related structures if necessary. Args: total_data_size: optional value to indicate the total data size. The default is None. Returns: A scan state (instance of ScanState). Raises: RuntimeError: when total data size is invalid. """ if total_data_size is not None and total_data_size < 0: raise RuntimeError(u'Invalid total data size.') if self._scan_tree is None: self._scan_tree = scan_tree.ScanTree( self._specification_store, None) return ScanState(self._scan_tree.root_node, total_data_size=total_data_size) def StopScan(self, scan_state): """Stops a scan. Args: scan_state: the scan state (instance of ScanState). """ self._ScanBufferScanStateFinal(self._scan_tree, scan_state) class OffsetBoundScanner(ScanTreeScannerBase): """Class that implements an offset-bound scan tree-based scanner.""" _READ_BUFFER_SIZE = 512 def __init__(self, specification_store): """Initializes the scanner. Args: specification_store: the specification store (instance of SpecificationStore) that contains the format specifications. """ super(OffsetBoundScanner, self).__init__(specification_store) self._footer_scan_tree = None self._footer_spanning_range = None self._header_scan_tree = None self._header_spanning_range = None def _GetFooterRange(self, total_data_size): """Retrieves the read buffer aligned footer range. Args: total_data_size: optional value to indicate the total data size. The default is None. Returns: A range (instance of Range). 
""" # The actual footer range is in reverse since the spanning footer range # is based on positive offsets, where 0 is the end of file. if self._footer_spanning_range.end_offset < total_data_size: footer_range_start_offset = ( total_data_size - self._footer_spanning_range.end_offset) else: footer_range_start_offset = 0 # Calculate the lower bound modulus of the footer range start offset # in increments of the read buffer size. footer_range_start_offset /= self._READ_BUFFER_SIZE footer_range_start_offset *= self._READ_BUFFER_SIZE # Calculate the upper bound modulus of the footer range size # in increments of the read buffer size. footer_range_size = self._footer_spanning_range.size remainder = footer_range_size % self._READ_BUFFER_SIZE footer_range_size /= self._READ_BUFFER_SIZE if remainder > 0: footer_range_size += 1 footer_range_size *= self._READ_BUFFER_SIZE return range_list.Range(footer_range_start_offset, footer_range_size) def _GetHeaderRange(self): """Retrieves the read buffer aligned header range. Returns: A range (instance of Range). """ # Calculate the lower bound modulus of the header range start offset # in increments of the read buffer size. header_range_start_offset = self._header_spanning_range.start_offset header_range_start_offset /= self._READ_BUFFER_SIZE header_range_start_offset *= self._READ_BUFFER_SIZE # Calculate the upper bound modulus of the header range size # in increments of the read buffer size. header_range_size = self._header_spanning_range.size remainder = header_range_size % self._READ_BUFFER_SIZE header_range_size /= self._READ_BUFFER_SIZE if remainder > 0: header_range_size += 1 header_range_size *= self._READ_BUFFER_SIZE return range_list.Range(header_range_start_offset, header_range_size) def _ScanBufferScanState( self, scan_tree_object, scan_state, data, data_size, total_data_offset, total_data_size=None): """Scans a buffer using the scan tree. This function implements a Boyer–Moore–Horspool equivalent approach in combination with the scan tree. Args: scan_tree_object: the scan tree (instance of ScanTree). scan_state: the scan state (instance of ScanState). data: a buffer containing raw data. data_size: the size of the raw data in the buffer. total_data_offset: the offset of the data relative to the start of the total data scanned. total_data_size: optional value to indicate the total data size. The default is None. """ scan_done = False scan_tree_node = scan_tree_object.root_node while not scan_done: data_offset = 0 scan_object = scan_tree_node.CompareByteValue( data, data_offset, data_size, total_data_offset, total_data_size=total_data_size) if isinstance(scan_object, scan_tree.ScanTreeNode): scan_tree_node = scan_object else: scan_done = True if isinstance(scan_object, patterns.Pattern): pattern_length = len(scan_object.signature.expression) pattern_start_offset = scan_object.signature.offset pattern_end_offset = pattern_start_offset + pattern_length if cmp(scan_object.signature.expression, data[pattern_start_offset:pattern_end_offset]) == 0: scan_state.AddMatch( total_data_offset + scan_object.signature.offset, scan_object) logging.debug( u'Signature match at data offset: 0x{0:08x}.'.format(data_offset)) # TODO: implement. # def ScanBuffer(self, scan_state, data, data_size): # """Scans a buffer. # Args: # scan_state: the scan state (instance of ScanState). # data: a buffer containing raw data. # data_size: the size of the raw data in the buffer. # """ # # TODO: fix footer scanning logic. # # need to know the file size here for the footers. 
# # TODO: check for clashing ranges? # header_range = self._GetHeaderRange() # footer_range = self._GetFooterRange(scan_state.total_data_size) # if self._scan_tree == self._header_scan_tree: # if (scan_state.total_data_offset >= header_range.start_offset and # scan_state.total_data_offset < header_range.end_offset): # self._ScanBufferScanState( # self._scan_tree, scan_state, data, data_size, # scan_state.total_data_offset, # total_data_size=scan_state.total_data_size) # elif scan_state.total_data_offset > header_range.end_offset: # # TODO: implement. # pass # if self._scan_tree == self._footer_scan_tree: # if (scan_state.total_data_offset >= footer_range.start_offset and # scan_state.total_data_offset < footer_range.end_offset): # self._ScanBufferScanState( # self._scan_tree, scan_state, data, data_size, # scan_state.total_data_offset, # total_data_size=scan_state.total_data_size) def ScanFileObject(self, file_object): """Scans a file-like object. Args: file_object: a file-like object. Returns: A scan state (instance of ScanState). """ # TODO: add support for fixed size block-based reads. if hasattr(file_object, 'get_size'): file_size = file_object.get_size() else: file_object.seek(0, os.SEEK_END) file_size = file_object.tell() file_offset = 0 scan_state = self.StartScan(total_data_size=file_size) if self._header_scan_tree.root_node is not None: header_range = self._GetHeaderRange() # TODO: optimize the read by supporting fixed size block-based reads. # if file_offset < header_range.start_offset: # file_offset = header_range.start_offset file_object.seek(file_offset, os.SEEK_SET) # TODO: optimize the read by supporting fixed size block-based reads. # data = file_object.read(header_range.size) data = file_object.read(header_range.end_offset) data_size = len(data) if data_size > 0: self._ScanBufferScanState( self._scan_tree, scan_state, data, data_size, file_offset, total_data_size=file_size) file_offset += data_size if self._footer_scan_tree.root_node is not None: self.StopScan(scan_state) self._scan_tree = self._footer_scan_tree scan_state.Reset(self._scan_tree.root_node) if self._footer_scan_tree.root_node is not None: footer_range = self._GetFooterRange(file_size) # Note that the offset in the footer scan tree start with 0. Make sure # the data offset of the data being scanned is aligned with the offset # in the scan tree. if footer_range.start_offset < self._footer_spanning_range.end_offset: data_offset = ( self._footer_spanning_range.end_offset - footer_range.start_offset) else: data_offset = 0 if file_offset < footer_range.start_offset: file_offset = footer_range.start_offset file_object.seek(file_offset, os.SEEK_SET) data = file_object.read(self._READ_BUFFER_SIZE) data_size = len(data) if data_size > 0: self._ScanBufferScanState( self._scan_tree, scan_state, data[data_offset:], data_size - data_offset, file_offset + data_offset, total_data_size=file_size) file_offset += data_size self.StopScan(scan_state) return self.GetScanResults(scan_state) def StartScan(self, total_data_size=None): """Starts a scan. The function sets up the scanning related structures if necessary. Args: total_data_size: optional value to indicate the total data size. The default is None. Returns: A list of scan results (instances of ScanResult). Raises: RuntimeError: when total data size is invalid. 
""" if total_data_size is None or total_data_size < 0: raise RuntimeError(u'Invalid total data size.') if self._header_scan_tree is None: self._header_scan_tree = scan_tree.ScanTree( self._specification_store, True, offset_mode=scan_tree.ScanTree.OFFSET_MODE_POSITIVE) if self._header_spanning_range is None: spanning_range = self._header_scan_tree.range_list.GetSpanningRange() self._header_spanning_range = spanning_range if self._footer_scan_tree is None: self._footer_scan_tree = scan_tree.ScanTree( self._specification_store, True, offset_mode=scan_tree.ScanTree.OFFSET_MODE_NEGATIVE) if self._footer_spanning_range is None: spanning_range = self._footer_scan_tree.range_list.GetSpanningRange() self._footer_spanning_range = spanning_range if self._header_scan_tree.root_node is not None: self._scan_tree = self._header_scan_tree elif self._footer_scan_tree.root_node is not None: self._scan_tree = self._footer_scan_tree else: self._scan_tree = None if self._scan_tree is not None: root_node = self._scan_tree.root_node else: root_node = None return ScanState(root_node, total_data_size=total_data_size) def StopScan(self, scan_state): """Stops a scan. Args: scan_state: the scan state (instance of ScanState). """ self._ScanBufferScanStateFinal(self._scan_tree, scan_state) self._scan_tree = None
apache-2.0
-7,426,356,127,187,481,000
31.576565
80
0.651707
false
3.871657
false
false
false
dims/neutron
neutron/common/config.py
1
13000
# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routines for configuring Neutron """ import sys from keystoneauth1 import loading as ks_loading from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log as logging import oslo_messaging from oslo_service import wsgi from neutron._i18n import _, _LI from neutron.api.v2 import attributes from neutron.common import utils from neutron import policy from neutron import version LOG = logging.getLogger(__name__) core_opts = [ cfg.StrOpt('bind_host', default='0.0.0.0', help=_("The host IP to bind to")), cfg.PortOpt('bind_port', default=9696, help=_("The port to bind to")), cfg.StrOpt('api_extensions_path', default="", help=_("The path for API extensions. " "Note that this can be a colon-separated list of paths. " "For example: api_extensions_path = " "extensions:/path/to/more/exts:/even/more/exts. " "The __path__ of neutron.extensions is appended to " "this, so if your extensions are in there you don't " "need to specify them here.")), cfg.StrOpt('auth_strategy', default='keystone', help=_("The type of authentication to use")), cfg.StrOpt('core_plugin', help=_("The core plugin Neutron will use")), cfg.ListOpt('service_plugins', default=[], help=_("The service plugins Neutron will use")), cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", help=_("The base MAC address Neutron will use for VIFs. " "The first 3 octets will remain unchanged. If the 4th " "octet is not 00, it will also be used. The others " "will be randomly generated.")), cfg.IntOpt('mac_generation_retries', default=16, help=_("How many times Neutron will retry MAC generation")), cfg.BoolOpt('allow_bulk', default=True, help=_("Allow the usage of the bulk API")), cfg.BoolOpt('allow_pagination', default=False, help=_("Allow the usage of the pagination")), cfg.BoolOpt('allow_sorting', default=False, help=_("Allow the usage of the sorting")), cfg.StrOpt('pagination_max_limit', default="-1", help=_("The maximum number of items returned in a single " "response, value was 'infinite' or negative integer " "means no limit")), cfg.ListOpt('default_availability_zones', default=[], help=_("Default value of availability zone hints. The " "availability zone aware schedulers use this when " "the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a " "comma separated string. This value can be empty. " "In this case, even if availability_zone_hints for " "a resource is empty, availability zone is " "considered for high availability while scheduling " "the resource.")), cfg.IntOpt('max_dns_nameservers', default=5, help=_("Maximum number of DNS nameservers per subnet")), cfg.IntOpt('max_subnet_host_routes', default=20, help=_("Maximum number of host routes per subnet")), cfg.IntOpt('max_fixed_ips_per_port', default=5, deprecated_for_removal=True, help=_("Maximum number of fixed ips per port. 
This option " "is deprecated and will be removed in the N " "release.")), cfg.StrOpt('default_ipv4_subnet_pool', deprecated_for_removal=True, help=_("Default IPv4 subnet pool to be used for automatic " "subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where " "creation of a subnet is being called without a " "subnet pool ID. If not set then no pool " "will be used unless passed explicitly to the subnet " "create. If no pool is used, then a CIDR must be passed " "to create a subnet and that subnet will not be " "allocated from any pool; it will be considered part of " "the tenant's private address space. This option is " "deprecated for removal in the N release.")), cfg.StrOpt('default_ipv6_subnet_pool', deprecated_for_removal=True, help=_("Default IPv6 subnet pool to be used for automatic " "subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where " "creation of a subnet is being called without a " "subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This " "option is deprecated for removal in the N release.")), cfg.BoolOpt('ipv6_pd_enabled', default=False, help=_("Enables IPv6 Prefix Delegation for automatic subnet " "CIDR allocation. " "Set to True to enable IPv6 Prefix Delegation for " "subnet allocation in a PD-capable environment. Users " "making subnet creation requests for IPv6 subnets " "without providing a CIDR or subnetpool ID will be " "given a CIDR via the Prefix Delegation mechanism. " "Note that enabling PD will override the behavior of " "the default IPv6 subnetpool.")), cfg.IntOpt('dhcp_lease_duration', default=86400, deprecated_name='dhcp_lease_time', help=_("DHCP lease duration (in seconds). Use -1 to tell " "dnsmasq to use infinite lease times.")), cfg.StrOpt('dns_domain', default='openstacklocal', help=_('Domain to use for building the hostnames')), cfg.StrOpt('external_dns_driver', help=_('Driver for external DNS integration.')), cfg.BoolOpt('dhcp_agent_notification', default=True, help=_("Allow sending resource operation" " notification to DHCP agent")), cfg.BoolOpt('allow_overlapping_ips', default=False, help=_("Allow overlapping IP support in Neutron. " "Attention: the following parameter MUST be set to " "False if Neutron is being used in conjunction with " "Nova security groups.")), cfg.StrOpt('host', default=utils.get_hostname(), sample_default='example.domain', help=_("Hostname to be used by the Neutron server, agents and " "services running on this machine. All the agents and " "services running on this machine must use the same " "host value.")), cfg.BoolOpt('force_gateway_on_subnet', default=True, deprecated_for_removal=True, help=_("Ensure that configured gateway is on subnet. " "For IPv6, validate only if gateway is not a link " "local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not " "be forced on to subnet.")), cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, help=_("Send notification to nova when port status changes")), cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, help=_("Send notification to nova when port data (fixed_ips/" "floatingip) changes so nova can update its cache.")), cfg.IntOpt('send_events_interval', default=2, help=_('Number of seconds between sending events to nova if ' 'there are any events to send.')), cfg.BoolOpt('advertise_mtu', default=True, help=_('If True, advertise network MTU values if core plugin ' 'calculates them. 
MTU is advertised to running ' 'instances via DHCP and RA MTU options.')), cfg.StrOpt('ipam_driver', help=_("Neutron IPAM (IP address management) driver to use. " "If ipam_driver is not set (default behavior), no IPAM " "driver is used. In order to use the reference " "implementation of Neutron IPAM driver, " "use 'internal'.")), cfg.BoolOpt('vlan_transparent', default=False, help=_('If True, then allow plugins that support it to ' 'create VLAN transparent networks.')), cfg.StrOpt('web_framework', default='legacy', choices=('legacy', 'pecan'), help=_("This will choose the web framework in which to run " "the Neutron API server. 'pecan' is a new experiemental " "rewrite of the API server.")) ] core_cli_opts = [ cfg.StrOpt('state_path', default='/var/lib/neutron', help=_("Where to store Neutron state files. " "This directory must be writable by the agent.")), ] # Register the configuration options cfg.CONF.register_opts(core_opts) cfg.CONF.register_cli_opts(core_cli_opts) wsgi.register_opts(cfg.CONF) # Ensure that the control exchange is set correctly oslo_messaging.set_transport_defaults(control_exchange='neutron') def set_db_defaults(): # Update the default QueuePool parameters. These can be tweaked by the # conf variables - max_pool_size, max_overflow and pool_timeout db_options.set_defaults( cfg.CONF, connection='sqlite://', sqlite_db='', max_pool_size=10, max_overflow=20, pool_timeout=10) set_db_defaults() NOVA_CONF_SECTION = 'nova' ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION) ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION) nova_opts = [ cfg.StrOpt('region_name', help=_('Name of nova region to use. Useful if keystone manages' ' more than one region.')), cfg.StrOpt('endpoint_type', default='public', choices=['public', 'admin', 'internal'], help=_('Type of the nova endpoint to use. This endpoint will' ' be looked up in the keystone catalog and should be' ' one of public, internal or admin.')), ] cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION) logging.register_options(cfg.CONF) def init(args, **kwargs): cfg.CONF(args=args, project='neutron', version='%%(prog)s %s' % version.version_info.release_string(), **kwargs) # FIXME(ihrachys): if import is put in global, circular import # failure occurs from neutron.common import rpc as n_rpc n_rpc.init(cfg.CONF) # Validate that the base_mac is of the correct format msg = attributes._validate_regex(cfg.CONF.base_mac, attributes.MAC_PATTERN) if msg: msg = _("Base MAC: %s") % msg raise Exception(msg) def setup_logging(): """Sets up the logging options for a log with supplied name.""" product_name = "neutron" logging.setup(cfg.CONF, product_name) LOG.info(_LI("Logging enabled!")) LOG.info(_LI("%(prog)s version %(version)s"), {'prog': sys.argv[0], 'version': version.version_info.release_string()}) LOG.debug("command line: %s", " ".join(sys.argv)) def reset_service(): # Reset worker in case SIGHUP is called. # Note that this is called only in case a service is running in # daemon mode. setup_logging() policy.refresh() def load_paste_app(app_name): """Builds and returns a WSGI app from a paste config file. :param app_name: Name of the application to load """ loader = wsgi.Loader(cfg.CONF) app = loader.load_app(app_name) return app
license: apache-2.0
hash: -2,916,700,827,220,835,000
line_mean: 46.619048
line_max: 79
alpha_frac: 0.585846
autogenerated: false
ratio: 4.506066
config_test: false
has_no_keywords: false
few_assignments: false
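The numeric fields attached to each record read like simple text statistics over its content: mean and maximum line length, the fraction of alphabetic characters, a size ratio, plus a handful of boolean filter flags. The dump itself does not define them, so the sketch below is only one plausible way to recompute the obvious ones from a record's raw file text; the function name and the exact formulas are assumptions, not the pipeline that actually produced these values.

# Illustrative sketch (not the dataset's actual preprocessing code):
# recompute a few per-record statistics from a record's raw file content,
# assuming line_mean/line_max/alpha_frac mean what their names suggest.
def text_stats(content):
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    return {
        "size": len(content),                              # characters in the file
        "line_mean": sum(lengths) / float(len(lengths)),   # average line length
        "line_max": max(lengths),                          # longest line
        "alpha_frac": (sum(ch.isalpha() for ch in content) / float(len(content))
                       if content else 0.0),               # share of alphabetic characters
    }

if __name__ == "__main__":
    sample = "def add(a, b):\n    return a + b\n"
    print(text_stats(sample))

Read this way, the line_max of 79 recorded for the file above is consistent with a PEP 8-wrapped source file, but only the original preprocessing code could confirm the exact definitions.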
repo_name: nginxinc/kubernetes-ingress
path: tests/suite/grpc/helloworld_pb2.py
copies: 1
size: 3911
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: helloworld.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='helloworld.proto', package='helloworld', syntax='proto3', serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3') ) _HELLOREQUEST = _descriptor.Descriptor( name='HelloRequest', full_name='helloworld.HelloRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='helloworld.HelloRequest.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=32, serialized_end=60, ) _HELLOREPLY = _descriptor.Descriptor( name='HelloReply', full_name='helloworld.HelloReply', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='message', full_name='helloworld.HelloReply.message', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=62, serialized_end=91, ) DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY _sym_db.RegisterFileDescriptor(DESCRIPTOR) HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict( DESCRIPTOR = _HELLOREQUEST, __module__ = 'helloworld_pb2' # @@protoc_insertion_point(class_scope:helloworld.HelloRequest) )) _sym_db.RegisterMessage(HelloRequest) HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict( DESCRIPTOR = _HELLOREPLY, __module__ = 'helloworld_pb2' # @@protoc_insertion_point(class_scope:helloworld.HelloReply) )) _sym_db.RegisterMessage(HelloReply) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW')) _GREETER = _descriptor.ServiceDescriptor( name='Greeter', full_name='helloworld.Greeter', file=DESCRIPTOR, index=0, options=None, serialized_start=93, serialized_end=166, methods=[ _descriptor.MethodDescriptor( name='SayHello', full_name='helloworld.Greeter.SayHello', index=0, containing_service=None, input_type=_HELLOREQUEST, output_type=_HELLOREPLY, options=None, ), ]) 
_sym_db.RegisterServiceDescriptor(_GREETER) DESCRIPTOR.services_by_name['Greeter'] = _GREETER # @@protoc_insertion_point(module_scope)
license: apache-2.0
hash: -1,847,906,107,374,825,000
line_mean: 28.19403
line_max: 369
alpha_frac: 0.722833
autogenerated: false
ratio: 3.156578
config_test: false
has_no_keywords: true
few_assignments: false

repo_name: ercanezin/ce888labs
path: lab8/imdb.py
copies: 1
size: 2192
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import Dense, Activation, Embedding, GlobalMaxPooling1D, Convolution1D, Input, LSTM, merge
from keras.datasets import imdb

max_features = 20000
maxlen = 80  # cut texts after this number of words (among top max_features most common words)
batch_size = 32

###PREPROCESSING
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print(X_train[0])

print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
###PREPROCESSING ENDS

inputs = Input(shape=(maxlen,))
m = inputs
m = Embedding(max_features, 128, dropout=0.2)(m)

# Convolutional branch: 1D convolution over the embedded sequence, reduced
# to a fixed-size vector with global max pooling.
x = Convolution1D(nb_filter=32, filter_length=4, border_mode='valid', activation='relu', subsample_length=1)(m)
x = GlobalMaxPooling1D()(x)

# Recurrent branch: an LSTM over the same embeddings.
y = LSTM(70)(m)

# Concatenate both branches and classify with a single sigmoid unit.
z = merge([x, y], mode='concat', concat_axis=1)
z = Dense(1)(z)
predictions = Activation("sigmoid")(z)

model = Model(input=inputs, output=predictions)

#
# model = Sequential()
# model.add(Embedding(max_features, embedding_size, input_length=maxlen))
# model.add(Dropout(0.25))
# model.add(Convolution1D(nb_filter=nb_filter,
#                         filter_length=filter_length,
#                         border_mode='valid',
#                         activation='relu',
#                         subsample_length=1))
# model.add(MaxPooling1D(pool_length=pool_length))
# model.add(LSTM(lstm_output_size))
# model.add(Dense(1))
# model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15, validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
license: gpl-3.0
hash: 8,493,642,314,764,031,000
line_mean: 24.206897
line_max: 110
alpha_frac: 0.69115
autogenerated: false
ratio: 3.100424
config_test: true
has_no_keywords: false
few_assignments: false

repo_name: zergov/flashcards
path: flashcards/sets.py
copies: 1
size: 3877
""" flashcards.sets ~~~~~~~~~~~~~~~~~~~ Contain the StudySet object and logic related to it. """ from collections import OrderedDict from flashcards import cards from flashcards.cards import StudyCard TITLE_KEY = 'title' DESC_KEY = 'description' CARDS_KEY = 'cards' def create_from_dict(data): """ Construct a StudySet Object from a dictionary object. :param data: the dictionary object :raises KeyError: when dictionary is missing a needed field to create obj :raises ValueError: if cards field in data is not of type list :returns: StudySet object """ _assert_data_is_valid(data) title = data[TITLE_KEY] description = data[DESC_KEY] study_cards = [cards.create_from_dict(card) for card in data[CARDS_KEY]] study_set = StudySet(title, description) for card in study_cards: study_set.add(card) return study_set def _assert_data_is_valid(data): """ Check that data received in `create_from_dict` has a valid format """ if TITLE_KEY not in data: raise KeyError("Invalid data string. %s key is missing" % TITLE_KEY) if DESC_KEY not in data: raise KeyError("Invalid data string. %s key is missing" % DESC_KEY) if CARDS_KEY not in data: raise KeyError("Invalid data string. %s key is missing" % CARDS_KEY) if not isinstance(data[CARDS_KEY], list): raise ValueError("Invalid data type. %s value's should be a list" % CARDS_KEY) class StudySet(object): """ A StudySet is a container of flash cards. """ def __init__(self, title, description=None): """ Creates a Study set. :param title: The title of the study set. :param description: The description for this study set. """ self._title = title self._description = '' if description is None else description self._cards = [] def __iter__(self): """Iter through the cards of this set.""" return iter(self._cards) def __len__(self): """Return the number of cards in this StudySet.""" return len(self._cards) @property def title(self): """ Get the title of this set. :returns: The title of this Study set. """ return self._title @title.setter def title(self, value): """ Set the title of this set. :param value: The new title for this set """ if isinstance(value, basestring): self._title = value else: raise TypeError("StudySet title should be of type str") @property def description(self): """ Get the description of this set. """ return self._description @description.setter def description(self, value): """ Set the description of this set. :param value: The new description for this set """ if isinstance(value, basestring): self._description = value else: raise TypeError("StudySet description should be of type str") def add(self, card): """ Add a card to the end of this set. :param card: A subclass of flashcards.cards.StudyCard object. """ if isinstance(card, StudyCard): self._cards.append(card) else: raise TypeError("A Set can only contain instances of " "StudyCard objects.") def to_dict(self): """ Get a dictionary object representing this StudySet. :returns: a dictionary object representation of this StudySet. """ serialized_cards = [c.to_dict() for c in self] data = ((TITLE_KEY, self.title), (DESC_KEY, self.description), (CARDS_KEY, serialized_cards)) return OrderedDict(data)
license: mit
hash: -994,116,634,628,581,500
line_mean: 25.923611
line_max: 77
alpha_frac: 0.594532
autogenerated: false
ratio: 4.21413
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: lpryszcz/REDiscover
path: taxid2sra.py
copies: 1
size: 13105
#!/usr/bin/env python desc="""Fetch all entries from SRA for given taxid. Save the biggest run per each SAMPLE (SRS) from given date. Paired first, if any. Note, it run fastq-dump in background. Make sure you have enough free cores;) DEPENDENCIES: Biopython """ epilog="""Author: [email protected] Barcelona, 2/10/2012 """ import argparse, os, re, sys, gzip from datetime import datetime from ftplib import FTP from Bio import Entrez import xml.etree.ElementTree as ET def srr2info(srr): """Return info for SRR entry - experiment id - submission id - project id - biosample id - run date - bases - insert size - insert std - reads orientation """ ''' for child in root[0]: print child.tag, child.attrib EXPERIMENT {'center_name': 'BI', 'alias': '74116.WR23613.Solexa-42619.62C7UAAXX100916.P', 'accession': 'SRX026545'} SUBMISSION {'submission_date': '2009-06-01T02:01:25Z', 'lab_name': 'Genome Sequencing', 'submission_comment': 'Produced by user cristyn on Sun May 31 22:01:25 EDT 2009', 'alias': 'BI.Streptococcus_pyogenes_Pathogenomics', 'center_name': 'BI', 'accession': 'SRA008647'} STUDY {'center_name': 'BI', 'alias': 'Fusarium_oxysporum_Diversity_RNA_Sequencing_multi_isolate', 'accession': 'SRP002351'} SAMPLE {'center_name': 'BI', 'alias': '74336.0', 'accession': 'SRS190364'} RUN_SET {} root[0][0].keys() ['center_name', 'alias', 'accession'] ''' #search NCBI result = Entrez.read( Entrez.esearch(db="sra",term=srr ) ) if not result['IdList']: sys.stderr.write( " Entrez Error: No results for %s\n" % srr ) return elif len(result['IdList'])>1: sys.stderr.write( " Entrez Warning: Multiple hits for %s: %s\n" % (srr,",".join(result['IdList'])) ) #fetch info from NCBI xml = Entrez.efetch( db="sra",id=result['IdList'][0] ).read() root = ET.fromstring(xml)#; print xml #get experiment EXPERIMENT = root[0].find("EXPERIMENT") srx = EXPERIMENT.attrib['accession'] #get submission s = root[0].find("SUBMISSION") sra = s.attrib['accession'] #get accession s = root[0].find("STUDY") srp = s.attrib['accession'] #get accession s = root[0].find("SAMPLE") srs = s.attrib['accession'] s = root[0].find('RUN_SET') #it's within RUN_SET date = s[0].attrib['run_date'] bases = s[0].attrib['total_bases'] #LIBRARY_LAYOUT - maybe try to simplify it isize=istdv=orient = 0 DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR") LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT") PAIRED = LIBRARY_LAYOUT.find("PAIRED") if PAIRED is not None: layout = PAIRED.attrib isize = layout['NOMINAL_LENGTH'] # NOMINAL_LENGTH="476" orient = layout['ORIENTATION'] # ORIENTATION="5\'3\'-3\'5\' istdv = layout['NOMINAL_SDEV'] ## PAIRED NOMINAL_SDEV="149.286" return ( srx,sra,srp,srs,date,bases,isize,istdv,orient ) def xml2data(child, taxid2srs, verbose): """ """ #get experiment EXPERIMENT = child.find("EXPERIMENT") srx = EXPERIMENT.attrib['accession'] #get submission s = child.find("SUBMISSION") sra = s.attrib['accession'] #get accession s = child.find("STUDY") srp = s.attrib['accession'] #get accession for SAMPLE in child.findall("SAMPLE"): #if SAMPLE.attrib['accession']!= srs = SAMPLE.attrib['accession'] #get taxid SAMPLE_NAME = SAMPLE.find("SAMPLE_NAME") TAXON_ID = SAMPLE_NAME.find("TAXON_ID") taxid = int(TAXON_ID.text) SCIENTIFIC_NAME = SAMPLE_NAME.find("SCIENTIFIC_NAME") #malformed xml? if SCIENTIFIC_NAME is None: return taxid2srs strain = SCIENTIFIC_NAME.text strain0 = tissue = stage = "" #get strain tag - this may cause problems with non-ENA accessions! 
SAMPLE_ATTRIBUTES = SAMPLE.find("SAMPLE_ATTRIBUTES") if SAMPLE_ATTRIBUTES is None: continue for SAMPLE_ATTRIBUTE in SAMPLE_ATTRIBUTES.findall("SAMPLE_ATTRIBUTE"): #print SAMPLE_ATTRIBUTE.find("TAG").text if SAMPLE_ATTRIBUTE.find("TAG").text == "strain": #print SAMPLE_ATTRIBUTE.find("VALUE") strain += " %s" % SAMPLE_ATTRIBUTE.find("VALUE").text strain0 = SAMPLE_ATTRIBUTE.find("VALUE").text elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-OrganismPart": tissue = SAMPLE_ATTRIBUTE.find("VALUE").text elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-StrainOrLine": strain0 = SAMPLE_ATTRIBUTE.find("VALUE").text elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-DevelopmentalStage": stage = SAMPLE_ATTRIBUTE.find("VALUE").text if strain!="unidentified organism": break # get tissue #LIBRARY_LAYOUT - maybe try to simplify it DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR") LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT") LIBRARY_CONSTRUCTION_PROTOCOL = LIBRARY_DESCRIPTOR.find("LIBRARY_CONSTRUCTION_PROTOCOL")# RNA-seq dUTP eukaryotic stranded = "" if LIBRARY_CONSTRUCTION_PROTOCOL is not None and LIBRARY_CONSTRUCTION_PROTOCOL.text is not None: stranded = re.sub('[ \t\n\r]+', ' ', LIBRARY_CONSTRUCTION_PROTOCOL.text) orient = "" isize = istdv = 0 PAIRED = LIBRARY_LAYOUT.find("PAIRED") if PAIRED is not None: layout = PAIRED.attrib if 'NOMINAL_LENGTH' in layout: isize = float(layout['NOMINAL_LENGTH']) # NOMINAL_LENGTH="476" if 'NOMINAL_SDEV' in layout: istdv = float(layout['NOMINAL_SDEV']) ##PAIRED NOMINAL_SDEV="149.286" if 'ORIENTATION' in layout: orient = layout['ORIENTATION'] #ORIENTATION="5\'3\'-3\'5\' #run data runs = [] RUN_SET = child.find('RUN_SET') #it's within RUN_SET for RUN in RUN_SET.findall("RUN"): srr = RUN.attrib['accession'] date = assembly = "" bases = size = 0 if 'size' in RUN.attrib: size = RUN.attrib['size'] if 'run_date' in RUN.attrib: date = RUN.attrib['run_date'] if 'total_bases' in RUN.attrib: bases = int(RUN.attrib['total_bases']) if "assembly" in RUN.attrib: assembly = RUN.attrib["assembly"] runs.append((srr, assembly, size, bases, date)) #store data childdata = (strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv, orient, stranded, runs) if verbose: sys.stderr.write( " %s: %s: %s\n" % (taxid, srs, str(childdata))) if not taxid in taxid2srs: taxid2srs[taxid] = {} if not srs in taxid2srs[taxid]: taxid2srs[taxid][srs] = [] taxid2srs[taxid][srs].append(childdata) return taxid2srs def taxid2runs(outfn, taxid, verbose, db="sra", retmode="xml", retmax=10**6): """Return info from SRA for given taxid. 
""" taxid2srs = {} #search NCBI term = 'txid%s[organism] AND sra_public[filter] AND "biomol rna"[Properties]' % taxid if verbose: sys.stderr.write("Query: %s\n" % term) result = Entrez.read(Entrez.esearch(db=db, term=term, retmax=retmax))#; print result ids = result['IdList'] if not ids: sys.stderr.write(" Entrez Error: No results for %s\n" % taxid) return if verbose: sys.stderr.write("Downloading %s entries from NCBI %s database...\n" % (len(ids), db)) #post NCBI query for id in ids: xmlfn = os.path.join(".xml", "%s.xml.gz"%id) if os.path.isfile(xmlfn): xml = "".join(l for l in gzip.open(xmlfn)) else: xml = Entrez.efetch(db=db, retmode=retmode, id=id).read()#; print xml with gzip.open(xmlfn, "w") as out: out.write(xml) root = ET.fromstring(xml) child = root[0] taxid2srs = xml2data(child, taxid2srs, verbose) #print output out = open(outfn, "w") # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 header = "#Strain\tStrain0\tTissue\tStage\tTaxid\tSample\tExperiment\tProject\tRun\tInsert size\tOrientation\tStranded\tAssembly\tSize\tBases\tDate\n" out.write(header) info = "%s\t"*15+"%s\n" sys.stderr.write("Saving SRA info to: %s\n" % outfn) for taxid in taxid2srs: for srs in taxid2srs[taxid]: for strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv, orient, stranded, runs in taxid2srs[taxid][srs]: for srr, assembly, size, bases, date in runs: line = info%(strain, strain0, tissue, stage, taxid, srs, srx, srp, srr, isize, orient, stranded, assembly, size, bases, date) out.write(line.encode('ascii', 'xmlcharrefreplace')) out.close() return taxid2srs def get_runs(taxid2srs, ftpdomain, orientth, maxisize, paired, minbases, verbose): """Select the best run for each uniq taxid-srs-date combination """ if verbose: sys.stderr.write( "Fetching best run for each uniq taxid-srs-date combination...\n" ) #select the best run for each uniq taxid-srs-date combination for taxid in taxid2srs: for srs in taxid2srs[taxid]: date2runs={} for strain, taxid, srx, srp, isize, istdv, orient, runs in taxid2srs[taxid][srs]: #check if paired if paired: if not isize: continue #skip if wrong orientation if orientth and orientth!=orient: continue #skip big insert size or not paired if maxisize: if isize>maxisize: continue #add runs passed filtering for srr,bases,date in runs: #skip if too small yield if bases < minbases*10**6: continue if date not in date2runs: date2runs[date]=[] date2runs[date].append( (srr,srx,srp,isize,bases) ) #process best run for each uniq taxid-srs-date combination for date in date2runs: # fltruns = filter( lambda x: x[3]!=0, date2runs[date] ) if not fltruns: fltruns = date2runs[date] #sort by size bestrun = sorted( fltruns,key=lambda x: x[-1],reverse=True )[0] #print bestrun,date2runs[date] srr,srx,srp,isize,bases = bestrun # fetch cmd = "fastq-dump --gzip --split-3 -O %s %s" % (outdir, srr) def main(): usage = "%(prog)s -v" parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog) parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose") parser.add_argument('--version', action='version', version='1.1') parser.add_argument("-d", "--download", default=False, action="store_true", help="download SRA files") parser.add_argument("-t", "--taxid", type=int, required=True, help="taxid of interest " ) parser.add_argument("-f", dest="ftp", default="ftp-trace.ncbi.nih.gov", help="ftp server address [%(default)s]" ) parser.add_argument("-e", "--email", default="[email protected]", type=str, help="email address [%(default)s]" ) parser.add_argument("-o", 
dest="orient", default="5'3'-3'5'", help="orientation [%(default)s]" ) parser.add_argument("-m", dest="maxisize", default=1000, type=int, help="max allowed insert [%(default)s]" ) parser.add_argument("-b", dest="minbases", default=600, type=int, help="min Mbases in run [%(default)s Mbases -> 10x for 60Mb genome]" ) parser.add_argument("-p", "--paired", default=False, action="store_true", help="fetch only paired runs" ) o = parser.parse_args() if o.verbose: sys.stderr.write( "Options: %s\n" % str(o) ) Entrez.email = o.email if not os.path.isdir(".xml"): os.makedirs(".xml") #get all runs for taxid outfn = "sra.tsv" taxid2srs = taxid2runs(outfn, o.taxid, o.verbose); return if o.download: #fetch best srr get_runs( taxid2srs,o.ftp,o.orient,o.maxisize,o.paired,o.minbases,o.verbose ) if __name__=='__main__': t0 = datetime.now() main() dt = datetime.now()-t0 sys.stderr.write( "#Time elapsed: %s\n" % dt )
license: gpl-2.0
hash: 5,824,388,399,453,832,000
line_mean: 42.111842
line_max: 268
alpha_frac: 0.583441
autogenerated: false
ratio: 3.286108
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: eeshangarg/zulip
path: zerver/views/realm_icon.py
copies: 1
size: 2428
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _

from zerver.decorator import require_realm_admin
from zerver.lib.actions import do_change_icon_source
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.response import json_error, json_success
from zerver.lib.upload import upload_icon_image
from zerver.lib.url_encoding import add_query_arg_to_redirect_url
from zerver.models import UserProfile


@require_realm_admin
def upload_icon(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    if len(request.FILES) != 1:
        return json_error(_("You must upload exactly one icon."))
    icon_file = list(request.FILES.values())[0]
    if (settings.MAX_ICON_FILE_SIZE_MIB * 1024 * 1024) < icon_file.size:
        return json_error(
            _("Uploaded file is larger than the allowed limit of {} MiB").format(
                settings.MAX_ICON_FILE_SIZE_MIB,
            )
        )
    upload_icon_image(icon_file, user_profile)
    do_change_icon_source(
        user_profile.realm, user_profile.realm.ICON_UPLOADED, acting_user=user_profile
    )
    icon_url = realm_icon_url(user_profile.realm)

    json_result = dict(
        icon_url=icon_url,
    )
    return json_success(json_result)


@require_realm_admin
def delete_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    # We don't actually delete the icon because it might still
    # be needed if the URL was cached and it is rewritten
    # in any case after next update.
    do_change_icon_source(
        user_profile.realm, user_profile.realm.ICON_FROM_GRAVATAR, acting_user=user_profile
    )
    gravatar_url = realm_icon_url(user_profile.realm)

    json_result = dict(
        icon_url=gravatar_url,
    )
    return json_success(json_result)


def get_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    url = realm_icon_url(user_profile.realm)

    # We can rely on the URL already having query parameters. Because
    # our templates depend on being able to use the ampersand to
    # add query parameters to our url, get_icon_url does '?version=version_number'
    # hacks to prevent us from having to jump through decode/encode hoops.
    url = add_query_arg_to_redirect_url(url, request.META["QUERY_STRING"])
    return redirect(url)
license: apache-2.0
hash: -8,473,574,400,917,262,000
line_mean: 37.539683
line_max: 91
alpha_frac: 0.714168
autogenerated: false
ratio: 3.723926
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: huiyiqun/check_mk
path: cmk/regex.py
copies: 1
size: 2740
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2016             [email protected]      |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
"""This module wraps some regex handling functions used by Check_MK"""

import re

from .exceptions import MKGeneralException

# TODO: Clean this up one day by using the way recommended by gettext.
# (See https://docs.python.org/2/library/gettext.html). For this we
# need the path to the locale files here.
try:
    _
except NameError:
    _ = lambda x: x  # Fake i18n when not available

g_compiled_regexes = {}


def regex(pattern):
    """Compile regex or look it up in already compiled regexes.
    (compiling is a CPU consuming process. We cache compiled regexes)."""
    try:
        return g_compiled_regexes[pattern]
    except KeyError:
        pass

    try:
        reg = re.compile(pattern)
    except Exception, e:
        raise MKGeneralException(_("Invalid regular expression '%s': %s") % (pattern, e))

    g_compiled_regexes[pattern] = reg
    return reg


# Checks if a string contains characters that make it necessary
# to use regular expression logic to handle it correctly
def is_regex(pattern):
    for c in pattern:
        if c in '.?*+^$|[](){}\\':
            return True
    return False


def escape_regex_chars(match):
    r = ""
    for c in match:
        if c in r"[]\().?{}|*^$+":
            r += "\\"
        r += c
    return r
license: gpl-2.0
hash: -2,221,331,912,281,145,600
line_mean: 36.027027
line_max: 89
alpha_frac: 0.531752
autogenerated: false
ratio: 3.697706
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: SublimeHaskell/SublimeHaskell
path: hsdev/backend.py
copies: 1
size: 36770
""" The `hsdev` backend. """ from functools import reduce import io import json import os import os.path import pprint import re import subprocess import threading import sublime import SublimeHaskell.hsdev.callback as HsCallback import SublimeHaskell.hsdev.client as HsDevClient import SublimeHaskell.hsdev.result_parse as ResultParse import SublimeHaskell.internals.backend as Backend import SublimeHaskell.internals.logging as Logging import SublimeHaskell.internals.output_collector as OutputCollector import SublimeHaskell.internals.proc_helper as ProcHelper import SublimeHaskell.internals.settings as Settings import SublimeHaskell.internals.utils as Utils import SublimeHaskell.sublime_haskell_common as Common def result_identity(resp): '''Identity function for results ''' return resp class HsDevBackend(Backend.HaskellBackend): """This class encapsulates all of the functions that interact with the `hsdev` backend. """ HSDEV_DEFAULT_PORT = 4567 HSDEV_DEFAULT_HOST = 'localhost' HSDEV_NOT_FOUND = [0, 0, 0, 0] HSDEV_MIN_VER = [0, 3, 3, 0] # minimum hsdev version HSDEV_MAX_VER = [0, 3, 4, 0] # maximum hsdev version HSDEV_CALL_TIMEOUT = 300.0 # second timeout for synchronous requests (5 minutes should be enough, no?) def __init__(self, backend_mgr, local=True, port=HSDEV_DEFAULT_PORT, host=HSDEV_DEFAULT_HOST, **kwargs): super().__init__(backend_mgr) Logging.log('{0}.__init__({1}, {2})'.format(type(self).__name__, host, port), Logging.LOG_INFO) # Sanity checking: exec_with = kwargs.get('exec-with') install_dir = kwargs.get('install-dir') if bool(exec_with) ^ bool(install_dir): if install_dir is None: sublime.error_message('\n'.join(['\'exec_with\' requires an \'install_dir\'.', '', 'Please check your \'backends\' configuration and retry.'])) raise RuntimeError('\'exec_with\' requires an \'install_dir\'.') else: sublime.error_message('\n'.join(['\'install_dir\' requires an \'exec_with\'.', '', 'Please check your \'backends\' configuration and retry.'])) raise RuntimeError('\'install_dir\' requires an \'exec_with\'.') elif exec_with and exec_with not in ['stack', 'cabal', 'cabal-new-build']: sublime.error_message('\n'.join(['Invalid backend \'exec_with\': {0}'.format(exec_with), '', 'Valid values are "cabal", "cabal-new-build" or "stack".', 'Please check your \'backends\' configuration and retry.'])) raise RuntimeError('Invalid backend \'exec_with\': {0}'.format(exec_with)) # Local hsdev server process and params self.is_local_hsdev = local self.hsdev_process = None self.cache = os.path.join(Common.sublime_haskell_cache_path(), 'hsdev', 'hsdev.db') self.log_file = os.path.join(Common.sublime_haskell_cache_path(), 'hsdev', 'hsdev.log') self.exec_with = exec_with self.install_dir = Utils.normalize_path(install_dir) if install_dir is not None else None # Keep track of the hsdev version early. Needed to patch command line arguments later. self.version = HsDevBackend.hsdev_version(self.exec_with, self.install_dir) self.drain_stdout = None self.drain_stderr = None # Connection params self.port = port self.hostname = host if self.is_local_hsdev: self.hostname = self.HSDEV_DEFAULT_HOST self.client = None self.serial_lock = threading.RLock() self.request_serial = 1 @staticmethod def backend_name(): return 'hsdev' @staticmethod def is_available(**kwargs): # Yes, this is slightly redundant because eventually __init__ does the same thing for a class # instance. 
exec_with = kwargs.get('exec-with') install_dir = kwargs.get('install-dir') local = kwargs.get('local', False) exec_install_set = not bool(exec_with) ^ bool(install_dir) backend_name = kwargs.get('backend_name', 'not specified.') if exec_install_set or local: if not exec_install_set: # Either exec-with or install-dir isn't set, so the corresponding configuration target is unavailable. return False hsdev_ver = HsDevBackend.hsdev_version(exec_with, install_dir) str_version = '.'.join([str(v) for v in hsdev_ver]) Logging.log('hsdev version: {0}'.format(str_version), Logging.LOG_INFO) retval = hsdev_ver >= HsDevBackend.HSDEV_MIN_VER and hsdev_ver < HsDevBackend.HSDEV_MAX_VER if not retval: if retval != HsDevBackend.HSDEV_NOT_FOUND: min_version = '.'.join([str(v) for v in HsDevBackend.HSDEV_MIN_VER]) max_version = '.'.join([str(v) for v in HsDevBackend.HSDEV_MAX_VER]) msg = '\n'.join(['Backend configuration: "{0}"'.format(backend_name), '', 'Incompatible hsdev, detected version ' + str_version, 'Version should be \u2265 ' + min_version + ' and < ' + max_version]) else: msg = '\n'.join(['Backend configuration: "{0}"'.format(backend_name), '', 'Tried executing hsdev to get a version number, not successful.', 'Is hsdev installed (or built, if using stack or cabal exec wrappers)?']) sublime.message_dialog(msg) return retval # Assume that a remote backend is actually available. Ultimately, we might not connect to it, but # it is available to us as a backend. return True def start_backend(self): retval = True if self.is_local_hsdev: Logging.log('Starting local \'hsdev\' server', Logging.LOG_INFO) log_level = Settings.PLUGIN.hsdev_log_level cmd = self.concat_args([(True, ["hsdev"]), (True, ["run"]), (self.port, ["--port", str(self.port)]), (self.cache, ["--db", self.cache]), (self.log_file, ["--log", self.log_file]), (True, ["--log-level", log_level]), (True, ["--no-color"])]) hsdev_proc = ProcHelper.exec_with_wrapper(self.exec_with, self.install_dir, cmd) if hsdev_proc.process is not None: # Use TextIOWrapper here because it combines decoding with newline handling, # which means less to maintain. hsdev_proc.process.stdout = io.TextIOWrapper(hsdev_proc.process.stdout, 'utf-8') hsdev_proc.process.stderr = io.TextIOWrapper(hsdev_proc.process.stderr, 'utf-8') # Read and wait for hsdev's startup messge. 15 seconds should be enough time for the message to appear. # Otherwise, kill the thread because we don't want to get stuck waiting forever. startup_reader = HsDevStartupReader(hsdev_proc.process.stdout) startup_reader.start() startup_reader.wait_startup(15.0) if startup_reader.successful(): port = startup_reader.port() if port != self.port: Logging.log('hsdev: server port changed, was {0}, now {1}'.format(self.port, port), Logging.LOG_WARNING) self.port = port self.drain_stdout = OutputCollector.DescriptorDrain('hsdev stdout', hsdev_proc.process.stdout) self.drain_stderr = OutputCollector.DescriptorDrain('hsdev stderr', hsdev_proc.process.stderr) self.drain_stdout.start() self.drain_stderr.start() self.hsdev_process = hsdev_proc Logging.log('Local \'hsdev\' server started successfully.', Logging.LOG_INFO) else: # This is a bit of a "Hail Mary!" because readline() could just hang forever. Just to make sure, # kill the process too! 
startup_reader.stop() hsdev_proc.process.kill() if hsdev_proc.process_err is not None: Logging.log('Possible reason for timeout: {0}'.format(hsdev_proc.process_err)) self.hsdev_process = None retval = False sublime.error_message('Timed out waiting for \'hsdev\' to start up.') else: errmsg = 'Could not start local \'hsdev\' server because:\n\n' + hsdev_proc.process_err sublime.error_message(errmsg) self.hsdev_process = None retval = False return retval def connect_backend(self): Logging.log('Connecting to \'hsdev\' server at {0}:{1}'.format(self.hostname, self.port), Logging.LOG_INFO) retval = True self.client = HsDevClient.HsDevClient(self.backend_mgr) if self.client.connect(self.hostname, self.port): # For a local hsdev server that we started, send the link command so that it exits when we exit. if self.is_local_hsdev: self.link() else: Logging.log('Connections to \'hsdev\' server unsuccessful, see tracebacks to diagnose.', Logging.LOG_ERROR) retval = False return retval def disconnect_backend(self): self.exit() self.client.close() def stop_backend(self): if self.is_local_hsdev: try: self.hsdev_process.process.wait(90.0) except subprocess.TimeoutExpired: sublime.message_dialog('\n'.join(['Time out waiting for \'hsdev\' process to terminate.', '', 'You may have to kill this process manually from a terminal or', 'console window\'s command line.'])) def is_live_backend(self): return self.client.is_connected() # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- # File/project tracking functions: # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- ## Pylint deems these two methods unncessary since all they do is call the superclass. However, I'm ## leaving them here just in case something more interesting has to be done in addition to calling ## the superclass. 
# def add_project_file(self, filename, project, project_dir): # super().add_project_file(filename, project, project_dir) # def remove_project_file(self, filename): # super().remove_project_file(filename) # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- # Features # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- def auto_rescan(self): return True # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- # Utility functions used to implement the API: # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- @staticmethod def hsdev_version(exec_with, install_dir, output_compiler_version=False): retval = [0, 0, 0, 0] compiler_version = None cmd = ['hsdev', 'version'] if output_compiler_version: cmd.append('-c') hsdev_proc = ProcHelper.exec_with_wrapper(exec_with, install_dir, cmd) if hsdev_proc.process is not None: exit_code, out, _ = hsdev_proc.wait() if exit_code == 0: ## 'cabal new-run' can spit out multiple lines of status before executing the task: for line in out.splitlines(): hsver = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)\.(?P<build>\d+)', line) if hsver: major = int(hsver.group('major')) minor = int(hsver.group('minor')) revision = int(hsver.group('revision')) build = int(hsver.group('build')) retval = [major, minor, revision, build] compiler_version = line.split()[1] if output_compiler_version else None break return (retval, compiler_version) if output_compiler_version else retval @staticmethod def concat_args(args): def inner_concat(left, right): (left_pred, left_expr) = left (right_pred, right_expr) = right return (left_pred or right_pred, (left_expr if left_pred else []) + (right_expr if right_pred else [])) return reduce(inner_concat, args, (True, []))[1] def files_and_contents(self, files, contents): contents = contents or {} retval = [{'file': f, 'contents': contents.get(f)} for f in files] if files else [] return retval def make_callbacks(self, name, on_response=None, result_convert=result_identity, on_notify=None, on_error=None, **backend_args): with self.serial_lock: req_serial = str(self.request_serial) self.request_serial += 1 # Clean up backend arguments: for param in ['on_response', 'result_convert', 'on_notify', 'on_error']: if param in backend_args: del backend_args[param] return (HsCallback.HsDevCallbacks(req_serial, name, on_response, result_convert, on_notify, on_error), backend_args) def hsdev_command(self, name, opts, callbacks, async_cmd=False, timeout=HSDEV_CALL_TIMEOUT, is_list=False, on_result_part=None, split_result=None): if split_result is None: split_res = on_result_part is not None if is_list and split_res: result = [] def hsdev_command_notify(reply): if 'result-part' in reply: notify_result = callbacks.call_result_convert([reply['result-part']])[0] on_result_part(notify_result) result.append(notify_result) else: callbacks.call_notify(reply) # FIXME: Is this option still used? 
opts.update({'split-result': None}) callbacks.add_notify(hsdev_command_notify) resp = self.client.call(name, opts, callbacks, wait=not async_cmd, timeout=timeout) return resp def command(self, name, opts, callbacks, timeout=HSDEV_CALL_TIMEOUT, on_result_part=None, split_result=None): return self.hsdev_command(name, opts, callbacks, async_cmd=False, timeout=timeout, is_list=False, on_result_part=on_result_part, split_result=split_result) def async_command(self, name, opts, callbacks, on_result_part=None, split_result=None): return self.hsdev_command(name, opts, callbacks, async_cmd=True, timeout=None, is_list=False, on_result_part=on_result_part, split_result=split_result) def list_command(self, name, opts, callbacks, timeout=HSDEV_CALL_TIMEOUT, on_result_part=None, split_result=None): return self.hsdev_command(name, opts, callbacks, async_cmd=False, timeout=timeout, is_list=True, on_result_part=on_result_part, split_result=split_result) def async_list_command(self, name, opts, callbacks, on_result_part=None, split_result=None): return self.hsdev_command(name, opts, callbacks, async_cmd=True, timeout=None, is_list=True, on_result_part=on_result_part, split_result=split_result) # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- # API implementation: # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- def link(self, hold=False): return self.command('link', {'hold': hold}, self.make_callbacks('link')[0]) def ping(self): return self.command('ping', {}, lambda r: r and ('message' in r) and (r['message'] == 'pong'), self.make_callbacks('ping')[0]) def scan(self, cabal=False, sandboxes=None, projects=None, files=None, paths=None, ghc=None, contents=None, docs=False, infer=False, wait_complete=False, **backend_args): action = self.command if wait_complete else self.async_command callbacks, backend_args = self.make_callbacks('scan', **backend_args) return action('scan', {'projects': projects or [], 'cabal': cabal, 'sandboxes': sandboxes or [], 'files': self.files_and_contents(files, contents), 'paths': paths or [], 'ghc-opts': ghc or [], 'docs': docs, 'infer': infer}, callbacks, **backend_args) def scan_project(self, project, build_tool=None, no_deps=False, wait_complete=False, **backend_args): action = self.command if wait_complete else self.async_command callbacks, backend_args = self.make_callbacks('scan project', **backend_args) return action( 'scan project', { 'project': project, 'build-tool': build_tool, 'scan-deps': not no_deps, }, callbacks, **backend_args ) def scan_file(self, file, build_tool=None, no_project=False, no_deps=False, wait_complete=False, **backend_args): action = self.command if wait_complete else self.async_command callbacks, backend_args = self.make_callbacks('scan file', **backend_args) return action( 'scan file', { 'file': file, 'build-tool': build_tool, 'scan-project': not no_project, 'scan-deps': not no_deps, }, callbacks, **backend_args ) def scan_package_dbs(self, package_dbs, wait_complete=False, **backend_args): action = self.command if wait_complete else self.async_command callbacks, backend_args = self.make_callbacks('scan package-dbs', **backend_args) return action( 'scan package-dbs', {'package-db-stack': [{'package-db': p} if p not in ['user-db', 'global-db'] else p for p in package_dbs]}, callbacks, **backend_args ) def set_file_contents(self, file, contents=None, **backend_args): callbacks, backend_args = self.make_callbacks('set-file-contents', **backend_args) return self.command('set-file-contents', 
{'file': file, 'contents': contents}, callbacks, **backend_args) def docs(self, projects=None, files=None, **backend_args): callbacks, backend_args = self.make_callbacks('docs', **backend_args) return self.async_command('docs', {'projects': projects or [], 'files': files or []}, callbacks, **backend_args) def infer(self, projects=None, files=None, **backend_args): callbacks, backend_args = self.make_callbacks('infer', **backend_args) return self.async_command('infer', {'projects': projects or [], 'files': files or []}, callbacks, **backend_args) def remove(self, cabal=False, sandboxes=None, projects=None, files=None, **backend_args): callbacks, backend_args = self.make_callbacks('remove', **backend_args) return self.async_list_command('remove', {'projects': projects or [], 'cabal': cabal, 'sandboxes': sandboxes or [], 'files': files or []}, callbacks, **backend_args) def remove_all(self, **backend_args): callbacks, backend_args = self.make_callbacks('remove-all', **backend_args) return self.command('remove-all', {}, callbacks, **backend_args) def list_packages(self, **backend_args): callbacks, backend_args = self.make_callbacks('packages', **backend_args) return self.list_command('packages', {}, callbacks, **backend_args) def list_projects(self, **backend_args): callbacks, backend_args = self.make_callbacks('projects', **backend_args) return self.list_command('projects', {}, callbacks, **backend_args) def list_sandboxes(self, **backend_args): return self.list_command('sandboxes', {}, **backend_args) def symbol(self, lookup="", search_type='prefix', project=None, file=None, module=None, package=None, installed=False, source=False, standalone=False, local_names=False, header=False, **backend_args): # search_type is one of: exact, prefix, infix, suffix query = {'input': lookup, 'type': search_type} filters = [] if project: filters.append({'project': project}) if file: filters.append({'file': file}) if module: filters.append({'module': module}) if package: filters.append({'package': package}) if installed: filters.append('installed') if source: filters.append('sourced') if standalone: filters.append('standalone') callbacks, backend_args = self.make_callbacks('symbol', result_convert=ResultParse.parse_symbol_ids if header else ResultParse.parse_symbols, **backend_args) return self.list_command('symbol', {'query': query, 'filters': filters, 'locals': local_names, 'header': header}, callbacks, **backend_args) def module(self, _projectname, lookup="", search_type='prefix', project=None, file=None, module=None, package=None, installed=False, source=False, standalone=False, header=False, **backend_args): query = {'input': lookup, 'type': search_type} filters = [] if project: filters.append({'project': project}) if file: filters.append({'file': file}) if module: filters.append({'module': module}) if package: filters.append({'package': package}) if installed: filters.append('installed') if source: filters.append('sourced') if standalone: filters.append('standalone') callbacks, backend_args = self.make_callbacks('module', result_convert=ResultParse.parse_module_ids if header else ResultParse.parse_modules, **backend_args) return self.command('module', {'query': query, 'filters': filters, 'header': header, 'inspection': False}, callbacks, **backend_args) def project(self, project=None, path=None, **backend_args): callbacks, backend_args = self.make_callbacks('project', **backend_args) return self.command('project', {'name': project} if project else {'path': path}, callbacks, **backend_args) def 
sandbox(self, path, **backend_args): callbacks, backend_args = self.make_callbacks('sandbox', **backend_args) return self.command('sandbox', {'path': path}, callbacks, **backend_args) def lookup(self, name, file, **backend_args): callbacks, backend_args = self.make_callbacks('lookup', result_convert=ResultParse.parse_symbols, **backend_args) return self.list_command('lookup', {'name': name, 'file': file}, callbacks, **backend_args) def whois(self, name, file, **backend_args): callbacks, backend_args = self.make_callbacks('whois', result_convert=ResultParse.parse_symbols, **backend_args) return self.list_command('whois', {'name': name, 'file': file}, callbacks, **backend_args) def whoat(self, line, column, file, **backend_args): callbacks, backend_args = self.make_callbacks('whoat', result_convert=ResultParse.parse_symbols, **backend_args) return self.list_command('whoat', {'line': line, 'column': column, 'file': file}, callbacks, **backend_args) def scope_modules(self, _projcname, file, lookup='', search_type='prefix', **backend_args): callbacks, backend_args = self.make_callbacks('scope_modules', result_convert=ResultParse.parse_module_ids, **backend_args) return self.list_command('scope modules', {'query': {'input': lookup, 'type': search_type}, 'file': file}, callbacks, **backend_args) def scope(self, file, lookup='', search_type='prefix', global_scope=False, **backend_args): callbacks, backend_args = self.make_callbacks('scope', result_convert=ResultParse.parse_symbol_ids, **backend_args) return self.list_command('scope', {'query': {'input': lookup, 'type': search_type }, 'file': file }, callbacks, **backend_args) def usages(self, line, column, file, **backend_args): callbacks, backend_args = self.make_callbacks('usages', result_convert=ResultParse.parse_symbol_usages, **backend_args) return self.list_command('usages', {'line': line, 'column': column, 'file': file}, callbacks, **backend_args) def complete(self, sym, file, wide=False, **backend_args): qname = sym.qualified_name() if sym.name is not None else sym.module + '.' 
callbacks, backend_args = self.make_callbacks('complete', result_convert=ResultParse.parse_symbols, **backend_args) return self.list_command('complete', {'prefix': qname, 'wide': wide, 'file': file}, callbacks, **backend_args) def hayoo(self, query, page=None, pages=None, **backend_args): callbacks, backend_args = self.make_callbacks('hayoo', result_convert=ResultParse.parse_symbols, **backend_args) return self.list_command('hayoo', {'query': query, 'page': page or 0, 'pages': pages or 1}, callbacks, **backend_args) def cabal_list(self, packages, **backend_args): def convert_to_cabal_packages(pkg_list): return [ResultParse.parse_cabal_package(pkg) for pkg in pkg_list] if pkg_list else None callbacks, backend_args = self.make_callbacks('cabal list', result_convert=convert_to_cabal_packages, **backend_args) return self.list_command('cabal list', {'packages': packages}, callbacks, **backend_args) def unresolveds(self, files, **backend_args): callbacks, backend_args = self.make_callbacks('unresolveds', **backend_args) return self.list_command('unresolveds', {'files': files}, callbacks, **backend_args) def lint(self, files=None, contents=None, hlint=None, wait_complete=False, **backend_args): action = self.list_command if wait_complete else self.async_list_command result_convert = backend_args.pop('result_convert', []) if result_convert and not isinstance(result_convert, list): result_convert = [result_convert] result_convert.append(self.convert_warnings) callbacks, backend_args = self.make_callbacks('lint', result_convert=result_convert, **backend_args) return action('lint', {'files': self.files_and_contents(files, contents), 'lint-opts': hlint or []}, callbacks, **backend_args) def check(self, files=None, contents=None, ghc=None, wait_complete=False, **backend_args): action = self.list_command if wait_complete else self.async_list_command callbacks, backend_args = self.make_callbacks('check', **backend_args) return action('check', {'files': self.files_and_contents(files, contents), 'ghc-opts': ghc or []}, callbacks, **backend_args) def check_lint(self, files=None, contents=None, ghc=None, hlint=None, wait_complete=False, **backend_args): action = self.list_command if wait_complete else self.async_list_command result_convert = backend_args.pop('result_convert', []) if result_convert and not isinstance(result_convert, list): result_convert = [result_convert] result_convert.append(self.convert_warnings) callbacks, backend_args = self.make_callbacks('check-lint', result_convert=result_convert, **backend_args) return action('check-lint', {'files': self.files_and_contents(files, contents), 'ghc-opts': ghc or [], 'lint-opts': hlint or []}, callbacks, **backend_args) def types(self, _projectname, file, _modulename, _line, _column, ghc_flags=None, contents=None, **backend_args): callbacks, backend_args = self.make_callbacks('types', **backend_args) return self.list_command('types', {'files': self.files_and_contents(file, contents), 'ghc-opts': ghc_flags or []}, callbacks, **backend_args) def autofixes(self, messages, wait_complete=False, **backend_args): callbacks, backend_args = self.make_callbacks('autofixes', result_convert=ResultParse.parse_corrections, **backend_args) action = self.list_command if wait_complete else self.async_list_command return action('autofixes', {'messages': messages}, callbacks, **backend_args) def refactor(self, messages, rest=[], pure=True, wait_complete=False, **backend_args): callbacks, backend_args = self.make_callbacks('refactor', 
result_convert=ResultParse.parse_corrections, **backend_args) action = self.list_command if wait_complete else self.async_list_command return action('refactor', {'messages': messages, 'rest': rest, 'pure': pure}, callbacks, **backend_args) def rename(self, name, new_name, file, wait_complete=False, **backend_args): callbacks, backend_args = self.make_callbacks('rename', result_convert=ResultParse.parse_corrections, **backend_args) action = self.list_command if wait_complete else self.async_list_command return action('rename', {'name': name, 'new-name': new_name, 'file': file}, callbacks, **backend_args) def langs(self, _projectname, **backend_args): callbacks, backend_args = self.make_callbacks('langs', **backend_args) return self.command('langs', {}, callbacks, **backend_args) def flags(self, _projectname, **backend_args): callbacks, backend_args = self.make_callbacks('flags', **backend_args) return self.command('flags', {}, callbacks, **backend_args) def ghc_eval(self, exprs, file=None, source=None, wait_complete=False, **backend_args): the_file = None if file is not None: the_file = {'file': file, 'contents': source} callbacks, backend_args = self.make_callbacks('ghc eval', result_convert=ResultParse.parse_repl_results, **backend_args) action = self.list_command if wait_complete else self.async_list_command return action('ghc eval', {'exprs': exprs, 'file': the_file}, callbacks, **backend_args) def ghc_type(self, exprs, file=None, source=None, wait_complete=False, **backend_args): the_file = None if file is not None: the_file = {'file': file, 'contents': source} callbacks, backend_args = self.make_callbacks('ghc type', result_convert=ResultParse.parse_repl_results, **backend_args) action = self.list_command if wait_complete else self.async_list_command return action('ghc type', {'exprs': exprs, 'file': the_file}, callbacks, **backend_args) def stop_ghc(self, **backend_args): callbacks, backend_args = self.make_callbacks('stop-ghc', **backend_args) return self.command('stop-ghc', {}, callbacks, **backend_args) def exit(self): return self.command('exit', {}, self.make_callbacks('exit')[0]) # old names for compatibility def autofix_show(self, messages, wait_complete=False, **backend_args): return self.autofixes(messages, wait_complete=wait_complete, **backend_args) def autofix_fix(self, messages, rest=[], pure=True, wait_complete=False, **backend_args): return self.refactor(messages, rest=rest, pure=pure, wait_complete=wait_complete, **backend_args) # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- # Advanced features: # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- def query_import(self, symname, filename): if self.whois(symname, filename): return (False, ['Symbol {0} already in scope'.format(symname)]) candidates = list(filter( lambda c: c.imported_from is not None, self.lookup(symname, filename), )) return (True, candidates) if candidates else (False, ['Symbol {0} not found'.format(symname)]) def contents_to_module(self, file, contents): self.set_file_contents(file, contents) m = self.module(file=file, header=True) proj = self.project(path=m.location.project) build_tool = proj['build-tool'] self.scan_file(file=file, build_tool=build_tool, wait_complete=True) return Utils.head_of(self.module(None, file=file)) def clean_imports(self, filename): cmd = ['hsclearimports', filename, '--max-import-list', '64'] hsclean_proc = ProcHelper.exec_with_wrapper(self.exec_with, self.install_dir, cmd) if hsclean_proc.process is not None: 
exit_code, result, err = hsclean_proc.wait() if exit_code == 0: return (True, result.splitlines()) return (False, err) return (False, ['\'hscleanimports\' utility not found.']) # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- # Utility functions: # -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- def convert_warnings(self, messages): for msg in messages: if msg.get('level', '') == 'warning': msg['level'] = 'hint' return messages class HsDevStartupReader(threading.Thread): '''Separate thread object that reads the local `hsdev` server's `stdout` looking for the server's startup message. The server's port number is parsed from the startup message and saved in the object's `hsdev_port` attribute, just in case this differs from the default or requested port. ''' def __init__(self, fstdout): super().__init__(name='hsdev startup reader') self.stdout = fstdout self.hsdev_port = -1 self.end_event = threading.Event() def run(self): self.end_event.clear() while not self.end_event.is_set(): srvout = self.stdout.readline().strip() Logging.log('hsdev initial: {0}'.format(srvout), Logging.LOG_DEBUG) if srvout != '': start_confirm = re.search(r'[Ss]erver started at port (?P<port>\d+)$', srvout) if start_confirm: self.hsdev_port = int(start_confirm.group('port')) Logging.log('hsdev initial: \'hsdev\' server started at port {0}'.format(self.hsdev_port)) self.end_event.set() else: # Got EOF, stop loop. self.end_event.set() def wait_startup(self, tmo): self.end_event.wait(tmo) def successful(self): return self.end_event.is_set() def stop(self): self.end_event.clear() def port(self): return self.hsdev_port
mit
-7,706,911,259,607,050,000
49.438957
204
0.571716
false
3.794634
false
false
false
afsungur/MemWord
framefinish.py
1
2063
import wx from griddict import GridDictionary import Global class FrameFinish(wx.Frame): def __init__(self, parent, true_count, false_count, falses): FRAME_SIZE_WIDTH = 800 FRAME_SIZE_HEIGHT = 300 FRAME_POS_X = 200 FRAME_POS_Y = 200 wx.Frame.__init__(self, parent, -1, title=Global.FINISH_TITLE, size=(FRAME_SIZE_WIDTH, FRAME_SIZE_HEIGHT), pos=(FRAME_POS_X, FRAME_POS_Y), style=wx.DEFAULT_FRAME_STYLE) self.frame = parent # Text Items true_count_text = wx.StaticText(self, -1, Global.TRUE_COUNT_TEXT) false_count_text = wx.StaticText(self, -1, Global.FALSE_COUNT_TEXT) true_count_value = wx.StaticText(self, -1, str(true_count)) false_count_value = wx.StaticText(self, -1, str(false_count)) seperator = wx.StaticText(self, -1, "-----------------------------") font = wx.Font(16, wx.MODERN, wx.NORMAL, wx.BOLD) falses_big_text = wx.StaticText(self, -1, Global.WRONG_ANSWERS_TEXT+":") falses_big_text.SetFont(font) # Grid grid_falses = GridDictionary(self, falses) print "false count:", len(falses) # Sizer Set trueCountSizer = wx.GridBagSizer(2,2) trueCountSizer.Add(true_count_text,pos=(0,0)) trueCountSizer.Add(true_count_value,pos=(0,1)) trueCountSizer.Add(false_count_text,pos=(1,0)) trueCountSizer.Add(false_count_value,pos=(1,1)) mainSizer = wx.BoxSizer(wx.VERTICAL) mainSizer.Add(trueCountSizer, 0, wx.ALL, 5) mainSizer.Add(seperator,0, wx.ALL, 5) mainSizer.Add(falses_big_text,0, wx.ALL, 5) mainSizer.Add(grid_falses, 0, wx.ALL, 5) # Bind self.Bind(wx.EVT_CLOSE, self.close_event) # Frame Settings self.SetSizer(mainSizer) self.Fit() self.Show() def close_event(self, evt): print "closed..." self.frame.close()
gpl-3.0
-5,161,963,566,746,056,000
33.383333
80
0.56762
false
3.393092
false
false
false
davidcdba/oBid
oBid/oBid/settings.py
1
6054
#encoding: utf-8
# So that accents and the letter Ñ do not cause trouble
# Django settings for oBid project.

## EXPLANATION ## Import the system 'os' library and set the project PATH to the folder this file lives in
import os
PROJECT_PATH=os.path.dirname(os.path.realpath(__file__))

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('i12gamad', '[email protected]'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'oBid.db',                      # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '',                      # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '',                      # Set to empty string for default.
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# You can see the available time zones here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
## EXPLANATION ## Set 'Europe/Madrid' as the time zone to avoid time changes
TIME_ZONE = 'Europe/Madrid'

## EXPLANATION ## Set the language we will use to Spanish (Spain)
LANGUAGE_CODE = 'es-es'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

## EXPLANATION ## Set the 'media' folder inside the project as the folder holding multimedia content
MEDIA_ROOT = os.path.join(PROJECT_PATH,'media')

## EXPLANATION ## Set the path 'localhost:8000/media/' as the access route to the multimedia content folder
MEDIA_URL = '/media/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ## EXPLANATION ## Set the 'static' folder inside the project as the folder holding static content
    os.path.join(PROJECT_PATH,'static'),
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    ## NOTE ## Line uncommented, enables the path to static content
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = '##d-1%bpw32#q*_#q6e)07_n01$qy!s&9mx6_2yh4p6)gv^^p&'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'oBid.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'oBid.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ## EXPLANATION ## Set the 'templates' folder inside the project as the folder holding the templates
    os.path.join(PROJECT_PATH,'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',  ## NOTE ## Line uncommented, enables access to the admin panel
    ## NOTE ## Line uncommented, enables access to the admin panel documentation
    'django.contrib.admindocs',
    # add the auction application (subasta)
    'subasta',
    'usuarios',
    'articulos',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
gpl-2.0
8,352,776,375,811,243,000
35.896341
129
0.691786
false
3.386122
false
false
false
yarikoptic/NiPy-OLD
examples/interfaces/process_fiac.py
1
6055
''' Single subject analysis script for SPM / FIAC ''' import sys from os.path import join as pjoin from glob import glob import numpy as np from nipy.interfaces.spm import spm_info, make_job, scans_for_fnames, \ run_jobdef, fnames_presuffix, fname_presuffix, fltcols def get_data(data_path, subj_id): data_def = {} subject_path = pjoin(data_path, 'fiac%s' % subj_id) data_def['functionals'] = sorted( glob(pjoin(subject_path, 'functional_*.nii'))) anatomicals = glob(pjoin(subject_path, 'anatomical.nii')) if len(anatomicals) == 1: data_def['anatomical'] = anatomicals[0] elif len(anatomicals) == 0: data_def['anatomical'] = None else: raise ValueError('Too many anatomicals') return data_def def slicetime(data_def): sess_scans = scans_for_fnames(data_def['functionals']) stinfo = make_job('temporal', 'st', { 'scans': sess_scans, 'so':range(1,31,2) + range(2,31,2), 'tr':2.5, 'ta':2.407, 'nslices':float(30), 'refslice':1 }) run_jobdef(stinfo) def realign(data_def): sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a')) rinfo = make_job('spatial', 'realign', [{ 'estimate':{ 'data':sess_scans, 'eoptions':{ 'quality':0.9, 'sep':4.0, 'fwhm':5.0, 'rtm':True, 'interp':2.0, 'wrap':[0.0,0.0,0.0], 'weight':[] } } }]) run_jobdef(rinfo) def reslice(data_def): sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a')) rsinfo = make_job('spatial', 'realign', [{ 'write':{ 'data': np.vstack(sess_scans.flat), 'roptions':{ 'which':[2, 1], 'interp':4.0, 'wrap':[0.0,0.0,0.0], 'mask':True, } } }]) run_jobdef(rsinfo) def coregister(data_def): func1 = data_def['functionals'][0] mean_fname = fname_presuffix(func1, 'meana') crinfo = make_job('spatial', 'coreg', [{ 'estimate':{ 'ref': [mean_fname], 'source': [data_def['anatomical']], 'other': [[]], 'eoptions':{ 'cost_fun':'nmi', 'sep':[4.0, 2.0], 'tol':np.array( [0.02,0.02,0.02, 0.001,0.001,0.001, 0.01,0.01,0.01, 0.001,0.001,0.001]).reshape(1,12), 'fwhm':[7.0, 7.0] } } }]) run_jobdef(crinfo) def segnorm(data_def): def_tpms = np.zeros((3,1), dtype=np.object) spm_path = spm_info.spm_path def_tpms[0] = pjoin(spm_path, 'tpm', 'grey.nii'), def_tpms[1] = pjoin(spm_path, 'tpm', 'white.nii'), def_tpms[2] = pjoin(spm_path, 'tpm', 'csf.nii') data = np.zeros((1,), dtype=object) data[0] = data_def['anatomical'] sninfo = make_job('spatial', 'preproc', { 'data': data, 'output':{ 'GM':fltcols([0,0,1]), 'WM':fltcols([0,0,1]), 'CSF':fltcols([0,0,0]), 'biascor':1.0, 'cleanup':False, }, 'opts':{ 'tpm':def_tpms, 'ngaus':fltcols([2,2,2,4]), 'regtype':'mni', 'warpreg':1.0, 'warpco':25.0, 'biasreg':0.0001, 'biasfwhm':60.0, 'samp':3.0, 'msk':np.array([], dtype=object), } }) run_jobdef(sninfo) def norm_write(data_def): sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a')) matname = fname_presuffix(data_def['anatomical'], suffix='_seg_sn.mat', use_ext=False) subj = { 'matname': np.zeros((1,), dtype=object), 'resample': np.vstack(sess_scans.flat), } subj['matname'][0] = matname roptions = { 'preserve':False, 'bb':np.array([[-78,-112, -50],[78,76,85.0]]), 'vox':fltcols([2.0,2.0,2.0]), 'interp':1.0, 'wrap':[0.0,0.0,0.0], } nwinfo = make_job('spatial', 'normalise', [{ 'write':{ 'subj': subj, 'roptions': roptions, } }]) run_jobdef(nwinfo) # knock out the list of images, replacing with only one subj['resample'] = np.zeros((1,), dtype=object) subj['resample'][0] = data_def['anatomical'] roptions['interp'] = 4.0 run_jobdef(nwinfo) def smooth(data_def, fwhm=8.0): try: len(fwhm) except TypeError: fwhm = [fwhm] * 3 fwhm = np.asarray(fwhm, 
dtype=np.float).reshape(1,3)
    sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'wa'))
    sinfo = make_job('spatial', 'smooth',
                     {'data':np.vstack(sess_scans.flat),
                      'fwhm':fwhm,
                      'dtype':0})
    run_jobdef(sinfo)


def process_subject(ddef):
    if not ddef['anatomical']:
        return
    slicetime(ddef)
    realign(ddef)
    reslice(ddef)
    coregister(ddef)
    segnorm(ddef)
    norm_write(ddef)
    smooth(ddef)


def process_subjects(data_path, subj_ids):
    for subj_id in subj_ids:
        ddef = get_data(data_path, subj_id)
        process_subject(ddef)


if __name__ == '__main__':
    try:
        data_path = sys.argv[1]
    except IndexError:
        raise OSError('Need FIAC data path as input')
    # A slice such as sys.argv[2:] never raises IndexError; it simply yields an
    # empty list, so fall back to all 16 subjects when no ids are given.
    subj_ids = sys.argv[2:]
    if not subj_ids:
        subj_ids = range(16)
    process_subjects(data_path, subj_ids)
bsd-3-clause
-6,172,351,069,423,954,000
29.124378
82
0.471181
false
3.196938
false
false
false
xesscorp/skidl
skidl/bus.py
1
16133
# -*- coding: utf-8 -*- # MIT license # # Copyright (C) 2018 by XESS Corp. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Handles buses. """ from __future__ import absolute_import, division, print_function, unicode_literals from builtins import range, str, super from future import standard_library from .alias import Alias from .common import * from .defines import * from .logger import logger from .net import Net from .netpinlist import NetPinList from .pin import Pin from .skidlbaseobj import SkidlBaseObject from .utilities import * standard_library.install_aliases() class Bus(SkidlBaseObject): """ This class collects one or more nets into a group that can be indexed. Args: name: A string with the name of the bus. args: A list of ints, pins, nets, buses to attach to the net. Keyword Args: attribs: A dictionary of attributes and values to attach to the Net object. Example: :: n = Net() led1 = Part("Device", 'LED') b = Bus('B', 8, n, led1['K']) """ @classmethod def get(cls, name, circuit=None): """Get the bus with the given name from a circuit, or return None.""" if not circuit: circuit = builtins.default_circuit search_params = ( ("name", name, True), ("aliases", name, True), # ('name', ''.join(('.*',name,'.*')), False), # ('aliases', Alias(''.join(('.*',name,'.*'))), False) ) for attr, name, do_str_match in search_params: buses = filter_list( circuit.buses, do_str_match=do_str_match, **{attr: name} ) if buses: return list_or_scalar(buses) return None @classmethod def fetch(cls, name, *args, **attribs): """Get the bus with the given name from a circuit, or create it if not found.""" circuit = attribs.get("circuit", builtins.default_circuit) return cls.get(name, circuit=circuit) or cls(name, *args, **attribs) def __init__(self, *args, **attribs): super().__init__() # Define the member storing the nets so it's present, but it starts empty. self.nets = [] # For Bus objects, the circuit object the bus is a member of is passed # in with all the other attributes. If a circuit object isn't provided, # then the default circuit object is added to the attributes. attribs["circuit"] = attribs.get("circuit", default_circuit) # Scan through the kwargs and args to see if there is a name for this bus. name = attribs.pop("name", None) if not name: try: # The first string found will be the bus name. name = [a for a in args if isinstance(a, (basestring, type(None)))][0] # Remove the name from the list of things to be added to the bus. 
args = list(args) args.remove(name) # args = [a for a in args if a != name] except IndexError: # No explicit bus name found, so generate an implicit one. name = None # Attach additional attributes to the bus. (The Circuit object also gets # set here.) for k, v in list(attribs.items()): setattr(self, k, v) # The bus name is set after the circuit is assigned so the name can be # checked against the other bus names already in that circuit. self.name = name # Add the bus to the circuit. self.circuit = None # Make sure bus isn't seen as part of circuit. attribs["circuit"] += self # Add bus to circuit (also sets self.circuit). # Build the bus from net widths, existing nets, nets of pins, other buses. self.extend(args) def extend(self, *objects): """Extend bus by appending objects to the end (MSB).""" self.insert(len(self.nets), objects) def insert(self, index, *objects): """Insert objects into bus starting at indexed position.""" for obj in flatten(objects): if isinstance(obj, int): # Add a number of new nets to the bus. for _ in range(obj): self.nets.insert(index, Net()) index += obj elif isinstance(obj, Net): # Add an existing net to the bus. self.nets.insert(index, obj) index += 1 elif isinstance(obj, Pin): # Add a pin to the bus. try: # Add the pin's net to the bus. self.nets.insert(index, obj.get_nets()[0]) except IndexError: # OK, the pin wasn't already connected to a net, # so create a new net, add it to the bus, and # connect the pin to it. n = Net() n += obj self.nets.insert(index, n) index += 1 elif isinstance(obj, Bus): # Add an existing bus to this bus. for n in reversed(obj.nets): self.nets.insert(index, n) index += len(obj) else: log_and_raise( logger, ValueError, "Adding illegal type of object ({}) to Bus {}.".format( type(obj), self.name ), ) # Assign names to all the unnamed nets in the bus. # Separate index from bus name if name ends with number. sep = '_' if self.name[-1].isdigit() else '' for i, net in enumerate(self.nets): if net.is_implicit(): # Net names are the bus name with the index appended. net.name = self.name + sep + str(i) def get_nets(self): """Return the list of nets contained in this bus.""" return to_list(self.nets) def get_pins(self): """It's an error to get the list of pins attached to all bus lines.""" log_and_raise(logger, TypeError, "Can't get the list of pins on a bus!") def copy(self, num_copies=None, **attribs): """ Make zero or more copies of this bus. Args: num_copies: Number of copies to make of this bus. Keyword Args: attribs: Name/value pairs for setting attributes for the copy. Returns: A list of Bus copies or a Bus if num_copies==1. Raises: Exception if the requested number of copies is a non-integer or negative. Notes: An instance of a bus can be copied just by calling it like so:: b = Bus('A', 8) # Create a bus. b_copy = b(2) # Get two copies of the bus. You can also use the multiplication operator to make copies:: b = 10 * Bus('A', 8) # Create an array of buses. """ # If the number of copies is None, then a single copy will be made # and returned as a scalar (not a list). Otherwise, the number of # copies will be set by the num_copies parameter or the number of # values supplied for each part attribute. num_copies_attribs = find_num_copies(**attribs) return_list = (num_copies is not None) or (num_copies_attribs > 1) if num_copies is None: num_copies = max(1, num_copies_attribs) # Check that a valid number of copies is requested. 
if not isinstance(num_copies, int): log_and_raise( logger, ValueError, "Can't make a non-integer number ({}) of copies of a bus!".format( num_copies ), ) if num_copies < 0: log_and_raise( logger, ValueError, "Can't make a negative number ({}) of copies of a bus!".format( num_copies ), ) copies = [] for i in range(num_copies): cpy = Bus(self.name, self) # Attach additional attributes to the bus. for k, v in list(attribs.items()): if isinstance(v, (list, tuple)): try: v = v[i] except IndexError: log_and_raise( logger, ValueError, "{} copies of bus {} were requested, but too few elements in attribute {}!".format( num_copies, self.name, k ), ) setattr(cpy, k, v) copies.append(cpy) # Return a list of the copies made or just a single copy. if return_list: return copies return copies[0] # Make copies with the multiplication operator or by calling the object. __call__ = copy def __mul__(self, num_copies): if num_copies is None: num_copies = 0 return self.copy(num_copies=num_copies) __rmul__ = __mul__ def __getitem__(self, *ids): """ Return a bus made up of the nets at the given indices. Args: ids: A list of indices of bus lines. These can be individual numbers, net names, nested lists, or slices. Returns: A bus if the indices are valid, otherwise None. """ # Use the indices to get the nets from the bus. nets = [] for ident in expand_indices(0, len(self) - 1, False, *ids): if isinstance(ident, int): nets.append(self.nets[ident]) elif isinstance(ident, basestring): nets.extend(filter_list(self.nets, name=ident)) else: log_and_raise( logger, TypeError, "Can't index bus with a {}.".format(type(ident)) ) if len(nets) == 0: # No nets were selected from the bus, so return None. return None if len(nets) == 1: # Just one net selected, so return the Net object. return nets[0] # Multiple nets selected, so return them as a NetPinList list. return NetPinList(nets) def __setitem__(self, ids, *pins_nets_buses): """ You can't assign to bus lines. You must use the += operator. This method is a work-around that allows the use of the += for making connections to bus lines while prohibiting direct assignment. Python processes something like my_bus[7:0] += 8 * Pin() as follows:: 1. Bus.__getitem__ is called with '7:0' as the index. This returns a NetPinList of eight nets from my_bus. 2. The NetPinList.__iadd__ method is passed the NetPinList and the thing to connect to the it (eight pins in this case). This method makes the actual connection to the part pin or pins. Then it creates an iadd_flag attribute in the object it returns. 3. Finally, Bus.__setitem__ is called. If the iadd_flag attribute is true in the passed argument, then __setitem__ was entered as part of processing the += operator. If there is no iadd_flag attribute, then __setitem__ was entered as a result of using a direct assignment, which is not allowed. """ # If the iadd_flag is set, then it's OK that we got # here and don't issue an error. Also, delete the flag. if getattr(pins_nets_buses[0], "iadd_flag", False): del pins_nets_buses[0].iadd_flag return # No iadd_flag or it wasn't set. This means a direct assignment # was made to the pin, which is not allowed. log_and_raise(logger, TypeError, "Can't assign to a bus! Use the += operator.") def __iter__(self): """ Return an iterator for stepping thru individual lines of the bus. """ return (self[l] for l in range(len(self))) # Return generator expr. def is_movable(self): """ Return true if the bus is movable to another circuit. A bus is movable if all the nets in it are movable. 
""" for n in self.nets: if not n.is_movable(): # One net not movable means the entire Bus is not movable. return False return True # All the nets were movable. def is_implicit(self): """Return true if the bus name is implicit.""" from .defines import NET_PREFIX, BUS_PREFIX prefix_re = "({}|{})+".format(re.escape(NET_PREFIX), re.escape(BUS_PREFIX)) return re.match(prefix_re, self.name) def connect(self, *pins_nets_buses): """ Return the bus after connecting one or more nets, pins, or buses. Args: pins_nets_buses: One or more Pin, Net or Bus objects or lists/tuples of them. Returns: The updated bus with the new connections. Notes: You can connect nets or pins to a bus like so:: p = Pin() # Create a pin. n = Net() # Create a net. b = Bus('B', 2) # Create a two-wire bus. b += p,n # Connect pin and net to B[0] and B[1]. """ nets = NetPinList(self.nets) nets += pins_nets_buses return self __iadd__ = connect @property def name(self): """ Get, set and delete the name of the bus. When setting the bus name, if another bus with the same name is found, the name for this bus is adjusted to make it unique. """ return self._name @name.setter def name(self, name): # Remove the existing name so it doesn't cause a collision if the # object is renamed with its existing name. self._name = None # Now name the object with the given name or some variation # of it that doesn't collide with anything else in the list. self._name = get_unique_name(self.circuit.buses, "name", BUS_PREFIX, name) @name.deleter def name(self): """Delete the bus name.""" del self._name def __str__(self): """Return a list of the nets in this bus as a string.""" return self.name + ":\n\t" + "\n\t".join([n.__str__() for n in self.nets]) __repr__ = __str__ def __len__(self): """Return the number of nets in this bus.""" return len(self.nets) @property def width(self): """Return width of a Bus, which is the same as using the len() operator.""" return len(self) def __bool__(self): """Any valid Bus is True""" return True __nonzero__ = __bool__ # Python 2 compatibility.
mit
-8,701,624,065,980,637,000
34.613687
111
0.556065
false
4.329844
false
false
false
MicheleTobias/CurvilinearAnisotropy
Code/WillametteFiles_Centerline.py
1
1734
# import the modules I'm gonna need import glob, string, csv, os # input the files to use inputdirectory = 'C:\Users\Michele\Documents\Research\CurvilinearAnisotropy\WillametteRiver\willamette_elevations\willamette\centerline_elevation\\' outputfile1 = 'C:\Users\Michele\Documents\Research\CurvilinearAnisotropy\WillametteRiver\willamette_elevations\willamette\PythonOutput\\WillamettePoints_Centerline.txt' #outputfile2 = 'C:\Documents and Settings\Michele Tobias\My Documents\Davis\Research\GIS Data\DataOutput\\SBV_average.txt' filemake = open(outputfile1,'w') filemake.close() #filemake = open(outputfile2,'w') #filemake.close() data = [] fulldata = [] #add *.txt to the end of the inputdirectory inputdirectory += '*.txt' #---------Copying the $GPGGA Lines to their own File-------------- # find the text files you need to work with textfiles = glob.glob(inputdirectory) #print textfiles #for writing the column names at the top of the output file columnnames = ['Easting\tNorthing\tBed_Elevation'] #finding the lines I need and writing them to the output file under the column names writer = csv.writer(open(outputfile1, 'w+')) writer.writerow(columnnames) #print textfiles for i in textfiles: #shortdoc = os.path.basename(i) #point = shortdoc.rstrip(".txt") #point = shortdoc[shortdoc.find(' ')+1: shortdoc.find('.')] reader = csv.reader(open(i, "r")) data = [row for row in reader] rownum=0 for j in data: if rownum >1: writer.writerow(j) #fulldata.append(j) rownum += 1 #j.append(point) #if j[0] != '#': # writer.writerow(j) # fulldata.append(j) # #print j #rownum += 1 print 'Finished!'
gpl-2.0
6,246,451,483,380,688,000
30.527273
168
0.686275
false
3.1875
false
false
false
garbas/mozilla-releng-services
lib/cli_common/cli_common/log.py
1
5277
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import absolute_import import os import structlog import logbook import structlog.exceptions CHANNELS = [ 'master', 'staging', 'production', ] class UnstructuredRenderer(structlog.processors.KeyValueRenderer): def __call__(self, logger, method_name, event_dict): event = None if 'event' in event_dict: event = event_dict.pop('event') if event_dict or event is None: # if there are other keys, use the parent class to render them # and append to the event rendered = super(UnstructuredRenderer, self).__call__( logger, method_name, event_dict) return '%s (%s)' % (event, rendered) else: return event def setup_mozdef(project_name, channel, MOZDEF): ''' Setup mozdef using taskcluster secrets ''' import mozdef_client sevirity_map = { 'critical': mozdef_client.MozDefEvent.SEVERITY_CRITICAL, 'error': mozdef_client.MozDefEvent.SEVERITY_ERROR, 'warning': mozdef_client.MozDefEvent.SEVERITY_WARNING, 'info': mozdef_client.MozDefEvent.SEVERITY_INFO, 'debug': mozdef_client.MozDefEvent.SEVERITY_DEBUG, } def send(logger, method_name, event_dict): # only send to mozdef if `mozdef` is set if event_dict.pop('mozdef', False): msg = mozdef_client.MozDefEvent(MOZDEF) msg.summary = event_dict.get('event', '') msg.tags = [ 'mozilla-releng/services/' + channel, project_name, ] if set(event_dict) - {'event'}: msg.details = event_dict.copy() msg.details.pop('event', None) msg.source = logger.name msg.set_severity( sevirity_map.get( method_name, mozdef_client.MozDefEvent.SEVERITY_INFO, ), ) msg.send() return event_dict return send def setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT): ''' Setup papertrail account using taskcluster secrets ''' # Setup papertrail papertrail = logbook.SyslogHandler( application_name='mozilla-releng/services/{}/{}'.format(channel, project_name), address=(PAPERTRAIL_HOST, int(PAPERTRAIL_PORT)), format_string='{record.time} {record.channel}: {record.message}', bubble=True, ) papertrail.push_application() def setup_sentry(project_name, channel, SENTRY_DSN): ''' Setup sentry account using taskcluster secrets ''' from raven import Client from raven.handlers.logbook import SentryHandler sentry_client = Client( dsn=SENTRY_DSN, site=project_name, name='mozilla-releng/services', environment=channel, # TODO: # release=read(VERSION) we need to promote that as well via secrets # tags=... # repos=... ) sentry = SentryHandler(sentry_client, level=logbook.WARNING, bubble=True) sentry.push_application() def init_logger(project_name, channel=None, level=logbook.INFO, handler=None, PAPERTRAIL_HOST=None, PAPERTRAIL_PORT=None, SENTRY_DSN=None, MOZDEF=None ): if not channel: channel = os.environ.get('APP_CHANNEL') if channel and channel not in CHANNELS: raise Exception('Initilizing logging with channel `{}`. 
It should be one of: {}'.format(channel, ', '.join(CHANNELS)))

    # By default, output logs on stderr
    if handler is None:
        fmt = '{record.channel}: {record.message}'
        handler = logbook.StderrHandler(level=level, format_string=fmt)

    handler.push_application()

    # Log to papertrail
    if channel and PAPERTRAIL_HOST and PAPERTRAIL_PORT:
        setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT)

    # Log to sentry
    if channel and SENTRY_DSN:
        setup_sentry(project_name, channel, SENTRY_DSN)

    def logbook_factory(*args, **kwargs):
        # Logger given to structlog
        logbook.compat.redirect_logging()
        return logbook.Logger(level=level, *args, **kwargs)

    # Setup structlog over logbook
    processors = [
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
    ]
    # send to mozdef before formatting into a string
    if channel and MOZDEF:
        processors.append(setup_mozdef(project_name, channel, MOZDEF))

    processors.append(UnstructuredRenderer())

    structlog.configure(
        context_class=structlog.threadlocal.wrap_dict(dict),
        processors=processors,
        logger_factory=logbook_factory,
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )


def get_logger(*args, **kwargs):
    return structlog.get_logger(*args, **kwargs)
mpl-2.0
-6,548,193,065,109,681,000
28.480447
126
0.617396
false
3.837818
false
false
false
zfrenchee/pandas
pandas/core/api.py
1
3146
# pylint: disable=W0614,W0401,W0611 # flake8: noqa import numpy as np from pandas.core.algorithms import factorize, unique, value_counts from pandas.core.dtypes.missing import isna, isnull, notna, notnull from pandas.core.categorical import Categorical from pandas.core.groupby import Grouper from pandas.io.formats.format import set_eng_float_format from pandas.core.index import (Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, MultiIndex, IntervalIndex, TimedeltaIndex, DatetimeIndex, PeriodIndex, NaT) from pandas.core.indexes.period import Period, period_range, pnow from pandas.core.indexes.timedeltas import Timedelta, timedelta_range from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range from pandas.core.indexes.interval import Interval, interval_range from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.panel import Panel, WidePanel from pandas.core.panel4d import Panel4D # TODO: Remove import when statsmodels updates #18264 from pandas.core.reshape.reshape import get_dummies from pandas.core.indexing import IndexSlice from pandas.core.tools.numeric import to_numeric from pandas.tseries.offsets import DateOffset from pandas.core.tools.datetimes import to_datetime from pandas.core.tools.timedeltas import to_timedelta # see gh-14094. from pandas.util._depr_module import _DeprecatedModule _removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay', 'customBusinessMonthEnd', 'customBusinessMonthBegin', 'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin', 'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd', 'byearEnd', 'week'] datetools = _DeprecatedModule(deprmod='pandas.core.datetools', removals=_removals) from pandas.core.config import (get_option, set_option, reset_option, describe_option, option_context, options) # deprecation, xref #13790 def match(*args, **kwargs): import warnings warnings.warn("pd.match() is deprecated and will be removed " "in a future version", FutureWarning, stacklevel=2) from pandas.core.algorithms import match return match(*args, **kwargs) def groupby(*args, **kwargs): import warnings warnings.warn("pd.groupby() is deprecated and will be removed; " "Please use the Series.groupby() or " "DataFrame.groupby() methods", FutureWarning, stacklevel=2) return args[0].groupby(*args[1:], **kwargs) # Deprecation: xref gh-16747 class TimeGrouper(object): def __new__(cls, *args, **kwargs): from pandas.core.resample import TimeGrouper import warnings warnings.warn("pd.TimeGrouper is deprecated and will be removed; " "Please use pd.Grouper(freq=...)", FutureWarning, stacklevel=2) return TimeGrouper(*args, **kwargs)
bsd-3-clause
2,358,515,945,609,053,000
37.839506
76
0.679275
false
4.048906
false
false
false
siddhantgoel/tornado-sqlalchemy
tests/test_session_mixin.py
1
1642
from unittest.mock import Mock from tornado_sqlalchemy import MissingDatabaseSettingError, SessionMixin from ._common import BaseTestCase, User, db class SessionMixinTestCase(BaseTestCase): def test_mixin_ok(self): class GoodHandler(SessionMixin): def __init__(h_self): h_self.application = Mock() h_self.application.settings = {'db': db} def run(h_self): with h_self.make_session() as session: return session.query(User).count() self.assertEqual(GoodHandler().run(), 0) def test_mixin_no_db(self): class BadHandler(SessionMixin): def __init__(h_self): h_self.application = Mock() h_self.application.settings = {} def run(h_self): with h_self.make_session() as session: return session.query(User).count() self.assertRaises(MissingDatabaseSettingError, BadHandler().run) def test_distinct_sessions(self): sessions = set() class Handler(SessionMixin): def __init__(h_self): db.configure(url=self.db_url) h_self.application = Mock() h_self.application.settings = {'db': db} def run(h_self): session = h_self.session sessions.add(id(session)) value = session.query(User).count() session.commit() session.close() return value Handler().run() Handler().run() self.assertEqual(len(sessions), 2)
mit
-7,001,430,187,323,076,000
27.807018
72
0.545676
false
4.486339
true
false
false
detrout/pykolab
pykolab/cli/telemetry/cmd_examine_session.py
1
4119
# -*- coding: utf-8 -*-

# Copyright 2010-2012 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 or, at your option, any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#

import sys

import pykolab

from pykolab.translate import _

log = pykolab.getLogger('pykolab.cli')
conf = pykolab.getConf()

from pykolab import telemetry
from pykolab.cli import commands

def __init__():
    commands.register('examine_session', execute, group='telemetry', description="Examine a Telemetry session.")

def execute(*args, **kw):
    db = telemetry.init_db()

    # Optional session identifier passed through keyword arguments (also used
    # by the per-session recursion further below); otherwise read the CLI args.
    session_id = kw.get('session_id', None)

    wanted = False

    if session_id == None:
        try:
            wanted = conf.cli_args.pop(0)
        except:
            log.error(_("Unspecified session identifier"))
            sys.exit(1)

    if not wanted:
        wanted = session_id

    session_wanted = None

    try:
        _wanted = (int)(wanted)
        session_wanted = _wanted
    except:
        user_wanted = wanted

    if not session_wanted == None:
        session = db.query(
                telemetry.TelemetrySession
            ).filter_by(
                    id=session_wanted
                ).first()

        if session == None:
            log.error(_("Invalid session identifier"))
            sys.exit(1)

        user = db.query(
                telemetry.TelemetryUser
            ).filter_by(
                    id=session.user_id
                ).first()

        server = db.query(
                telemetry.TelemetryServer
            ).filter_by(
                    id=session.server_id
                ).first()

    else:
        user = db.query(
                telemetry.TelemetryUser
            ).filter_by(
                    sasl_username=user_wanted
                ).first()

        sessions = db.query(
                telemetry.TelemetrySession
            ).filter_by(
                    user_id=user.id
                ).order_by(
                        telemetry.telemetry_session_table.c.start
                    )

        # Examine each of the user's sessions in turn. This is a module-level
        # function, so recurse into execute() rather than calling a method.
        for session in sessions:
            execute(session_id=session.id)

        return

    print _("Session by %s on server %s") % (user.sasl_username, server.fqdn)

    command_issues = db.query(
            telemetry.TelemetryCommandIssue
        ).filter_by(
                session_id=session.id
            )

    for command_issue in command_issues:
        command = db.query(
                telemetry.TelemetryCommand
            ).filter_by(
                    id=command_issue.command_id
                ).first()

        command_arg = db.query(
                telemetry.TelemetryCommandArg
            ).filter_by(
                    id=command_issue.command_arg_id
                ).first()

        print "Client(%d): %s %s %s" % (
                command_issue.id,
                command_issue.command_tag,
                command.command,
                command_arg.command_arg
            )

        server_responses = db.query(
                telemetry.TelemetryServerResponse
            ).filter_by(
                    command_issue_id=command_issue.id
                )

        for server_response in server_responses:
            server_response_lines = server_response.response.split('\n')

            for server_response_line in server_response_lines:
                print "Server(%d): %s" % (
                        server_response.id,
                        server_response_line
                    )
gpl-3.0
592,902,317,208,415,900
28.212766
112
0.554989
false
4.290625
false
false
false
gltn/stdm
stdm/ui/view_str.py
1
44716
""" /*************************************************************************** Name : View STR Relationships Description : Main Window for searching and browsing the social tenure relationship of the participating entities. Date : 24/May/2014 copyright : (C) 2014 by UN-Habitat and implementing partners. See the accompanying file CONTRIBUTORS.txt in the root email : [email protected] ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ import logging from collections import OrderedDict from datetime import date from qgis.PyQt import uic from qgis.PyQt.QtCore import ( QTimer, Qt, QSize, QObject, pyqtSignal, QThread, QRegExp, QSortFilterProxyModel, pyqtSlot ) from qgis.PyQt.QtWidgets import ( QMainWindow, QDesktopWidget, QToolBar, QAction, QApplication, QProgressDialog, QProgressBar, QMessageBox, QVBoxLayout, QWidget, QScrollArea, QFrame, QCheckBox, QTabBar, QCompleter ) from qgis.core import QgsProject from qgis.utils import ( iface ) from sqlalchemy import exc from sqlalchemy import ( func, String ) from stdm.data import globals from stdm.data.configuration import entity_model from stdm.data.database import Content from stdm.data.pg_utils import pg_table_count from stdm.data.qtmodels import ( BaseSTDMTableModel ) from stdm.exceptions import DummyException from stdm.security.authorization import Authorizer from stdm.settings import current_profile from stdm.ui.feature_details import DetailsTreeView from stdm.ui.forms.widgets import ColumnWidgetRegistry from stdm.ui.gui_utils import GuiUtils from stdm.ui.notification import ( NotificationBar ) from stdm.ui.social_tenure.str_editor import STREditor from stdm.ui.sourcedocument import ( SourceDocumentManager, DocumentWidget ) from stdm.ui.spatial_unit_manager import SpatialUnitManagerDockWidget from stdm.utils.util import ( entity_searchable_columns, entity_display_columns, format_name, lookup_parent_entity ) LOGGER = logging.getLogger('stdm') WIDGET, BASE = uic.loadUiType( GuiUtils.get_ui_file_path('ui_view_str.ui')) class ViewSTRWidget(WIDGET, BASE): """ Search and browse the social tenure relationship of all participating entities. 
""" def __init__(self, plugin): QMainWindow.__init__(self, plugin.iface.mainWindow()) self.setupUi(self) self.btnSearch.setIcon(GuiUtils.get_icon('search.png')) self.btnClearSearch.setIcon(GuiUtils.get_icon('reset.png')) self._plugin = plugin self.search_done = False # self.tbPropertyPreview.set_iface(self._plugin.iface) QTimer.singleShot( 100, lambda: self.tbPropertyPreview.set_iface(self._plugin.iface)) self.curr_profile = current_profile() self.spatial_units = self.curr_profile.social_tenure.spatial_units # Center me self.move(QDesktopWidget().availableGeometry().center() - self.frameGeometry().center()) self.sp_unit_manager = SpatialUnitManagerDockWidget( self._plugin.iface, self._plugin ) self.geom_cols = [] for spatial_unit in self.spatial_units: each_geom_col = self.sp_unit_manager.geom_columns(spatial_unit) self.geom_cols.extend(each_geom_col) # Configure notification bar self._notif_search_config = NotificationBar( self.vl_notification ) # set whether currently logged in user has # permissions to edit existing STR records self._can_edit = self._plugin.STRCntGroup.canUpdate() self._can_delete = self._plugin.STRCntGroup.canDelete() self._can_create = self._plugin.STRCntGroup.canCreate() # Variable used to store a reference to the # currently selected social tenure relationship # when displaying documents in the supporting documents tab window. # This ensures that there are no duplicates # when the same item is selected over and over again. self._strID = None self.removed_docs = None # Used to store the root hash of the currently selected node. self._curr_rootnode_hash = "" self.str_model, self.str_doc_model = entity_model( self.curr_profile.social_tenure, False, True ) self._source_doc_manager = SourceDocumentManager( self.curr_profile.social_tenure.supporting_doc, self.str_doc_model, self ) self._source_doc_manager.documentRemoved.connect( self.onSourceDocumentRemoved ) self._source_doc_manager.setEditPermissions(False) self.addSTR = None self.editSTR = None self.deleteSTR = None self.initGui() self.add_spatial_unit_layer() self.details_tree_view = DetailsTreeView(parent=self, plugin=self._plugin) layout = QVBoxLayout() layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(self.details_tree_view) self.str_tree_container.setLayout(layout) # else: # self.details_tree_view = self._plugin.details_tree_view self.details_tree_view.activate_feature_details(True) self.details_tree_view.model.clear() count = pg_table_count(self.curr_profile.social_tenure.name) self.setWindowTitle( self.tr('{}{}'.format( self.windowTitle(), '- ' + str(count) + ' rows' )) ) self.active_spu_id = -1 self.toolBox.setStyleSheet( ''' QToolBox::tab { background: qlineargradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #EDEDED, stop: 0.4 #EDEDED, stop: 0.5 #EDEDED, stop: 1.0 #D3D3D3 ); border-radius: 2px; border-style: outset; border-width: 2px; height: 100px; border-color: #C3C3C3; } QToolBox::tab:selected { font: italic; } ''' ) self.details_tree_view.view.setStyleSheet( ''' QTreeView:!active { selection-background-color: #72a6d9; } ''' ) def add_tool_buttons(self): """ Add toolbar buttons of add, edit and delete buttons. 
:return: None :rtype: NoneType """ tool_buttons = QToolBar() tool_buttons.setObjectName('form_toolbar') tool_buttons.setIconSize(QSize(16, 16)) self.addSTR = QAction(GuiUtils.get_icon( 'add.png'), QApplication.translate('ViewSTRWidget', 'Add'), self ) self.editSTR = QAction( GuiUtils.get_icon('edit.png'), QApplication.translate('ViewSTRWidget', 'Edit'), self ) self.deleteSTR = QAction( GuiUtils.get_icon('remove.png'), QApplication.translate('ViewSTRWidget', 'Remove'), self ) tool_buttons.addAction(self.addSTR) tool_buttons.addAction(self.editSTR) tool_buttons.addAction(self.deleteSTR) self.toolbarVBox.addWidget(tool_buttons) def initGui(self): """ Initialize widget """ self.tb_actions.setVisible(False) self._load_entity_configurations() self.add_tool_buttons() # Connect signals self.tbSTREntity.currentChanged.connect(self.entityTabIndexChanged) self.btnSearch.clicked.connect(self.searchEntityRelations) self.btnClearSearch.clicked.connect(self.clearSearch) # self.tvSTRResults.expanded.connect(self.onTreeViewItemExpanded) # Set the results treeview to accept requests for context menus # self.tvSTRResults.setContextMenuPolicy(Qt.CustomContextMenu) # self.tvSTRResults.customContextMenuRequested.connect( # self.onResultsContextMenuRequested # ) if not self._can_create: self.addSTR.hide() if not self._can_edit: self.editSTR.hide() else: self.editSTR.setDisabled(True) if not self._can_delete: self.deleteSTR.hide() else: self.deleteSTR.setDisabled(True) self.addSTR.triggered.connect(self.load_new_str_editor) self.deleteSTR.triggered.connect(self.delete_str) self.editSTR.triggered.connect(self.load_edit_str_editor) # Load async for the current widget self.entityTabIndexChanged(0) def init_progress_dialog(self): """ Initializes the progress dialog. """ self.progress = QProgressBar(self) self.progress.resize(self.width(), 10) self.progress.setTextVisible(False) def add_spatial_unit_layer(self): """ Add the spatial unit layer into the map canvas for later use. """ # Used for startup of view STR, just add the first geom layer. if len(self.geom_cols) > 0: for spatial_unit in self.spatial_units: layer_name_item = self.sp_unit_manager.geom_col_layer_name( spatial_unit.name, self.geom_cols[0] ) self.sp_unit_manager.add_layer_by_name(layer_name_item) def _check_permissions(self): """ Enable/disable actions based on the permissions defined in the content group. """ if self._can_edit: self.tb_actions.addAction(self._new_str_action) else: self.tb_actions.removeAction(self._new_str_action) if len(self.tb_actions.actions()) == 0: self.tb_actions.setVisible(False) else: self.tb_actions.setVisible(True) def _load_entity_configurations(self): """ Specify the entity configurations. """ try: self.parties = self.curr_profile.social_tenure.parties tb_str_entities = self.parties + self.spatial_units for i, t in enumerate(tb_str_entities): QApplication.processEvents() entity_cfg = self._entity_config_from_profile( str(t.name), t.short_name ) if entity_cfg is not None: entity_widget = self.add_entity_config(entity_cfg) # entity_widget.setNodeFormatter( # EntityNodeFormatter( # entity_cfg, self.tvSTRResults, self # ) # ) except DummyException as pe: self._notif_search_config.clear() self._notif_search_config.insertErrorNotification(str(pe)) def _entity_config_from_profile(self, table_name, short_name): """ Creates an EntityConfig object from the table name. :param table_name: Name of the database table. :type table_name: str :return: Entity configuration object. 
:rtype: EntityConfig """ table_display_name = format_name(short_name) entity = self.curr_profile.entity_by_name(table_name) model = entity_model(entity) if model is not None: # Entity configuration entity_cfg = EntityConfiguration() entity_cfg.Title = table_display_name entity_cfg.STRModel = model entity_cfg.data_source_name = table_name for col, factory in self._get_widget_factory(entity): entity_cfg.LookupFormatters[col.name] = factory # Load filter and display columns # using only those which are of # numeric/varchar type searchable_columns = entity_searchable_columns(entity) display_columns = entity_display_columns(entity) for c in searchable_columns: if c != 'id': entity_cfg.filterColumns[c] = format_name(c) for c in display_columns: if c != 'id': entity_cfg.displayColumns[c] = format_name(c) return entity_cfg else: return None def _get_widget_factory(self, entity): """ Get widget factory for specific column type :param entity: Current column entity object :type entity: Entity :return c: Column object corresponding to the widget factory :rtype c: BaseColumn :return col_factory: Widget factory corresponding to the column type :rtype col_factory: ColumnWidgetRegistry """ for c in entity.columns.values(): col_factory = ColumnWidgetRegistry.factory(c.TYPE_INFO) if col_factory is not None: yield c, col_factory(c) def add_entity_config(self, config): """ Set an entity configuration option and add it to the 'Search Entity' tab. """ entityWidg = STRViewEntityWidget(config) entityWidg.asyncStarted.connect(self._progressStart) entityWidg.asyncFinished.connect(self._progressFinish) tabIndex = self.tbSTREntity.addTab(entityWidg, config.Title) return entityWidg def entityTabIndexChanged(self, index): """ Raised when the tab index of the entity search tab widget changes. """ # Get the current widget in the tab container entityWidget = self.tbSTREntity.currentWidget() if isinstance(entityWidget, EntitySearchItem): entityWidget.loadAsync() def searchEntityRelations(self): """ Slot that searches for matching items for the specified entity and corresponding STR entities. """ entityWidget = self.tbSTREntity.currentWidget() entity_name = entityWidget.config.data_source_name self._reset_controls() if isinstance(entityWidget, EntitySearchItem): valid, msg = entityWidget.validate() if not valid: self._notif_search_config.clear() self._notif_search_config.insertErrorNotification(msg) return results, searchWord = entityWidget.executeSearch() # Show error message if len(results) == 0: noResultsMsg = QApplication.translate( 'ViewSTR', 'No results found for "{}"'.format(searchWord) ) self._notif_search_config.clear() self._notif_search_config.insertErrorNotification( noResultsMsg ) return party_names = [e.name for e in self.curr_profile.social_tenure.parties] entity = self.curr_profile.entity_by_name(entity_name) result_ids = [r.id for r in results] if entity_name in party_names: self.active_spu_id = self.details_tree_view.search_party( entity, result_ids ) else: self.details_tree_view.search_spatial_unit( entity, result_ids ) # self.tbPropertyPreview._iface.activeLayer().selectByExpression("id={}".format(self.active_spu_id)) # self.details_tree_view._selected_features = self.tbPropertyPreview._iface.activeLayer().selectedFeatures() # self._load_root_node(entity_name, formattedNode) def clearSearch(self): """ Clear search input parameters (for current widget) and results. 
""" entityWidget = self.tbSTREntity.currentWidget() if isinstance(entityWidget, EntitySearchItem): entityWidget.reset() self._reset_controls() def _reset_controls(self): # Clear tree view self._resetTreeView() # Clear document listings self._deleteSourceDocTabs() # Remove spatial unit memory layer self.tbPropertyPreview.remove_layer() def on_select_results(self): """ Slot which is raised when the selection is changed in the tree view selection model. """ if len(self.details_tree_view.view.selectedIndexes()) < 1: self.disable_buttons() return self.search_done = True index = self.details_tree_view.view.selectedIndexes()[0] item = self.details_tree_view.model.itemFromIndex(index) QApplication.processEvents() # STR node - edit social tenure relationship if item.text() == self.details_tree_view.str_text: entity = self.curr_profile.social_tenure str_model = self.details_tree_view.str_models[item.data()] documents = self.details_tree_view._supporting_doc_models( entity.name, str_model ) self._load_source_documents(documents) # if there is supporting document, # expand supporting document tab if len(documents) > 0: self.toolBox.setCurrentIndex(1) self.disable_buttons(False) # party node - edit party elif item.data() in self.details_tree_view.spatial_unit_items.keys(): self.toolBox.setCurrentIndex(0) entity = self.details_tree_view.spatial_unit_items[item.data()] model = self.details_tree_view.feature_model(entity, item.data()) self.draw_spatial_unit(entity.name, model) self.disable_buttons() canvas = iface.mapCanvas() if canvas: canvas.zoomToFullExtent() else: self.disable_buttons() def disable_buttons(self, status=True): if self._can_edit: self.deleteSTR.setDisabled(status) if self._can_delete: self.editSTR.setDisabled(status) def str_party_column_obj(self, record): """ Gets the current party column name in STR table by finding party column with value other than None. :param record: The STR record or result. :type record: Dictionary :return: The party column name with value. :rtype: String """ for party in self.parties: party_name = party.short_name.lower() party_id = '{}_id'.format(party_name) if party_id not in record.__dict__: return None if record.__dict__[party_id] is not None: party_id_obj = getattr(self.str_model, party_id) return party_id_obj def load_edit_str_editor(self): self.details_tree_view.edit_selected_node() self.btnSearch.click() self.disable_buttons() def load_new_str_editor(self): try: # Check type of node and perform corresponding action add_str = STREditor() add_str.exec_() except DummyException as ex: QMessageBox.critical( self._plugin.iface.mainWindow(), QApplication.translate( "STDMPlugin", "Loading Error" ), str(ex) ) def delete_str(self): self.details_tree_view.delete_selected_item() self.btnSearch.click() self.disable_buttons() def onSourceDocumentRemoved(self, container_id, doc_uuid, removed_doc): """ Slot raised when a source document is removed from the container. If there are no documents in the specified container then remove the tab. """ curr_container = self.tbSupportingDocs.currentWidget() curr_doc_widget = curr_container.findChildren(DocumentWidget) for doc in curr_doc_widget: if doc.fileUUID == doc_uuid: doc.deleteLater() self.removed_docs = removed_doc def draw_spatial_unit(self, entity_name, model): """ Render the geometry of the given spatial unit in the spatial view. :param row_id: Sqlalchemy object representing a feature. 
""" entity = self.curr_profile.entity_by_name(entity_name) self.tbPropertyPreview.draw_spatial_unit(entity, model) def showEvent(self, event): """ (Re)load map layers in the viewer and main canvas. :param event: Window event :type event: QShowEvent """ self.setEnabled(True) if QTimer is not None: QTimer.singleShot(200, self.init_mirror_map) return QMainWindow.showEvent(self, event) def init_mirror_map(self): self._notify_no_base_layers() # Add spatial unit layer if it doesn't exist self.tbPropertyPreview.refresh_canvas_layers() self.tbPropertyPreview.load_web_map() def _notify_no_base_layers(self): """ Checks if there are any base layers that will be used when visualizing the spatial units. If there are no base layers then insert warning message. """ self._notif_search_config.clear() num_layers = len(QgsProject.instance().mapLayers()) if num_layers == 0: msg = QApplication.translate( "ViewSTR", "No basemap layers are loaded in the " "current project. Basemap layers " "enhance the visualization of spatial units." ) self._notif_search_config.insertWarningNotification(msg) def _deleteSourceDocTabs(self): """ Removes all source document tabs and deletes their references. """ tabCount = self.tbSupportingDocs.count() while tabCount != 0: srcDocWidget = self.tbSupportingDocs.widget(tabCount - 1) self.tbSupportingDocs.removeTab(tabCount - 1) del srcDocWidget tabCount -= 1 self._strID = None self._source_doc_manager.reset() def _resetTreeView(self): """ Clears the results tree view. """ # Reset tree view strModel = self.details_tree_view.view.model() resultsSelModel = self.details_tree_view.view.selectionModel() if strModel: strModel.clear() if resultsSelModel: if self.search_done: resultsSelModel.selectionChanged.disconnect(self.on_select_results) resultsSelModel.selectionChanged.connect(self.on_select_results) def _load_source_documents(self, source_docs): """ Load source documents into document listing widget. """ # Configure progress dialog progress_msg = QApplication.translate( "ViewSTR", "Loading supporting documents..." ) progress_dialog = QProgressDialog(self) if len(source_docs) > 0: progress_dialog.setWindowTitle(progress_msg) progress_dialog.setRange(0, len(source_docs)) progress_dialog.setWindowModality(Qt.WindowModal) progress_dialog.setFixedWidth(380) progress_dialog.show() progress_dialog.setValue(0) self._notif_search_config.clear() self.tbSupportingDocs.clear() self._source_doc_manager.reset() if len(source_docs) < 1: empty_msg = QApplication.translate( 'ViewSTR', 'No supporting document is uploaded ' 'for this social tenure relationship.' 
) self._notif_search_config.clear() self._notif_search_config.insertWarningNotification(empty_msg) for i, (doc_type_id, doc_obj) in enumerate(source_docs.items()): # add tabs, and container and widget for each tab tab_title = self._source_doc_manager.doc_type_mapping[doc_type_id] tab_widget = QWidget() tab_widget.setObjectName(tab_title) cont_layout = QVBoxLayout(tab_widget) cont_layout.setObjectName('widget_layout_' + tab_title) scrollArea = QScrollArea(tab_widget) scrollArea.setFrameShape(QFrame.NoFrame) scrollArea_contents = QWidget() scrollArea_contents.setObjectName('tab_scroll_area_' + tab_title) tab_layout = QVBoxLayout(scrollArea_contents) tab_layout.setObjectName('layout_' + tab_title) scrollArea.setWidgetResizable(True) scrollArea.setWidget(scrollArea_contents) cont_layout.addWidget(scrollArea) self._source_doc_manager.registerContainer( tab_layout, doc_type_id ) for doc in doc_obj: try: # add doc widgets self._source_doc_manager.insertDocFromModel( doc, doc_type_id ) except DummyException as ex: LOGGER.debug(str(ex)) self.tbSupportingDocs.addTab( tab_widget, tab_title ) progress_dialog.setValue(i + 1) progress_dialog.deleteLater() del progress_dialog # def _on_node_reference_changed(self, rootHash): # """ # Method for resetting document listing and map preview # if another root node and its children # are selected then the documents are reset as # well as the map preview control. # """ # if rootHash != self._curr_rootnode_hash: # self._deleteSourceDocTabs() # self._curr_rootnode_hash = rootHash def _progressStart(self): """ Load progress dialog window. For items whose durations is unknown, 'isindefinite' = True by default. If 'isindefinite' is False, then 'rangeitems' has to be specified. """ pass def _progressFinish(self): """ Hide progress dialog window. """ pass def _edit_permissions(self): """ Returns True/False whether the current logged in user has permissions to create new social tenure relationships. If true, then the system assumes that they can also edit STR records. """ canEdit = False userName = globals.APP_DBCONN.User.UserName authorizer = Authorizer(userName) newSTRCode = "9576A88D-C434-40A6-A318-F830216CA15A" # Get the name of the content from the code cnt = Content() createSTRCnt = cnt.queryObject().filter( Content.code == newSTRCode ).first() if createSTRCnt: name = createSTRCnt.name canEdit = authorizer.CheckAccess(name) return canEdit class EntitySearchItem(QObject): """ Abstract class for implementation by widgets that enable users to search for entity records. """ def __init__(self, formatter=None): super().__init__() # Specify the formatter that should be # applied on the result item. It should # inherit from 'stdm.navigation.STRNodeFormatter' self.formatter = formatter def setNodeFormatter(self, formatter): """ Set the formatter that should be applied on the entity search results. """ self.formatter = formatter def validate(self): """ Method for validating the input arguments before a search is conducted. Should return bool indicating whether validation was successful and message (applicable if validation fails). """ raise NotImplementedError() def executeSearch(self): """ Implemented when the a search operation is executed. Should return tuple of formatted results for render in the tree view,raw object results and search word. """ raise NotImplementedError( str( QApplication.translate( "ViewSTR", "Subclass must implement abstract method." ) ) ) def loadAsync(self): """ Any initialization that needs to be carried out when the parent container is activated. 
""" pass def errorHandler(self, error): """ Generic handler that logs error messages to the QGIS message log """ # QgsMessageLog.logMessage(error,2) LOGGER.debug(error) def reset(self): """ Clear search results. """ pass WIDGET2, BASE2 = uic.loadUiType( GuiUtils.get_ui_file_path('ui_str_view_entity.ui')) class STRViewEntityWidget(WIDGET2, BASE2, EntitySearchItem): """ A widget that represents options for searching through an entity. """ asyncStarted = pyqtSignal() asyncFinished = pyqtSignal() def __init__(self, config, formatter=None, parent=None): QWidget.__init__(self, parent) EntitySearchItem.__init__(self, formatter) self.setupUi(self) self.tbSTRViewEntity.setTabIcon(0, GuiUtils.get_icon('filter.png')) self.tbSTRViewEntity.setTabIcon(1, GuiUtils.get_icon('period_blue.png')) self.config = config self.setConfigOptions() self.curr_profile = current_profile() self.social_tenure = self.curr_profile.social_tenure self.str_model = entity_model(self.social_tenure) # Model for storing display and actual mapping values self._completer_model = None self._proxy_completer_model = None # Hook up signals self.cboFilterCol.currentIndexChanged.connect( self._on_column_index_changed ) self.init_validity_dates() self.validity_from_date.dateChanged.connect( self.set_minimum_to_date ) self.validity.setDisabled(True) self.init_validity_checkbox() def init_validity_checkbox(self): self.check_box_list = [] self.validity_checkbox = QCheckBox() self.check_box_list.append(self.validity_checkbox) self.tbSTRViewEntity.tabBar().setTabButton( self.tbSTRViewEntity.tabBar().count() - 1, QTabBar.LeftSide, self.validity_checkbox ) self.validity_checkbox.stateChanged.connect(self.toggle_validity_period) def toggle_validity_period(self, state): if state == Qt.Checked: self.validity.setDisabled(False) else: self.validity.setDisabled(True) def set_minimum_to_date(self): """ Set the minimum to date based on the change in value of from date. :return: :rtype: """ self.validity_to_date.setMinimumDate( self.validity_from_date.date() ) def init_validity_dates(self): """ Initialize the dates by setting the current date. :return: :rtype: """ self.validity_from_date.setDate( date.today() ) self.validity_to_date.setDate( date.today() ) def setConfigOptions(self): """ Apply configuration options. """ # Set filter columns and remove id column for col_name, display_name in self.config.filterColumns.items(): if col_name != "id": self.cboFilterCol.addItem( display_name, col_name ) def loadAsync(self): """ Asynchronously loads an entity's attribute values. """ self.asyncStarted.emit() # Create model worker workerThread = QThread(self) modelWorker = ModelWorker() modelWorker.moveToThread(workerThread) # Connect signals modelWorker.error.connect(self.errorHandler) workerThread.started.connect( lambda: modelWorker.fetch( self.config.STRModel, self.currentFieldName() ) ) modelWorker.retrieved.connect(self._asyncFinished) modelWorker.retrieved.connect(workerThread.quit) workerThread.finished.connect(modelWorker.deleteLater) workerThread.finished.connect(workerThread.deleteLater) # Start thread workerThread.start() def validate(self): """ Validate entity search widget """ is_valid = True message = "" if self.txtFilterPattern.text() == "": message = QApplication.translate( "ViewSTR", "Search word cannot be empty." ) is_valid = False return is_valid, message def executeSearch(self): """ Base class override. Search for matching items for the specified entity and column. 
""" model_root_node = None prog_dialog = QProgressDialog(self) prog_dialog.setFixedWidth(380) prog_dialog.setWindowTitle( QApplication.translate( "STRViewEntityWidget", "Searching for STR..." ) ) prog_dialog.show() prog_dialog.setRange( 0, 10 ) search_term = self._searchTerm() prog_dialog.setValue(2) # Try to get the corresponding search term value from the completer model if self._completer_model is not None: reg_exp = QRegExp("^%s$" % search_term, Qt.CaseInsensitive, QRegExp.RegExp2) self._proxy_completer_model.setFilterRegExp(reg_exp) if self._proxy_completer_model.rowCount() > 0: # Get corresponding actual value from the first matching item value_model_idx = self._proxy_completer_model.index(0, 1) source_model_idx = self._proxy_completer_model.mapToSource( value_model_idx ) prog_dialog.setValue(4) search_term = self._completer_model.data( source_model_idx, Qt.DisplayRole ) modelInstance = self.config.STRModel() modelQueryObj = modelInstance.queryObject() queryObjProperty = getattr( self.config.STRModel, self.currentFieldName() ) entity_name = modelQueryObj._primary_entity._label_name entity = self.curr_profile.entity_by_name(entity_name) prog_dialog.setValue(6) # Get property type so that the filter can # be applied according to the appropriate type propType = queryObjProperty.property.columns[0].type results = [] try: if not isinstance(propType, String): col_name = self.currentFieldName() col = entity.columns[self.currentFieldName()] if col.TYPE_INFO == 'LOOKUP': lookup_entity = lookup_parent_entity( self.curr_profile, col_name ) lkp_model = entity_model(lookup_entity) lkp_obj = lkp_model() value_obj = getattr( lkp_model, 'value' ) result = lkp_obj.queryObject().filter( func.lower(value_obj) == func.lower(search_term) ).first() if result is None: result = lkp_obj.queryObject().filter( func.lower(value_obj).like(search_term + '%') ).first() if result is not None: results = modelQueryObj.filter( queryObjProperty == result.id ).all() else: results = [] else: results = modelQueryObj.filter( func.lower(queryObjProperty) == func.lower(search_term) ).all() if self.validity.isEnabled(): valid_str_ids = self.str_validity_period_filter(results) else: valid_str_ids = None prog_dialog.setValue(7) except exc.StatementError: prog_dialog.deleteLater() del prog_dialog return model_root_node, [], search_term # if self.formatter is not None: # self.formatter.setData(results) # model_root_node = self.formatter.root(valid_str_ids) prog_dialog.setValue(10) prog_dialog.hide() prog_dialog.deleteLater() del prog_dialog return results, search_term def str_validity_period_filter(self, results): """ Filter the entity results using validity period in STR table. :param results: Entity result :type results: SQLAlchemy result proxy :return: Valid list of STR ids :rtype: List """ self.str_model_obj = self.str_model() valid_str_ids = [] for result in results: from_date = self.validity_from_date.date().toPyDate() to_date = self.validity_to_date.date().toPyDate() entity_id = '{}_id'.format(result.__table__.name[3:]) str_column_obj = getattr(self.str_model, entity_id) str_result = self.str_model_obj.queryObject().filter( self.str_model.validity_start >= from_date).filter( self.str_model.validity_end <= to_date ).filter(str_column_obj == result.id).all() for res in str_result: valid_str_ids.append(res.id) return valid_str_ids def reset(self): """ Clear search input parameters. 
""" self.txtFilterPattern.clear() if self.cboFilterCol.count() > 0: self.cboFilterCol.setCurrentIndex(0) def currentFieldName(self): """ Returns the name of the database field from the current item in the combo box. """ curr_index = self.cboFilterCol.currentIndex() field_name = self.cboFilterCol.itemData(curr_index) if field_name is None: return else: return field_name def _searchTerm(self): """ Returns the search term specified by the user. """ return self.txtFilterPattern.text() def _asyncFinished(self, model_values): """ Slot raised when worker has finished retrieving items. """ # Create QCompleter and add values to it. self._update_completer(model_values) self.asyncFinished.emit() def _update_completer(self, values): # Get the items in a tuple and put them in a list # Store display and actual values in a # model for easier mapping and # retrieval when carrying out searches model_attr_mapping = [] # Check if there are formaters specified # for the current field name for mv in values: f_model_values = [] m_val = mv[0] if m_val is not None: col_label = self.currentFieldName() if col_label in self.config.LookupFormatters: formatter = self.config.LookupFormatters[col_label] if formatter.column.TYPE_INFO == 'LOOKUP': m_val = formatter.code_value(m_val)[0] else: m_val = formatter.format_column_value(m_val) f_model_values.extend([m_val, m_val]) model_attr_mapping.append(f_model_values) self._completer_model = BaseSTDMTableModel(model_attr_mapping, ["", ""], self) # We will use the QSortFilterProxyModel for filtering purposes self._proxy_completer_model = QSortFilterProxyModel() self._proxy_completer_model.setDynamicSortFilter(True) self._proxy_completer_model.setSourceModel(self._completer_model) self._proxy_completer_model.setSortCaseSensitivity(Qt.CaseInsensitive) self._proxy_completer_model.setFilterKeyColumn(0) # Configure completer mod_completer = QCompleter(self._completer_model, self) mod_completer.setCaseSensitivity(Qt.CaseInsensitive) mod_completer.setCompletionMode(QCompleter.PopupCompletion) mod_completer.setCompletionColumn(0) mod_completer.setCompletionRole(Qt.DisplayRole) self.txtFilterPattern.setCompleter(mod_completer) def _on_column_index_changed(self, int): """ Slot raised when the user selects a different filter column. """ self.txtFilterPattern.clear() self.loadAsync() class EntityConfiguration(object): """ Specifies the configuration to apply when creating a new tab widget for performing entity searches. """ browseDescription = "Click on the browse button below to load entity " \ "records and their corresponding social tenure " \ "relationship definitions." defaultFieldName = "" # Format of each dictionary item: # property/db column name - display name filterColumns = OrderedDict() displayColumns = OrderedDict() groupBy = "" STRModel = None Title = "" data_source_name = "" # Functions for formatting values before # they are loaded into the completer LookupFormatters = {} def __init__(self): # Reset filter and display columns self.filterColumns = OrderedDict() self.displayColumns = OrderedDict() class ModelWorker(QObject): """ Worker for retrieving model attribute values stored in the database. """ retrieved = pyqtSignal(object) error = pyqtSignal(str) pyqtSlot(object, str) def fetch(self, model, fieldname): """ Fetch attribute values from the database for the specified model and corresponding column name. 
""" try: if hasattr(model, fieldname): modelInstance = model() obj_property = getattr(model, fieldname) model_values = modelInstance.queryObject( [obj_property] ).distinct() self.retrieved.emit(model_values) except DummyException as ex: self.error.emit(str(ex))
gpl-2.0
-8,273,558,001,482,112,000
32.030441
120
0.562371
false
4.507661
true
false
false
multikatt/beets
beetsplug/permissions.py
1
3116
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:

    permissions:
            file: 644
            dir: 755
"""
from __future__ import (division, absolute_import, print_function,
                        unicode_literals)

import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry


def convert_perm(perm):
    """If the perm is an int it will first convert it to a string and back
    to an oct int. Else it just converts it to oct.
    """
    if isinstance(perm, int):
        return int(bytes(perm), 8)
    else:
        return int(perm, 8)


def check_permissions(path, permission):
    """Checks the permissions of a path.
    """
    return oct(os.stat(path).st_mode & 0o777) == oct(permission)


def dirs_in_library(library, item):
    """Creates a list of ancestor directories in the beets library path.
    """
    return [ancestor for ancestor in ancestry(item)
            if ancestor.startswith(library)][1:]


class Permissions(BeetsPlugin):
    def __init__(self):
        super(Permissions, self).__init__()

        # Adding defaults.
        self.config.add({
            u'file': 644,
            u'dir': 755
        })

        self.register_listener('item_imported', permissions)
        self.register_listener('album_imported', permissions)


def permissions(lib, item=None, album=None):
    """Running the permission fixer.
    """
    # Getting the config.
    file_perm = config['permissions']['file'].get()
    dir_perm = config['permissions']['dir'].get()

    # Converts permissions to oct.
    file_perm = convert_perm(file_perm)
    dir_perm = convert_perm(dir_perm)

    # Create chmod_queue.
    file_chmod_queue = []
    if item:
        file_chmod_queue.append(item.path)
    elif album:
        for album_item in album.items():
            file_chmod_queue.append(album_item.path)

    # A set of directories to change permissions for.
    dir_chmod_queue = set()

    for path in file_chmod_queue:
        # Changing permissions on the destination file.
        os.chmod(util.bytestring_path(path), file_perm)

        # Checks if the destination path has the permissions configured.
        if not check_permissions(util.bytestring_path(path), file_perm):
            message = 'There was a problem setting permission on {}'.format(
                path)
            print(message)

        # Adding directories to the directory chmod queue.
        dir_chmod_queue.update(
            dirs_in_library(config['directory'].get(),
                            path))

    # Change permissions for the directories.
    for path in dir_chmod_queue:
        # Changing permissions on the destination directory.
        os.chmod(util.bytestring_path(path), dir_perm)

        # Checks if the destination path has the permissions configured.
        if not check_permissions(util.bytestring_path(path), dir_perm):
            message = 'There was a problem setting permission on {}'.format(
                path)
            print(message)
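# --- Illustrative usage sketch (editor's addition; not part of the plugin) ---
# A minimal, hypothetical demonstration of the two helpers above:
# convert_perm() normalises a config value such as '644' into the octal mode
# 0o644, and check_permissions() verifies that a chmod actually took effect.
# The temporary file used here is arbitrary and only exists for the demo.
if __name__ == '__main__':
    import tempfile

    demo_perm = convert_perm('644')                    # '644' -> 0o644 (420 decimal)
    with tempfile.NamedTemporaryFile() as tmp:
        os.chmod(tmp.name, demo_perm)                  # apply the requested mode
        print(check_permissions(tmp.name, demo_perm))  # expected: True on POSIX systems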
mit
8,238,867,990,123,123,000
29.851485
78
0.626765
false
4.171352
true
false
false