Dataset columns:

repo_name         string   (length 5 to 92)
path              string   (length 4 to 221)
copies            string   (19 distinct values)
size              string   (length 4 to 6)
content           string   (length 766 to 896k)
license           string   (15 distinct values)
hash              int64    (-9,223,277,421,539,062,000 to 9,223,102,107B)
line_mean         float64  (6.51 to 99.9)
line_max          int64    (32 to 997)
alpha_frac        float64  (0.25 to 0.96)
autogenerated     bool     (1 class)
ratio             float64  (1.5 to 13.6)
config_test       bool     (2 classes)
has_no_keywords   bool     (2 classes)
few_assignments   bool     (1 class)
satterly/alerta5
alerta/app/database/backends/mongodb/base.py
1
40457
import json import pytz import re from datetime import datetime, timedelta from flask import current_app, g from pymongo import MongoClient, ASCENDING, TEXT, ReturnDocument from pymongo.errors import ConnectionFailure from alerta.app.models import status_code from alerta.app.utils.format import DateTime from alerta.app import severity from alerta.app.exceptions import NoCustomerMatch, ApiError # See https://github.com/MongoEngine/flask-mongoengine/blob/master/flask_mongoengine/__init__.py # See https://github.com/dcrosta/flask-pymongo/blob/master/flask_pymongo/__init__.py class Backend: def connect(self, config): conn = MongoClient(config.get('MONGO_URI', 'mongodb://localhost:27017/monitoring')) if config.get('MONGO_DATABASE', None): db = conn[config['MONGO_DATABASE']] else: db = conn.get_database() # create unique indexes db.alerts.create_index( [('environment', ASCENDING), ('customer', ASCENDING), ('resource', ASCENDING), ('event', ASCENDING)], unique=True ) db.alerts.create_index([('$**', TEXT)]) db.heartbeats.create_index([('origin', ASCENDING), ('customer', ASCENDING)], unique=True) db.metrics.create_index([('group', ASCENDING), ('name', ASCENDING)], unique=True) return conn, db @property def cx(self): return current_app.extensions['mongodb'][0] @property def db(self): return current_app.extensions['mongodb'][1] @property def version(self): return self.db.client.server_info()['version'] @property def is_alive(self): try: self.db.client.admin.command('ismaster') except ConnectionFailure: return False return True def close(self): self.db.close() def destroy(self, name=None): name = name or self.db.name self.cx.drop_database(name) def build_query(self, params): query_time = datetime.utcnow() # q if params.get('q', None): query = json.loads(params.pop('q')) else: query = dict() # customer if g.get('customer', None): query['customer'] = g.get('customer') # from-date, to-date from_date = params.get('from-date', default=None, type=DateTime.parse) to_date = params.get('to-date', default=query_time, type=DateTime.parse) if from_date and to_date: query['lastReceiveTime'] = {'$gt': from_date.replace(tzinfo=pytz.utc), '$lte': to_date.replace(tzinfo=pytz.utc)} elif to_date: query['lastReceiveTime'] = {'$lte': to_date.replace(tzinfo=pytz.utc)} # duplicateCount, repeat if params.get('duplicateCount', None): query['duplicateCount'] = params.get('duplicateCount', int) if params.get('repeat', None): query['repeat'] = params.get('repeat', default=True, type=lambda x: x == 'true') # sort-by sort = list() direction = 1 if params.get('reverse', None): direction = -1 if params.get('sort-by', None): for sort_by in params.getlist('sort-by'): if sort_by in ['createTime', 'receiveTime', 'lastReceiveTime']: sort.append((sort_by, -direction)) # reverse chronological else: sort.append((sort_by, direction)) else: sort.append(('lastReceiveTime', -direction)) # group-by group = params.getlist('group-by') # page, page-size, limit (deprecated) page = params.get('page', 1, int) limit = params.get('limit', current_app.config['DEFAULT_PAGE_SIZE'], int) page_size = params.get('page-size', limit, int) # id ids = params.getlist('id') if len(ids) == 1: query['$or'] = [{'_id': {'$regex': '^' + ids[0]}}, {'lastReceiveId': {'$regex': '^' + ids[0]}}] elif ids: query['$or'] = [{'_id': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}, {'lastReceiveId': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}] EXCLUDE_QUERY = ['q', 'id', 'from-date', 'to-date', 'repeat', 'sort-by', 'reverse', 'group-by', 'page', 
'page-size', 'limit'] # fields for field in params: if field in EXCLUDE_QUERY: continue value = params.getlist(field) if len(value) == 1: value = value[0] if field.endswith('!'): if value.startswith('~'): query[field[:-1]] = dict() query[field[:-1]]['$not'] = re.compile(value[1:], re.IGNORECASE) else: query[field[:-1]] = dict() query[field[:-1]]['$ne'] = value else: if value.startswith('~'): query[field] = dict() query[field]['$regex'] = re.compile(value[1:], re.IGNORECASE) else: query[field] = value else: if field.endswith('!'): if '~' in [v[0] for v in value]: value = '|'.join([v.lstrip('~') for v in value]) query[field[:-1]] = dict() query[field[:-1]]['$not'] = re.compile(value, re.IGNORECASE) else: query[field[:-1]] = dict() query[field[:-1]]['$nin'] = value else: if '~' in [v[0] for v in value]: value = '|'.join([v.lstrip('~') for v in value]) query[field] = dict() query[field]['$regex'] = re.compile(value, re.IGNORECASE) else: query[field] = dict() query[field]['$in'] = value return query, sort, group, page, page_size, query_time #### ALERTS def get_severity(self, alert): """ Get severity of correlated alert. Used to determine previous severity. """ query = { "environment": alert.environment, "resource": alert.resource, '$or': [ { "event": alert.event, "severity": {'$ne': alert.severity} }, { "event": {'$ne': alert.event}, "correlate": alert.event }], "customer": alert.customer } return self.db.alerts.find_one(query, projection={"severity": 1, "_id": 0})['severity'] def get_status(self, alert): """ Get status of correlated or duplicate alert. Used to determine previous status. """ query = { "environment": alert.environment, "resource": alert.resource, '$or': [ { "event": alert.event }, { "correlate": alert.event, } ], "customer": alert.customer } return self.db.alerts.find_one(query, projection={"status": 1, "_id": 0})['status'] def is_duplicate(self, alert): query = { "environment": alert.environment, "resource": alert.resource, "event": alert.event, "severity": alert.severity, "customer": alert.customer } return bool(self.db.alerts.find_one(query)) def is_correlated(self, alert): query = { "environment": alert.environment, "resource": alert.resource, '$or': [ { "event": alert.event, "severity": {'$ne': alert.severity} }, { "event": {'$ne': alert.event}, "correlate": alert.event }], "customer": alert.customer } return bool(self.db.alerts.find_one(query)) def is_flapping(self, alert, window=1800, count=2): """ Return true if alert severity has changed more than X times in Y seconds """ pipeline = [ {'$match': {"environment": alert.environment, "resource": alert.resource, "event": alert.event}}, {'$unwind': '$history'}, {'$match': { "history.updateTime": {'$gt': datetime.utcnow() - timedelta(seconds=window)}}, "history.type": "severity" }, { '$group': { "_id": '$history.type', "count": {'$sum': 1} } } ] responses = self.db.alerts.aggregate(pipeline) for r in responses: if r['count'] > count: return True return False def dedup_alert(self, alert): """ Update alert value, text and rawData, increment duplicate count and set repeat=True, and keep track of last receive id and time but don't append to history unless status changes. 
""" previous_status = self.get_status(alert) if alert.status != status_code.UNKNOWN and alert.status != previous_status: status = alert.status else: status = status_code.status_from_severity(alert.severity, alert.severity, previous_status) query = { "environment": alert.environment, "resource": alert.resource, "event": alert.event, "severity": alert.severity, "customer": alert.customer } now = datetime.utcnow() update = { '$set': { "status": status, "value": alert.value, "text": alert.text, "rawData": alert.raw_data, "repeat": True, "lastReceiveId": alert.id, "lastReceiveTime": now }, '$addToSet': {"tags": {'$each': alert.tags}}, '$inc': {"duplicateCount": 1} } # only update those attributes that are specifically defined attributes = {'attributes.'+k: v for k, v in alert.attributes.items()} update['$set'].update(attributes) if status != previous_status: update['$push'] = { "history": { '$each': [{ "event": alert.event, "status": status, "type": "status", "text": "duplicate alert status change", "id": alert.id, "updateTime": now }], '$slice': -abs(current_app.config['HISTORY_LIMIT']) } } return self.db.alerts.find_one_and_update( query, update=update, projection={"history": 0}, return_document=ReturnDocument.AFTER ) def correlate_alert(self, alert): """ Update alert key attributes, reset duplicate count and set repeat=False, keep track of last receive id and time, appending all to history. Append to history again if status changes. """ previous_severity = self.get_severity(alert) previous_status = self.get_status(alert) trend_indication = severity.trend(previous_severity, alert.severity) if alert.status == status_code.UNKNOWN: status = status_code.status_from_severity(previous_severity, alert.severity, previous_status) else: status = alert.status query = { "environment": alert.environment, "resource": alert.resource, '$or': [ { "event": alert.event, "severity": {'$ne': alert.severity} }, { "event": {'$ne': alert.event}, "correlate": alert.event }], "customer": alert.customer } now = datetime.utcnow() update = { '$set': { "event": alert.event, "severity": alert.severity, "status": status, "value": alert.value, "text": alert.text, "createTime": alert.create_time, "rawData": alert.raw_data, "duplicateCount": 0, "repeat": False, "previousSeverity": previous_severity, "trendIndication": trend_indication, "receiveTime": now, "lastReceiveId": alert.id, "lastReceiveTime": now }, '$addToSet': {"tags": {'$each': alert.tags}}, '$push': { "history": { '$each': [{ "event": alert.event, "severity": alert.severity, "value": alert.value, "type": "severity", "text": alert.text, "id": alert.id, "updateTime": now }], '$slice': -abs(current_app.config['HISTORY_LIMIT']) } } } # only update those attributes that are specifically defined attributes = {'attributes.'+k: v for k, v in alert.attributes.items()} update['$set'].update(attributes) if status != previous_status: update['$push']['history']['$each'].append({ "event": alert.event, "status": status, "type": "status", "text": "correlated alert status change", "id": alert.id, "updateTime": now }) return self.db.alerts.find_one_and_update( query, update=update, projection={"history": 0}, return_document=ReturnDocument.AFTER ) def create_alert(self, alert): data = { "_id": alert.id, "resource": alert.resource, "event": alert.event, "environment": alert.environment, "severity": alert.severity, "correlate": alert.correlate, "status": alert.status, "service": alert.service, "group": alert.group, "value": alert.value, "text": alert.text, "tags": alert.tags, 
"attributes": alert.attributes, "origin": alert.origin, "type": alert.event_type, "createTime": alert.create_time, "timeout": alert.timeout, "rawData": alert.raw_data, "customer": alert.customer, "duplicateCount": alert.duplicate_count, "repeat": alert.repeat, "previousSeverity": alert.previous_severity, "trendIndication": alert.trend_indication, "receiveTime": alert.receive_time, "lastReceiveId": alert.last_receive_id, "lastReceiveTime": alert.last_receive_time, "history": [h.serialize for h in alert.history] } if self.db.alerts.insert_one(data).inserted_id == alert.id: return data def get_alert(self, id, customer=None): if len(id) == 8: query = {'$or': [{'_id': {'$regex': '^' + id}}, {'lastReceiveId': {'$regex': '^' + id}}]} else: query = {'$or': [{'_id': id}, {'lastReceiveId': id}]} if customer: query['customer'] = customer return self.db.alerts.find_one(query) #### STATUS, TAGS, ATTRIBUTES def set_status(self, id, status, text=None): """ Set status and update history. """ query = {'_id': {'$regex': '^' + id}} event = self.db.alerts.find_one(query, projection={"event": 1, "_id": 0})['event'] if not event: return False now = datetime.utcnow() update = { '$set': {"status": status}, '$push': { "history": { '$each': [{ "event": event, "status": status, "type": "status", "text": text, "id": id, "updateTime": now }], '$slice': -abs(current_app.config['HISTORY_LIMIT']) } } } return self.db.alerts.find_one_and_update( query, update=update, projection={"history": 0}, return_document=ReturnDocument.AFTER ) def tag_alert(self, id, tags): """ Append tags to tag list. Don't add same tag more than once. """ response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, {'$addToSet': {"tags": {'$each': tags}}}) return response.matched_count > 0 def untag_alert(self, id, tags): """ Remove tags from tag list. """ response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, {'$pullAll': {"tags": tags}}) return response.matched_count > 0 def update_attributes(self, id, attrs): """ Set all attributes (including private attributes) and unset attributes by using a value of 'null'. """ update = dict() set_value = {'attributes.' + k: v for k, v in attrs.items() if v is not None} if set_value: update['$set'] = set_value unset_value = {'attributes.' 
+ k: v for k, v in attrs.items() if v is None} if unset_value: update['$unset'] = unset_value response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, update=update) return response.matched_count > 0 def delete_alert(self, id): response = self.db.alerts.delete_one({'_id': {'$regex': '^' + id}}) return True if response.deleted_count == 1 else False #### SEARCH & HISTORY def get_alerts(self, query=None, sort=None, page=1, page_size=0): return self.db.alerts.find(query, sort=sort).skip((page-1)*page_size).limit(page_size) def get_history(self, query=None, fields=None): if not fields: fields = { "resource": 1, "event": 1, "environment": 1, "customer": 1, "service": 1, "group": 1, "tags": 1, "attributes": 1, "origin": 1, "type": 1, "history": 1 } pipeline = [ {'$match': query}, {'$unwind': '$history'}, {'$project': fields}, {'$limit': current_app.config['HISTORY_LIMIT']}, {'$sort': {'history.updateTime': 1}} ] responses = self.db.alerts.aggregate(pipeline) history = list() for response in responses: if 'severity' in response['history']: history.append( { "id": response['_id'], # or response['history']['id'] "resource": response['resource'], "event": response['history']['event'], "environment": response['environment'], "severity": response['history']['severity'], "service": response['service'], "group": response['group'], "value": response['history']['value'], "text": response['history']['text'], "tags": response['tags'], "attributes": response['attributes'], "origin": response['origin'], "updateTime": response['history']['updateTime'], "type": response['history'].get('type', 'unknown'), "customer": response.get('customer', None) } ) elif 'status' in response['history']: history.append( { "id": response['_id'], # or response['history']['id'] "resource": response['resource'], "event": response['event'], "environment": response['environment'], "status": response['history']['status'], "service": response['service'], "group": response['group'], "text": response['history']['text'], "tags": response['tags'], "attributes": response['attributes'], "origin": response['origin'], "updateTime": response['history']['updateTime'], "type": response['history'].get('type', 'unknown'), "customer": response.get('customer', None) } ) return history #### COUNTS def get_count(self, query=None): """ Return total number of alerts that meet the query filter. 
""" return self.db.alerts.find(query).count() def get_counts(self, query=None, fields=None, group=None): pipeline = [ {'$match': query}, {'$project': fields or {}}, {'$group': {"_id": "$" + group, "count": {'$sum': 1}}} ] responses = self.db.alerts.aggregate(pipeline) counts = dict() for response in responses: counts[response['_id']] = response['count'] return counts def get_counts_by_severity(self, query=None): return self.get_counts(query, fields={"severity": 1}, group="severity") def get_counts_by_status(self, query=None): return self.get_counts(query, fields={"status": 1}, group="status") def get_topn_count(self, query=None, group="event", topn=10): pipeline = [ {'$match': query}, {'$unwind': '$service'}, { '$group': { "_id": "$%s" % group, "count": {'$sum': 1}, "duplicateCount": {'$sum': "$duplicateCount"}, "environments": {'$addToSet': "$environment"}, "services": {'$addToSet': "$service"}, "resources": {'$addToSet': {"id": "$_id", "resource": "$resource"}} } }, {'$sort': {"count": -1, "duplicateCount": -1}}, {'$limit': topn} ] responses = self.db.alerts.aggregate(pipeline) top = list() for response in responses: top.append( { "%s" % group: response['_id'], "environments": response['environments'], "services": response['services'], "resources": response['resources'], "count": response['count'], "duplicateCount": response['duplicateCount'] } ) return top def get_topn_flapping(self, query=None, group="event", topn=10): pipeline = [ {'$match': query}, {'$unwind': '$service'}, {'$unwind': '$history'}, {'$match': {"history.type": "severity"}}, { '$group': { "_id": "$%s" % group, "count": {'$sum': 1}, "duplicateCount": {'$max': "$duplicateCount"}, "environments": {'$addToSet': "$environment"}, "services": {'$addToSet': "$service"}, "resources": {'$addToSet': {"id": "$_id", "resource": "$resource"}} } }, {'$sort': {"count": -1, "duplicateCount": -1}}, {'$limit': topn} ] responses = self.db.alerts.aggregate(pipeline) top = list() for response in responses: top.append( { "%s" % group: response['_id'], "environments": response['environments'], "services": response['services'], "resources": response['resources'], "count": response['count'], "duplicateCount": response['duplicateCount'] } ) return top #### ENVIRONMENTS def get_environments(self, query=None, topn=100): pipeline = [ {'$match': query}, {'$project': {"environment": 1}}, {'$limit': topn}, {'$group': {"_id": "$environment", "count": {'$sum': 1}}} ] responses = self.db.alerts.aggregate(pipeline) environments = list() for response in responses: environments.append( { "environment": response['_id'], "count": response['count'] } ) return environments #### SERVICES def get_services(self, query=None, topn=100): pipeline = [ {'$unwind': '$service'}, {'$match': query}, {'$project': {"environment": 1, "service": 1}}, {'$limit': topn}, {'$group': {"_id": {"environment": "$environment", "service": "$service"}, "count": {'$sum': 1}}} ] responses = self.db.alerts.aggregate(pipeline) services = list() for response in responses: services.append( { "environment": response['_id']['environment'], "service": response['_id']['service'], "count": response['count'] } ) return services #### BLACKOUTS def create_blackout(self, blackout): data = { "_id": blackout.id, "priority": blackout.priority, "environment": blackout.environment, "startTime": blackout.start_time, "endTime": blackout.end_time, "duration": blackout.duration } if blackout.service: data["service"] = blackout.service if blackout.resource: data["resource"] = blackout.resource if blackout.event: 
data["event"] = blackout.event if blackout.group: data["group"] = blackout.group if blackout.tags: data["tags"] = blackout.tags if blackout.customer: data["customer"] = blackout.customer if self.db.blackouts.insert_one(data).inserted_id == blackout.id: return data def get_blackout(self, id, customer=None): query = {'_id': id} if customer: query['customer'] = customer return self.db.blackouts.find_one(query) def get_blackouts(self, query=None, page=1, page_size=0): return self.db.blackouts.find(query).skip((page - 1) * page_size).limit(page_size) def is_blackout_period(self, alert): now = datetime.utcnow() query = dict() query['startTime'] = {'$lte': now} query['endTime'] = {'$gt': now} query['environment'] = alert.environment query['$or'] = [ { "resource": {'$exists': False}, "service": {'$exists': False}, "event": {'$exists': False}, "group": {'$exists': False}, "tags": {'$exists': False} }, { "resource": alert.resource, "service": {'$exists': False}, "event": {'$exists': False}, "group": {'$exists': False}, "tags": {'$exists': False} }, { "resource": {'$exists': False}, "service": {"$not": {"$elemMatch": {"$nin": alert.service}}}, "event": {'$exists': False}, "group": {'$exists': False}, "tags": {'$exists': False} }, { "resource": {'$exists': False}, "service": {'$exists': False}, "event": alert.event, "group": {'$exists': False}, "tags": {'$exists': False} }, { "resource": {'$exists': False}, "service": {'$exists': False}, "event": {'$exists': False}, "group": alert.group, "tags": {'$exists': False} }, { "resource": alert.resource, "service": {'$exists': False}, "event": alert.event, "group": {'$exists': False}, "tags": {'$exists': False} }, { "resource": {'$exists': False}, "service": {'$exists': False}, "event": {'$exists': False}, "group": {'$exists': False}, "tags": {"$not": {"$elemMatch": {"$nin": alert.tags}}} } ] if self.db.blackouts.find_one(query): return True if current_app.config['CUSTOMER_VIEWS']: query['customer'] = alert.customer if self.db.blackouts.find_one(query): return True return False def delete_blackout(self, id): response = self.db.blackouts.delete_one({"_id": id}) return True if response.deleted_count == 1 else False #### HEARTBEATS def upsert_heartbeat(self, heartbeat): return self.db.heartbeats.find_one_and_update( { "origin": heartbeat.origin, "customer": heartbeat.customer }, { '$setOnInsert': { "_id": heartbeat.id }, '$set': { "origin": heartbeat.origin, "tags": heartbeat.tags, "type": heartbeat.event_type, "createTime": heartbeat.create_time, "timeout": heartbeat.timeout, "receiveTime": heartbeat.receive_time, "customer": heartbeat.customer } }, upsert=True, return_document=ReturnDocument.AFTER ) def get_heartbeat(self, id, customer=None): if len(id) == 8: query = {'_id': {'$regex': '^' + id}} else: query = {'_id': id} if customer: query['customer'] = customer return self.db.heartbeats.find_one(query) def get_heartbeats(self, query=None, page=1, page_size=0): return self.db.heartbeats.find(query).skip((page - 1) * page_size).limit(page_size) def delete_heartbeat(self, id): response = self.db.heartbeats.delete_one({'_id': {'$regex': '^' + id}}) return True if response.deleted_count == 1 else False #### API KEYS # save def create_key(self, key): data = { "_id": key.key, "user": key.user, "scopes": key.scopes, "text": key.text, "expireTime": key.expire_time, "count": key.count, "lastUsedTime": key.last_used_time } if key.customer: data['customer'] = key.customer if self.db.keys.insert_one(data).inserted_id == key.key: return data # get def get_key(self, key, 
customer=None): query = {'$or': [{'key': key}, {'_id': key}]} if customer: query['customer'] = customer return self.db.keys.find_one(query) # list def get_keys(self, query=None, page=1, page_size=0): return self.db.keys.find(query).skip((page - 1) * page_size).limit(page_size) # update def update_key_last_used(self, key): return self.db.keys.update_one( {'$or': [{'key': key}, {'_id': key}]}, { '$set': {"lastUsedTime": datetime.utcnow()}, '$inc': {"count": 1} } ).matched_count == 1 # delete def delete_key(self, key): query = {'$or': [{'key': key}, {'_id': key}]} response = self.db.keys.delete_one(query) return True if response.deleted_count == 1 else False #### USERS def create_user(self, user): data = { "_id": user.id, "name": user.name, "password": user.password, "email": user.email, "createTime": user.create_time, "lastLogin": user.last_login, "text": user.text, "email_verified": user.email_verified } if self.db.users.insert_one(data).inserted_id == user.id: return data # get def get_user(self, id, customer=None): query = {'_id': id} if customer: query['customer'] = customer return self.db.users.find_one(query) # list def get_users(self, query=None, page=1, page_size=0): return self.db.users.find(query).skip((page - 1) * page_size).limit(page_size) def get_user_by_email(self, email): query = {"email": email} return self.db.users.find_one(query) def get_user_by_hash(self, hash): query = {"hash": hash} return self.db.users.find_one(query) def get_user_password(self, id): return def update_last_login(self, id): return self.db.users.update_one( {"_id": id}, update={'$set': {"lastLogin": datetime.utcnow()}} ).matched_count == 1 def set_email_hash(self, id, hash): return self.db.users.update_one( {"_id": id}, update={'$set': {'hash': hash, 'updateTime': datetime.utcnow()}} ).matched_count == 1 def update_user(self, id, **kwargs): return self.db.users.find_one_and_update( {"_id": id}, update={'$set': kwargs}, return_document=ReturnDocument.AFTER ) def delete_user(self, id): response = self.db.users.delete_one({"_id": id}) return True if response.deleted_count == 1 else False #### PERMISSIONS def create_perm(self, perm): data = { "_id": perm.id, "match": perm.match, "scopes": perm.scopes } if self.db.perms.insert_one(data).inserted_id == perm.id: return data def get_perm(self, id): query = {'_id': id} return self.db.perms.find_one(query) def get_perms(self, query=None, page=1, page_size=0): return self.db.perms.find(query).skip((page - 1) * page_size).limit(page_size) def delete_perm(self, id): response = self.db.perms.delete_one({"_id": id}) return True if response.deleted_count == 1 else False def get_scopes_by_match(self, login, matches): if login in current_app.config['ADMIN_USERS']: return ['admin', 'read', 'write'] scopes = list() for match in matches: response = self.db.perms.find_one({"match": match}, projection={"scopes": 1, "_id": 0}) if response: scopes.extend(response['scopes']) return set(scopes) or current_app.config['USER_DEFAULT_SCOPES'] #### CUSTOMERS def create_customer(self, customer): data = { "_id": customer.id, "match": customer.match, "customer": customer.customer } if self.db.customers.insert_one(data).inserted_id == customer.id: return data def get_customer(self, id): query = {'_id': id} return self.db.customers.find_one(query) def get_customers(self, query=None, page=1, page_size=0): return self.db.customers.find(query).skip((page - 1) * page_size).limit(page_size) def delete_customer(self, id): response = self.db.customers.delete_one({"_id": id}) return True if 
response.deleted_count == 1 else False def get_customers_by_match(self, login, matches): if login in current_app.config['ADMIN_USERS']: return '*' # all customers for match in [login] + matches: response = self.db.customers.find_one({"match": match}, projection={"customer": 1, "_id": 0}) if response: return response['customer'] raise NoCustomerMatch("No customer lookup configured for user '%s' or '%s'" % (login, ','.join(matches))) #### METRICS def get_metrics(self, type=None): query = {"type": type} if type else {} return list(self.db.metrics.find(query, {"_id": 0})) def set_gauge(self, group, name, title=None, description=None, value=0): return self.db.metrics.find_one_and_update( { "group": group, "name": name }, { '$set': { "group": group, "name": name, "title": title, "description": description, "value": value, "type": "gauge" } }, upsert=True, return_document=ReturnDocument.AFTER )['value'] def get_gauges(self): from alerta.app.models.metrics import Gauge return [ Gauge( group=g.get('group'), name=g.get('name'), title=g.get('title', ''), description=g.get('description', ''), value=g.get('value', 0) ) for g in self.db.metrics.find({"type": "gauge"}, {"_id": 0}) ] def inc_counter(self, group, name, title=None, description=None, count=1): return self.db.metrics.find_one_and_update( { "group": group, "name": name }, { '$set': { "group": group, "name": name, "title": title, "description": description, "type": "counter" }, '$inc': {"count": count} }, upsert=True, return_document=ReturnDocument.AFTER )['count'] def get_counters(self): from alerta.app.models.metrics import Counter return [ Counter( group=c.get('group'), name=c.get('name'), title=c.get('title', ''), description=c.get('description', ''), count=c.get('count', 0) ) for c in self.db.metrics.find({"type": "counter"}, {"_id": 0}) ] def update_timer(self, group, name, title=None, description=None, count=1, duration=0): return self.db.metrics.find_one_and_update( { "group": group, "name": name }, { '$set': { "group": group, "name": name, "title": title, "description": description, "type": "timer" }, '$inc': {"count": count, "totalTime": duration} }, upsert=True, return_document=ReturnDocument.AFTER ) def get_timers(self): from alerta.app.models.metrics import Timer return [ Timer( group=t.get('group'), name=t.get('name'), title=t.get('title', ''), description=t.get('description', ''), count=t.get('count', 0), total_time=t.get('totalTime', 0) ) for t in self.db.metrics.find({"type": "timer"}, {"_id": 0}) ]
apache-2.0
-2,205,730,669,139,260,000
33.756873
133
0.470994
false
4.378463
false
false
false
ckan/ckanext-archiver
ckanext/archiver/lib.py
1
1725
import os
import logging

import ckan.plugins as p

from ckanext.archiver.tasks import update_package, update_resource

log = logging.getLogger(__name__)


def compat_enqueue(name, fn, queue, args=None):
    u'''
    Enqueue a background job using Celery or RQ.
    '''
    try:
        # Try to use RQ
        from ckan.plugins.toolkit import enqueue_job
        enqueue_job(fn, args=args, queue=queue)
    except ImportError:
        # Fallback to Celery
        import uuid
        from ckan.lib.celery_app import celery
        celery.send_task(name, args=args + [queue], task_id=str(uuid.uuid4()))


def create_archiver_resource_task(resource, queue):
    from pylons import config
    if p.toolkit.check_ckan_version(max_version='2.2.99'):
        # earlier CKANs had ResourceGroup
        package = resource.resource_group.package
    else:
        package = resource.package
    ckan_ini_filepath = os.path.abspath(config['__file__'])
    compat_enqueue('archiver.update_resource', update_resource, queue,
                   [ckan_ini_filepath, resource.id])
    log.debug('Archival of resource put into celery queue %s: %s/%s url=%r',
              queue, package.name, resource.id, resource.url)


def create_archiver_package_task(package, queue):
    from pylons import config
    ckan_ini_filepath = os.path.abspath(config['__file__'])
    compat_enqueue('archiver.update_package', update_package, queue,
                   [ckan_ini_filepath, package.id])
    log.debug('Archival of package put into celery queue %s: %s',
              queue, package.name)


def get_extra_from_pkg_dict(pkg_dict, key, default=None):
    for extra in pkg_dict.get('extras', []):
        if extra['key'] == key:
            return extra['value']
    return default
mit
7,860,166,736,346,997,000
30.944444
104
0.663188
false
3.571429
false
false
false
jhermann/rituals
src/rituals/util/shell.py
1
2127
# -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
""" Shell command calls.
"""
# Copyright ⓒ 2015 Jürgen Hermann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full LICENSE file and source are available at
# https://github.com/jhermann/rituals
from __future__ import absolute_import, unicode_literals, print_function

import sys

from invoke import run as invoke_run
from invoke import exceptions

from . import notify


def capture(cmd, **kw):
    """Run a command and return its stripped captured output."""
    kw = kw.copy()
    kw['hide'] = 'out'
    if not kw.get('echo', False):
        kw['echo'] = False
    ignore_failures = kw.pop('ignore_failures', False)
    try:
        return invoke_run(cmd, **kw).stdout.strip()
    except exceptions.Failure as exc:
        if not ignore_failures:
            notify.error("Command `{}` failed with RC={}!".format(cmd, exc.result.return_code,))
        raise


def run(cmd, **kw):
    """Run a command and flush its output."""
    kw = kw.copy()
    kw.setdefault('warn', False)  # make extra sure errors don't get silenced
    report_error = kw.pop('report_error', True)
    runner = kw.pop('runner', invoke_run)
    try:
        return runner(cmd, **kw)
    except exceptions.Failure as exc:
        sys.stdout.flush()
        sys.stderr.flush()
        if report_error:
            notify.error("Command `{}` failed with RC={}!".format(cmd, exc.result.return_code,))
        raise
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
gpl-2.0
1,718,451,169,136,169,700
31.676923
96
0.669021
false
3.840868
false
false
false
Nth-iteration-labs/streamingbandit
app/handlers/evalhandlers.py
1
3985
# -* coding: utf-8 -*-
import json
import numpy as np

from handlers.basehandler import BaseHandler, ExceptionHandler
from core.experiment import Experiment

global numpy
global random


class Simulate(BaseHandler):

    def get(self, exp_id):
        """ Simulate your experiment based on four scripts, which create a closed feedback loop.

        +--------------------------------------------------------------------+
        | Example                                                              |
        +====================================================================+
        |http://example.com/eval/EXP_ID/simulate?N=1000&log_stats=True        |
        |&verbose=True&seed=10                                                |
        +--------------------------------------------------------------------+

        :requires: A secure cookie, obtained by logging in.
        :param int exp_id: Experiment ID as specified in the url.
        :param int N: The number of simulation draws.
        :param bool log_stats: Flag for logging the results in the database (default is False)
        :param bool verbose: Flag for displaying the results in the returning JSON object (default is True)
        :param int seed (optional): Set numpy seed.
        :returns: A JSON indicating success when verbose flag is False, and a JSON with all the data when verbose flag is True.
        :raises 400: If the experiment does not belong to this user or the exp_id is wrong.
        :raises 401: If user is not logged in or if there is no secure cookie available.
        """
        if self.get_current_user():
            if self.validate_user_experiment(exp_id):
                N = int(self.get_argument("N", default = 1000))
                log_stats = self.get_argument("log_stats", default = False)
                verbose = self.get_argument("verbose", default = True)
                seed = self.get_argument("seed", default = None)
                if seed is None:
                    seed = np.random.randint(2**32-1, dtype=np.uint32)
                if verbose == "True":
                    verbose = True
                else:
                    verbose = False
                if log_stats == "True":
                    log_stats = True
                else:
                    log_stats = False
                __EXP__ = Experiment(exp_id)
                data = {}
                with self.temp_seed(int(seed)):
                    for i in range(N):
                        # Generate context
                        context = __EXP__.run_context_code()
                        # Get action
                        action = __EXP__.run_action_code(context, {})
                        # Generate reward
                        reward = __EXP__.run_get_reward_code(context, action)
                        # Set reward
                        __EXP__.run_reward_code(context, action, reward)
                        # Get theta
                        theta = __EXP__.get_theta()
                        # Save stats
                        data[str(i)] = {'context' : context.copy(),
                                        'action' : action.copy(),
                                        'reward' : reward.copy(),
                                        'theta' : theta.copy()}
                        context.clear()
                        action.clear()
                        reward.clear()
                if log_stats == True:
                    __EXP__.log_simulation_data(data.copy())
                data_tmp = data.copy()
                data.clear()
                if verbose == True:
                    self.write(json.dumps({'simulate':'success', 'experiment':exp_id, 'data':data_tmp}))
                else:
                    self.write(json.dumps({'simulate':'success', 'experiment':exp_id, 'theta':theta}))
            else:
                raise ExceptionHandler(reason="Experiment could not be validated.", status_code=401)
        else:
            raise ExceptionHandler(reason="Could not validate user.", status_code=401)
mit
-3,311,606,737,012,722,000
42.791209
143
0.473275
false
5.018892
false
false
false
brettcs/diffoscope
diffoscope/comparators/utils/archive.py
1
3833
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2016 Chris Lamb <[email protected]>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.

import abc
import logging

from diffoscope.profiling import profile
from diffoscope.tempfiles import get_temporary_directory

from ..missing_file import MissingFile

from .file import File
from .container import Container

logger = logging.getLogger(__name__)


class Archive(Container, metaclass=abc.ABCMeta):
    def __new__(cls, source, *args, **kwargs):
        if isinstance(source, MissingFile):
            return super(Container, MissingArchive).__new__(MissingArchive)
        else:
            return super(Container, cls).__new__(cls)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        with profile('open_archive', self):
            self._archive = self.open_archive()

    def __del__(self):
        with profile('close_archive', self):
            self.close_archive()

    @property
    def archive(self):
        return self._archive

    @abc.abstractmethod
    def open_archive(self):
        raise NotImplementedError()

    @abc.abstractmethod
    def close_archive(self):
        raise NotImplementedError()

    @abc.abstractmethod
    def get_member_names(self):
        raise NotImplementedError()

    @abc.abstractmethod
    def extract(self, member_name, dest_dir):
        raise NotImplementedError()

    def get_member(self, member_name):
        return ArchiveMember(self, member_name)


class ArchiveMember(File):
    def __init__(self, container, member_name):
        super().__init__(container=container)
        self._name = member_name
        self._temp_dir = None
        self._path = None

    @property
    def path(self):
        if self._path is None:
            logger.debug("Unpacking %s", self._name)
            assert self._temp_dir is None
            self._temp_dir = get_temporary_directory()
            with profile('container_extract', self.container):
                self._path = self.container.extract(self._name, self._temp_dir.name)
        return self._path

    def cleanup(self):
        if self._path is not None:
            self._path = None
        if self._temp_dir is not None:
            self._temp_dir.cleanup()
            self._temp_dir = None
        super().cleanup()

    def is_directory(self):
        return False

    def is_symlink(self):
        return False

    def is_device(self):
        return False


class MissingArchiveLikeObject(object):
    def getnames(self):
        return []

    def list(self, *args, **kwargs):
        return ''

    def close(self):
        pass


class MissingArchive(Archive):
    @property
    def source(self):
        return None

    def open_archive(self):
        return MissingArchiveLikeObject()

    def close_archive(self):
        pass

    def get_member_names(self):
        return []

    def extract(self, member_name, dest_dir):
        # should never be called
        raise NotImplementedError()

    def get_member(self, member_name):
        return MissingFile('/dev/null')

    # Be nice to gzip and the likes
    @property
    def path(self):
        return '/dev/null'
gpl-3.0
5,791,410,520,121,028,000
25.611111
84
0.63857
false
4.12931
false
false
false
edx/pyrasite
pyrasite/inspector.py
1
1168
# This file is part of pyrasite.
#
# pyrasite is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyrasite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyrasite. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011, 2012 Red Hat, Inc., Luke Macken <[email protected]>

import subprocess


def inspect(pid, address):
    "Return the value of an object in a given process at the specified address"
    cmd = ' '.join([
        'gdb --quiet -p %s -batch' % pid,
        '-eval-command="print (PyObject *)%s"' % address,
    ])
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    for line in p.communicate()[0].split('\n'):
        if line.startswith('$1 = '):
            return line[5:]
gpl-3.0
1,011,668,733,586,748,900
39.275862
79
0.694349
false
3.719745
false
false
false
ojii/django-shop
tests/testapp/settings.py
1
4005
# Django settings for example project.

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Christopher Glass', '[email protected]'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'test.sqlite',                   # Or path to database file if using sqlite3.
        'USER': '',                              # Not used with sqlite3.
        'PASSWORD': '',                          # Not used with sqlite3.
        'HOST': '',                              # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                              # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Zurich'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h2%uf!luks79rw^4!5%q#v2znc87g_)@^jf1og!04@&&tsf7*9'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]

import django
if django.VERSION[0] < 1 or django.VERSION[1] <3:
    MIDDLEWARE_CLASSES.append('cbv.middleware.DeferredRenderingMiddleware')

ROOT_URLCONF = 'testapp.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    'django.contrib.admindocs',
    'polymorphic',  # We need polymorphic installed for the shop
    'shop',  # The django SHOP application
    'shop.addressmodel',
    'project',  # the test project application
)

# The shop settings:
SHOP_CART_MODIFIERS= ['shop.cart.modifiers.rebate_modifiers.BulkRebateModifier']
SHOP_SHIPPING_BACKENDS=['shop.shipping.backends.flat_rate.FlatRateShipping']

# Shop module settings
SHOP_SHIPPING_FLAT_RATE = '10'  # That's just for the flat rate shipping backend
bsd-3-clause
-8,624,996,170,369,027,000
34.758929
122
0.698127
false
3.608108
false
false
false
cgqyh/pyalgotrade-mod
pyalgotrade/tools/quandl.py
1
5711
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""

import datetime
import os

from pyalgotrade import bar
from pyalgotrade.barfeed import quandlfeed
from pyalgotrade.utils import dt
from pyalgotrade.utils import csvutils
import pyalgotrade.logger


# http://www.quandl.com/help/api
def download_csv(sourceCode, tableCode, begin, end, frequency, authToken):
    url = "http://www.quandl.com/api/v1/datasets/%s/%s.csv" % (sourceCode, tableCode)
    params = {
        "trim_start": begin.strftime("%Y-%m-%d"),
        "trim_end": end.strftime("%Y-%m-%d"),
        "collapse": frequency
    }
    if authToken is not None:
        params["auth_token"] = authToken
    return csvutils.download_csv(url, params)


def download_daily_bars(sourceCode, tableCode, year, csvFile, authToken=None):
    """Download daily bars from Quandl for a given year.

    :param sourceCode: The dataset's source code.
    :type sourceCode: string.
    :param tableCode: The dataset's table code.
    :type tableCode: string.
    :param year: The year.
    :type year: int.
    :param csvFile: The path to the CSV file to write.
    :type csvFile: string.
    :param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
    :type authToken: string.
    """

    bars = download_csv(sourceCode, tableCode, datetime.date(year, 1, 1), datetime.date(year, 12, 31), "daily", authToken)
    f = open(csvFile, "w")
    f.write(bars)
    f.close()


def download_weekly_bars(sourceCode, tableCode, year, csvFile, authToken=None):
    """Download weekly bars from Quandl for a given year.

    :param sourceCode: The dataset's source code.
    :type sourceCode: string.
    :param tableCode: The dataset's table code.
    :type tableCode: string.
    :param year: The year.
    :type year: int.
    :param csvFile: The path to the CSV file to write.
    :type csvFile: string.
    :param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
    :type authToken: string.
    """

    begin = dt.get_first_monday(year) - datetime.timedelta(days=1)  # Start on a sunday
    end = dt.get_last_monday(year) - datetime.timedelta(days=1)  # Start on a sunday
    bars = download_csv(sourceCode, tableCode, begin, end, "weekly", authToken)
    f = open(csvFile, "w")
    f.write(bars)
    f.close()


def build_feed(sourceCode, tableCodes, fromYear, toYear, storage, frequency=bar.Frequency.DAY, timezone=None,
               skipErrors=False, noAdjClose=False, authToken=None):
    """Build and load a :class:`pyalgotrade.barfeed.quandlfeed.Feed` using CSV files downloaded from Quandl.
    CSV files are downloaded if they haven't been downloaded before.

    :param sourceCode: The dataset source code.
    :type sourceCode: string.
    :param tableCodes: The dataset table codes.
    :type tableCodes: list.
    :param fromYear: The first year.
    :type fromYear: int.
    :param toYear: The last year.
    :type toYear: int.
    :param storage: The path were the files will be loaded from, or downloaded to.
    :type storage: string.
    :param frequency: The frequency of the bars. Only **pyalgotrade.bar.Frequency.DAY** or **pyalgotrade.bar.Frequency.WEEK** are supported.
    :param timezone: The default timezone to use to localize bars. Check :mod:`pyalgotrade.marketsession`.
    :type timezone: A pytz timezone.
    :param skipErrors: True to keep on loading/downloading files in case of errors.
    :type skipErrors: boolean.
    :param noAdjClose: True if the instruments don't have adjusted close values.
    :type noAdjClose: boolean.
    :param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
    :type authToken: string.

    :rtype: :class:`pyalgotrade.barfeed.quandlfeed.Feed`.
    """

    logger = pyalgotrade.logger.getLogger("quandl")
    ret = quandlfeed.Feed(frequency, timezone)
    if noAdjClose:
        ret.setNoAdjClose()

    if not os.path.exists(storage):
        logger.info("Creating %s directory" % (storage))
        os.mkdir(storage)

    for year in range(fromYear, toYear+1):
        for tableCode in tableCodes:
            fileName = os.path.join(storage, "%s-%s-%d-quandl.csv" % (sourceCode, tableCode, year))
            if not os.path.exists(fileName):
                logger.info("Downloading %s %d to %s" % (tableCode, year, fileName))
                try:
                    if frequency == bar.Frequency.DAY:
                        download_daily_bars(sourceCode, tableCode, year, fileName, authToken)
                    elif frequency == bar.Frequency.WEEK:
                        download_weekly_bars(sourceCode, tableCode, year, fileName, authToken)
                    else:
                        raise Exception("Invalid frequency")
                except Exception, e:
                    if skipErrors:
                        logger.error(str(e))
                        continue
                    else:
                        raise e
            ret.addBarsFromCSV(tableCode, fileName)
    return ret
apache-2.0
-7,145,280,726,221,592,000
38.386207
162
0.66941
false
3.653871
false
false
false
evancasey/demeter
demeter/unsup/common/image_pool.py
1
1090
import tensorflow as tf
import copy


class ImagePool:
    def __init__(self, pool_size):
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        if self.pool_size == 0:
            return images
        ret_imgs = []
        for i in range(images.shape[0]):
            image = tf.expand_dims(images[i], axis=0)
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                ret_imgs.append(image)
            else:
                p = tf.random_uniform((1,), 0, 1).numpy()[0]
                if p > 0.5:
                    random_id = tf.random_uniform((1,), 0, self.pool_size - 1).numpy()[0].astype(int)
                    tmp = copy.copy(self.images[random_id])
                    self.images[random_id] = image
                    ret_imgs.append(tmp)
                else:
                    ret_imgs.append(image)
        ret_imgs = tf.concat(ret_imgs, 0)
        return ret_imgs
mit
6,230,843,943,123,238,000
33.0625
101
0.480734
false
3.633333
false
false
false
mvaled/sentry
src/sentry/south_migrations/0277_auto__add_commitfilechange__add_unique_commitfilechange_commit_filenam.py
1
92625
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CommitFileChange' db.create_table( 'sentry_commitfilechange', ( ( 'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')( primary_key=True ) ), ( 'organization_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')( db_index=True ) ), ( 'commit', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')( to=orm['sentry.Commit'] ) ), ('filename', self.gf('django.db.models.fields.CharField')(max_length=255)), ('type', self.gf('django.db.models.fields.CharField')(max_length=1)), ) ) db.send_create_signal('sentry', ['CommitFileChange']) # Adding unique constraint on 'CommitFileChange', fields ['commit', 'filename'] db.create_unique('sentry_commitfilechange', ['commit_id', 'filename']) # Adding field 'Repository.url' db.add_column( 'sentry_repository', 'url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True), keep_default=False ) # Adding field 'Repository.provider' db.add_column( 'sentry_repository', 'provider', self.gf('django.db.models.fields.CharField')(max_length=64, null=True), keep_default=False ) # Adding field 'Repository.external_id' db.add_column( 'sentry_repository', 'external_id', self.gf('django.db.models.fields.CharField')(max_length=64, null=True), keep_default=False ) # Adding field 'Repository.config' db.add_column( 'sentry_repository', 'config', self.gf('sentry.db.models.fields.jsonfield.JSONField')(default={}), keep_default=False ) # Adding field 'Repository.status' db.add_column( 'sentry_repository', 'status', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')( default=0, db_index=True ), keep_default=False ) # Adding unique constraint on 'Repository', fields ['organization_id', # 'provider', 'external_id'] db.create_unique('sentry_repository', ['organization_id', 'provider', 'external_id']) def backwards(self, orm): # Removing unique constraint on 'Repository', fields ['organization_id', # 'provider', 'external_id'] db.delete_unique('sentry_repository', ['organization_id', 'provider', 'external_id']) # Removing unique constraint on 'CommitFileChange', fields ['commit', 'filename'] db.delete_unique('sentry_commitfilechange', ['commit_id', 'filename']) # Deleting model 'CommitFileChange' db.delete_table('sentry_commitfilechange') # Deleting field 'Repository.url' db.delete_column('sentry_repository', 'url') # Deleting field 'Repository.provider' db.delete_column('sentry_repository', 'provider') # Deleting field 'Repository.external_id' db.delete_column('sentry_repository', 'external_id') # Deleting field 'Repository.config' db.delete_column('sentry_repository', 'config') # Deleting field 'Repository.status' db.delete_column('sentry_repository', 'status') models = { 'sentry.activity': { 'Meta': { 'object_name': 'Activity' }, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True' }), 'datetime': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'null': 'True' } ) }, 'sentry.apikey': { 'Meta': { 'object_name': 'ApiKey' }, 'allowed_origins': ('django.db.models.fields.TextField', [], { 'null': 'True', 'blank': 'True' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '32' }), 'label': ( 'django.db.models.fields.CharField', [], { 'default': "'Default'", 'max_length': '64', 'blank': 'True' } ), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'key_set'", 'to': "orm['sentry.Organization']" } ), 'scopes': ('django.db.models.fields.BigIntegerField', [], { 'default': 'None' }), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ) }, 'sentry.apitoken': { 'Meta': { 'object_name': 'ApiToken' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.ApiKey']", 'null': 'True' } ), 'scopes': ('django.db.models.fields.BigIntegerField', [], { 'default': 'None' }), 'token': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '64' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.auditlogentry': { 'Meta': { 'object_name': 'AuditLogEntry' }, 'actor': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']" } ), 'actor_key': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True' } ), 'actor_label': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True', 'blank': 'True' } ), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ip_address': ( 'django.db.models.fields.GenericIPAddressField', [], { 'max_length': '39', 'null': 'True' } ), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True' }), 'target_user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']" } ) }, 'sentry.authenticator': { 'Meta': { 'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'" }, 'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}), 'created_at': ('django.db.models.fields.DateTimeField', [], { 'default': 
'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], { 'primary_key': 'True' }), 'last_used_at': ('django.db.models.fields.DateTimeField', [], { 'null': 'True' }), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.authidentity': { 'Meta': { 'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity' }, 'auth_provider': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.AuthProvider']" } ), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], { 'default': '{}' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'last_synced': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'last_verified': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.authprovider': { 'Meta': { 'object_name': 'AuthProvider' }, 'config': ('sentry.db.models.fields.jsonfield.JSONField', [], { 'default': '{}' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'default_global_access': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '50' }), 'default_teams': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True' } ), 'flags': ('django.db.models.fields.BigIntegerField', [], { 'default': '0' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_sync': ('django.db.models.fields.DateTimeField', [], { 'null': 'True' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']", 'unique': 'True' } ), 'provider': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True' }) }, 'sentry.broadcast': { 'Meta': { 'object_name': 'Broadcast' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'date_expires': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime(2016, 11, 29, 0, 0)', 'null': 'True', 'blank': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True', 'db_index': 'True' }), 'link': ( 'django.db.models.fields.URLField', [], { 'max_length': '200', 'null': 'True', 'blank': 'True' } ), 'message': ('django.db.models.fields.CharField', [], { 'max_length': '256' }), 'title': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'upstream_id': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'null': 'True', 'blank': 'True' } ) }, 'sentry.broadcastseen': { 'Meta': { 'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen' }, 'broadcast': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Broadcast']" } ), 'date_seen': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.commit': { 'Meta': { 'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)" }, 'author': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.CommitAuthor']", 'null': 'True' } ), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'message': ('django.db.models.fields.TextField', [], { 'null': 'True' }), 'organization_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ), 'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.commitauthor': { 'Meta': { 'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor' }, 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '128', 'null': 'True' }), 'organization_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ) }, 'sentry.commitfilechange': { 'Meta': { 'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange' }, 'commit': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Commit']" } ), 'filename': ('django.db.models.fields.CharField', [], { 'max_length': '255' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'organization_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ), 'type': ('django.db.models.fields.CharField', [], { 'max_length': '1' }) }, 'sentry.counter': { 'Meta': { 'object_name': 'Counter', 'db_table': "'sentry_projectcounter'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'unique': 'True' } ), 'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.dsymbundle': { 'Meta': { 'object_name': 'DSymBundle' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'object': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.DSymObject']" } ), 'sdk': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.DSymSDK']" } ) }, 'sentry.dsymobject': { 'Meta': { 'object_name': 'DSymObject' }, 'cpu_name': ('django.db.models.fields.CharField', [], { 'max_length': '40' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'object_path': ('django.db.models.fields.TextField', [], { 'db_index': 'True' }), 'uuid': ('django.db.models.fields.CharField', [], { 'max_length': '36', 'db_index': 'True' }), 'vmaddr': 
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True' }), 'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True' }) }, 'sentry.dsymsdk': { 'Meta': { 'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]" }, 'dsym_type': ('django.db.models.fields.CharField', [], { 'max_length': '20', 'db_index': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'sdk_name': ('django.db.models.fields.CharField', [], { 'max_length': '20' }), 'version_build': ('django.db.models.fields.CharField', [], { 'max_length': '40' }), 'version_major': ('django.db.models.fields.IntegerField', [], {}), 'version_minor': ('django.db.models.fields.IntegerField', [], {}), 'version_patchlevel': ('django.db.models.fields.IntegerField', [], {}) }, 'sentry.dsymsymbol': { 'Meta': { 'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol' }, 'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'db_index': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'object': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.DSymObject']" } ), 'symbol': ('django.db.models.fields.TextField', [], {}) }, 'sentry.environment': { 'Meta': { 'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.event': { 'Meta': { 'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)" }, 'data': ('sentry.db.models.fields.node.NodeField', [], { 'null': 'True', 'blank': 'True' }), 'datetime': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'event_id': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'null': 'True', 'db_column': "'message_id'" } ), 'group_id': ( 'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True', 'blank': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project_id': ( 'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True', 'blank': 'True' } ), 'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'null': 'True' }) }, 'sentry.eventmapping': { 'Meta': { 'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'event_id': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventtag': { 'Meta': { 
'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))" }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventuser': { 'Meta': { 'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))" }, 'date_added': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75', 'null': 'True' }), 'hash': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ('django.db.models.fields.CharField', [], { 'max_length': '128', 'null': 'True' }), 'ip_address': ( 'django.db.models.fields.GenericIPAddressField', [], { 'max_length': '39', 'null': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'username': ('django.db.models.fields.CharField', [], { 'max_length': '128', 'null': 'True' }) }, 'sentry.file': { 'Meta': { 'object_name': 'File' }, 'blob': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']" } ), 'blobs': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False' } ), 'checksum': ('django.db.models.fields.CharField', [], { 'max_length': '40', 'null': 'True' }), 'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], { 'default': '{}' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'path': ('django.db.models.fields.TextField', [], { 'null': 'True' }), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True' }), 'timestamp': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'type': ('django.db.models.fields.CharField', [], { 'max_length': '64' }) }, 'sentry.fileblob': { 'Meta': { 'object_name': 'FileBlob' }, 'checksum': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '40' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'path': ('django.db.models.fields.TextField', [], { 'null': 'True' }), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'null': 'True' }), 'timestamp': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ) }, 'sentry.fileblobindex': { 'Meta': { 'unique_together': "(('file', 'blob', 'offset'),)", 
'object_name': 'FileBlobIndex' }, 'blob': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.FileBlob']" } ), 'file': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.File']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.globaldsymfile': { 'Meta': { 'object_name': 'GlobalDSymFile' }, 'cpu_name': ('django.db.models.fields.CharField', [], { 'max_length': '40' }), 'file': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.File']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'object_name': ('django.db.models.fields.TextField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '36' }) }, 'sentry.group': { 'Meta': { 'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)" }, 'active_at': ('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'db_index': 'True' }), 'culprit': ( 'django.db.models.fields.CharField', [], { 'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True' } ), 'data': ( 'sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True', 'blank': 'True' } ), 'first_release': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT' } ), 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_public': ( 'django.db.models.fields.NullBooleanField', [], { 'default': 'False', 'null': 'True', 'blank': 'True' } ), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'level': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '40', 'db_index': 'True', 'blank': 'True' } ), 'logger': ( 'django.db.models.fields.CharField', [], { 'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True' } ), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'null': 'True' } ), 'platform': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'resolved_at': ('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'db_index': 'True' }), 'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'default': '0' }), 'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True' }), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ), 'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'default': '0' }), 'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], { 'default': '0' }), 'times_seen': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '1', 'db_index': 'True' } ) }, 'sentry.groupassignee': { 'Meta': { 
'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'" }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'assignee_set'", 'to': "orm['sentry.Project']" } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']" } ) }, 'sentry.groupbookmark': { 'Meta': { 'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark' }, 'date_added': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True' } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']" } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']" } ) }, 'sentry.groupemailthread': { 'Meta': { 'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread' }, 'date': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'msgid': ('django.db.models.fields.CharField', [], { 'max_length': '100' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']" } ) }, 'sentry.grouphash': { 'Meta': { 'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'null': 'True' } ), 'hash': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ) }, 'sentry.groupmeta': { 'Meta': { 'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.groupredirect': { 'Meta': { 'object_name': 'GroupRedirect' }, 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'db_index': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'previous_group_id': 
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'unique': 'True' }) }, 'sentry.grouprelease': { 'Meta': { 'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease' }, 'environment': ('django.db.models.fields.CharField', [], { 'default': "''", 'max_length': '64' }), 'first_seen': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'project_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ), 'release_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ) }, 'sentry.groupresolution': { 'Meta': { 'object_name': 'GroupResolution' }, 'datetime': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'unique': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'release': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Release']" } ), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.grouprulestatus': { 'Meta': { 'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_active': ('django.db.models.fields.DateTimeField', [], { 'null': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'rule': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Rule']" } ), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], { 'default': '0' }) }, 'sentry.groupseen': { 'Meta': { 'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_seen': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'db_index': 'False' } ) }, 'sentry.groupsnooze': { 'Meta': { 'object_name': 'GroupSnooze' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'unique': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'until': ('django.db.models.fields.DateTimeField', [], {}) }, 'sentry.groupsubscription': { 'Meta': { 'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription' }, 'date_added': ( 'django.db.models.fields.DateTimeField', [], { 
'default': 'datetime.datetime.now', 'null': 'True' } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'subscription_set'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'subscription_set'", 'to': "orm['sentry.Project']" } ), 'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.grouptagkey': { 'Meta': { 'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey' }, 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.grouptagvalue': { 'Meta': { 'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)" }, 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'grouptag'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']" } ), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '200' }) }, 'sentry.lostpasswordhash': { 'Meta': { 'object_name': 'LostPasswordHash' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'hash': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'unique': 'True' } ) }, 'sentry.option': { 'Meta': { 'object_name': 'Option' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '64' }), 'last_updated': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': { 'object_name': 'Organization' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 
'datetime.datetime.now' }), 'default_role': ('django.db.models.fields.CharField', [], { 'default': "'member'", 'max_length': '32' }), 'flags': ('django.db.models.fields.BigIntegerField', [], { 'default': '1' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'members': ( 'django.db.models.fields.related.ManyToManyField', [], { 'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']" } ), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'slug': ('django.db.models.fields.SlugField', [], { 'unique': 'True', 'max_length': '50' }), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.organizationaccessrequest': { 'Meta': { 'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'member': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.OrganizationMember']" } ), 'team': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Team']" } ) }, 'sentry.organizationmember': { 'Meta': { 'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'email': ( 'django.db.models.fields.EmailField', [], { 'max_length': '75', 'null': 'True', 'blank': 'True' } ), 'flags': ('django.db.models.fields.BigIntegerField', [], { 'default': '0' }), 'has_global_access': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'member_set'", 'to': "orm['sentry.Organization']" } ), 'role': ('django.db.models.fields.CharField', [], { 'default': "'member'", 'max_length': '32' }), 'teams': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True' } ), 'token': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True' } ), 'type': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '50', 'blank': 'True' } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']" } ) }, 'sentry.organizationmemberteam': { 'Meta': { 'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'" }, 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'organizationmember': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.OrganizationMember']" } ), 'team': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Team']" } ) }, 'sentry.organizationonboardingtask': { 'Meta': { 'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask' }, 'data': ('sentry.db.models.fields.jsonfield.JSONField', 
[], { 'default': '{}' }), 'date_completed': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'project_id': ( 'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True', 'blank': 'True' } ), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'null': 'True' } ) }, 'sentry.organizationoption': { 'Meta': { 'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.project': { 'Meta': { 'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'first_event': ('django.db.models.fields.DateTimeField', [], { 'null': 'True' }), 'forced_color': ( 'django.db.models.fields.CharField', [], { 'max_length': '6', 'null': 'True', 'blank': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '200' }), 'organization': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'public': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'slug': ('django.db.models.fields.SlugField', [], { 'max_length': '50', 'null': 'True' }), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ), 'team': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Team']" } ) }, 'sentry.projectbookmark': { 'Meta': { 'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark' }, 'date_added': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project_id': ( 'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], { 'null': 'True', 'blank': 'True' } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.projectdsymfile': { 'Meta': { 'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile' }, 'cpu_name': ('django.db.models.fields.CharField', [], { 'max_length': '40' }), 'file': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.File']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'object_name': ('django.db.models.fields.TextField', [], {}), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 
'uuid': ('django.db.models.fields.CharField', [], { 'max_length': '36' }) }, 'sentry.projectkey': { 'Meta': { 'object_name': 'ProjectKey' }, 'date_added': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'label': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True', 'blank': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'key_set'", 'to': "orm['sentry.Project']" } ), 'public_key': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'unique': 'True', 'null': 'True' } ), 'roles': ('django.db.models.fields.BigIntegerField', [], { 'default': '1' }), 'secret_key': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'unique': 'True', 'null': 'True' } ), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ) }, 'sentry.projectoption': { 'Meta': { 'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.projectplatform': { 'Meta': { 'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_seen': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'platform': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.release': { 'Meta': { 'unique_together': "(('project', 'version'),)", 'object_name': 'Release' }, 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], { 'default': '{}' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'date_released': ('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'blank': 'True' }), 'date_started': ('django.db.models.fields.DateTimeField', [], { 'null': 'True', 'blank': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'owner': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'ref': ( 'django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True', 'blank': 'True' } ), 'url': ( 'django.db.models.fields.URLField', [], { 'max_length': '200', 'null': 'True', 'blank': 'True' } ), 'version': ('django.db.models.fields.CharField', [], { 'max_length': '64' }) }, 'sentry.releasecommit': { 'Meta': { 'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit' }, 'commit': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Commit']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'project_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ), 'release': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Release']" } ) }, 'sentry.releaseenvironment': { 'Meta': { 'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'" }, 'environment_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ), 'first_seen': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'project_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ), 'release_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ) }, 'sentry.releasefile': { 'Meta': { 'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile' }, 'file': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.File']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ('django.db.models.fields.CharField', [], { 'max_length': '40' }), 'name': ('django.db.models.fields.TextField', [], {}), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'release': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Release']" } ) }, 'sentry.repository': { 'Meta': { 'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository' }, 'config': ('sentry.db.models.fields.jsonfield.JSONField', [], { 'default': '{}' }), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'external_id': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '200' }), 'organization_id': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'db_index': 'True' } ), 'provider': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ), 'url': ('django.db.models.fields.URLField', [], { 'max_length': '200', 'null': 'True' }) }, 'sentry.rule': { 'Meta': { 'object_name': 'Rule' }, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'label': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 
'status': ( 'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ) }, 'sentry.savedsearch': { 'Meta': { 'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_default': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'query': ('django.db.models.fields.TextField', [], {}) }, 'sentry.savedsearchuserdefault': { 'Meta': { 'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'savedsearch': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.SavedSearch']" } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ) }, 'sentry.tagkey': { 'Meta': { 'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'label': ('django.db.models.fields.CharField', [], { 'max_length': '64', 'null': 'True' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.tagvalue': { 'Meta': { 'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'" }, 'data': ( 'sentry.db.models.fields.gzippeddict.GzippedDictField', [], { 'null': 'True', 'blank': 'True' } ), 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '200' }) }, 'sentry.team': { 'Meta': { 'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team' }, 'date_added': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'organization': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Organization']" } ), 'slug': ('django.db.models.fields.SlugField', [], { 'max_length': '50' }), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], { 'default': '0' }) }, 'sentry.user': { 'Meta': { 'object_name': 'User', 'db_table': "'auth_user'" }, 'date_joined': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75', 'blank': 'True' }), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'is_managed': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'is_password_expired': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'is_staff': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'is_superuser': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'last_login': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'last_password_change': ('django.db.models.fields.DateTimeField', [], { 'null': 'True' }), 'name': ( 'django.db.models.fields.CharField', [], { 'max_length': '200', 'db_column': "'first_name'", 'blank': 'True' } ), 'password': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'session_nonce': ('django.db.models.fields.CharField', [], { 'max_length': '12', 'null': 'True' }), 'username': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '128' }) }, 'sentry.useravatar': { 'Meta': { 'object_name': 'UserAvatar' }, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], { 'default': '0' }), 'file': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'ident': ( 'django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '32', 'db_index': 'True' } ), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']" } ) }, 'sentry.useremail': { 'Meta': { 'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail' }, 'date_hash_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'is_verified': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'user': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'related_name': "'emails'", 'to': "orm['sentry.User']" } ), 'validation_hash': ( 'django.db.models.fields.CharField', [], { 'default': "u'nWSQmbINKkiwvRzlFaq4iWFfAr22O7g3'", 'max_length': '32' } ) }, 'sentry.useroption': { 'Meta': { 'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'user': ( 
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.User']" } ), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.userreport': { 'Meta': { 'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))" }, 'comments': ('django.db.models.fields.TextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75' }), 'event_id': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'group': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']", 'null': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'project': ( 'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" } ) } } complete_apps = ['sentry']
license: bsd-3-clause
hash: -4,519,465,274,450,707,000
line_mean: 35.799762
line_max: 97
alpha_frac: 0.40068
autogenerated: false
ratio: 4.716141
config_test: false
has_no_keywords: false
few_assignments: false
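For reference, here is a minimal plain-Django sketch of the CommitFileChange table that the forwards() step of the migration above creates. The field names, lengths, and the (commit, filename) unique constraint are taken from the create_table/create_unique calls; Sentry's FlexibleForeignKey, BoundedBigAutoField, and BoundedPositiveIntegerField classes are replaced with stock Django fields purely for illustration, so this is an approximation rather than Sentry's actual model definition.

# Illustrative sketch only -- stock Django fields stand in for Sentry's
# FlexibleForeignKey / BoundedBigAutoField / BoundedPositiveIntegerField.
from django.db import models

class CommitFileChange(models.Model):
    # Matches the columns created by db.create_table('sentry_commitfilechange', ...)
    organization_id = models.PositiveIntegerField(db_index=True)
    commit = models.ForeignKey('sentry.Commit', on_delete=models.CASCADE)
    filename = models.CharField(max_length=255)
    type = models.CharField(max_length=1)

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_commitfilechange'
        # Mirrors db.create_unique('sentry_commitfilechange', ['commit_id', 'filename'])
        unique_together = (('commit', 'filename'),)

The Repository columns added by the same migration (url, provider, external_id, config, status) would appear analogously as nullable URL/char/JSON/status fields on the existing Repository model.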
repo_name: sassoftware/mint
path: mint/buildtypes.py
copies: 1
size: 14373
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pyflakes=ignore-file import sys from conary.deps import deps validBuildTypes = { 'BOOTABLE_IMAGE' : 0, 'INSTALLABLE_ISO' : 1, 'STUB_IMAGE' : 2, 'RAW_FS_IMAGE' : 3, 'NETBOOT_IMAGE' : 4, 'TARBALL' : 5, 'LIVE_ISO' : 6, 'RAW_HD_IMAGE' : 7, 'VMWARE_IMAGE' : 8, 'VMWARE_ESX_IMAGE' : 9, 'VIRTUAL_PC_IMAGE' : 10, 'XEN_OVA' : 11, 'VIRTUAL_IRON' : 12, 'PARALLELS' : 13, 'AMI' : 14, 'UPDATE_ISO' : 15, 'APPLIANCE_ISO' : 16, 'IMAGELESS' : 17, 'VMWARE_OVF_IMAGE' : 18, 'WINDOWS_ISO' : 19, 'WINDOWS_WIM' : 20, 'DEFERRED_IMAGE' : 21, 'DOCKER_IMAGE' : 22, } TYPES = validBuildTypes.values() # add all the defined image types directly to the module so that the standard # approach of "buildtypes.IMAGE_TYPE" will result in the expected enum sys.modules[__name__].__dict__.update(validBuildTypes) deprecatedBuildTypes = { 'QEMU_IMAGE' : RAW_HD_IMAGE } windowsBuildTypes = set([ WINDOWS_ISO, WINDOWS_WIM, ]) # # These are identifying pieces of information that we can extract from the # flavor of a build, but not necessarily tied to any particular build type. # # These can sometimes be used as a buildType, indexes starting at 100. # flavorFlags = { 'XEN_DOMU': 100, 'APPLIANCE': 101, } FLAG_TYPES = flavorFlags.values() flavorFlagsFromId = dict((x[1], x[0]) for x in flavorFlags.items()) sys.modules[__name__].__dict__.update(flavorFlags) flavorFlagFlavors = { XEN_DOMU: "use: xen, domU", APPLIANCE: "use: appliance", } flavorFlagNames = { XEN_DOMU: "DomU", APPLIANCE: "Appliance", } #BOOTABLE_IMAGE Should never get stored in the DB and therefore doesn't need a name # NOTA BENE: Using Latin-1 here is harmful to XML-RPC which expects UTF-8 # Until we figure out the root cause, use "(R)" for registered trademark here. 
typeNames = { NETBOOT_IMAGE: "Netboot Image", INSTALLABLE_ISO: "Installable CD/DVD", RAW_FS_IMAGE: "Raw Filesystem Image", STUB_IMAGE: "Stub Image", RAW_HD_IMAGE: "Raw Hard Disk Image", VMWARE_IMAGE: "VMware (R) Virtual Appliance", VMWARE_ESX_IMAGE: "VMware (R) ESX Server Virtual Appliance", VMWARE_OVF_IMAGE: "VMware (R) Virtual Appliance OVF", LIVE_ISO: "Demo CD/DVD (Live CD/DVD)", TARBALL: "Compressed Tar File", VIRTUAL_PC_IMAGE: "VHD for Microsoft (R) Hyper-V", XEN_OVA: "Citrix XenServer (TM) Appliance", VIRTUAL_IRON: "Virtual Iron Virtual Appliance", PARALLELS: "Parallels Virtual Appliance", AMI: "Amazon Machine Image (EC2)", UPDATE_ISO: "Update CD/DVD", APPLIANCE_ISO: "Appliance Installable ISO", DEFERRED_IMAGE: "Layered Image", WINDOWS_ISO: "Windows Installable ISO", WINDOWS_WIM: "Windows Imaging Format (WIM)", IMAGELESS: "Online Update", DOCKER_IMAGE: "Docker Image", } typeNamesShort = { NETBOOT_IMAGE: "Netboot", INSTALLABLE_ISO: "Inst CD/DVD", RAW_FS_IMAGE: "Raw FS", STUB_IMAGE: "Stub", RAW_HD_IMAGE: "HDD", VMWARE_IMAGE: "VMware (R)", VMWARE_ESX_IMAGE: "VMware (R) ESX", LIVE_ISO: "Demo CD/DVD", TARBALL: "Tar", VIRTUAL_PC_IMAGE: "Microsoft (R) Hyper-V", XEN_OVA: "Citrix XenServer (TM)", VIRTUAL_IRON: "Virtual Iron", PARALLELS: "Parallels", AMI: "AMI", UPDATE_ISO: "Update CD/DVD", APPLIANCE_ISO: "Appliance Inst", DEFERRED_IMAGE: "Layered", WINDOWS_ISO: "Windows Inst", WINDOWS_WIM: "Windows WIM", IMAGELESS: "Online Update", VMWARE_OVF_IMAGE: "VMware (R) OVF", DOCKER_IMAGE: "Docker", } # To be used to map image types ids from XML tag names # used the build definition contained within the # product definition. # # Note: Only supported image types are contained here. # Thus you will not see XML tags for the following: # - STUB_IMAGE # - PARALLELS # # Furthermore, we don't support IMAGELESS builds # in the context of a product definition. 
# xmlTagNameImageTypeMap = { 'amiImage': AMI, 'applianceIsoImage': APPLIANCE_ISO, 'deferredImage': DEFERRED_IMAGE, 'dockerImage': DOCKER_IMAGE, 'installableIsoImage': INSTALLABLE_ISO, 'liveIsoImage': LIVE_ISO, 'netbootImage': NETBOOT_IMAGE, 'rawFsImage': RAW_FS_IMAGE, 'rawHdImage': RAW_HD_IMAGE, 'tarballImage': TARBALL, 'updateIsoImage': UPDATE_ISO, 'vhdImage': VIRTUAL_PC_IMAGE, 'virtualIronImage': VIRTUAL_IRON, 'vmwareImage': VMWARE_IMAGE, 'vmwareEsxImage': VMWARE_ESX_IMAGE, 'vmwareOvfImage': VMWARE_OVF_IMAGE, 'xenOvaImage': XEN_OVA, 'imageless': IMAGELESS, 'windowsIsoImage': WINDOWS_ISO, 'wimImage': WINDOWS_WIM, } imageTypeXmlTagNameMap = dict([(v,k) for k,v in xmlTagNameImageTypeMap.iteritems()]) typeNamesMarketing = { NETBOOT_IMAGE: "Netboot Image", INSTALLABLE_ISO: "Legacy Installable CD/DVD", RAW_FS_IMAGE: "Eucalyptus/Mountable Filesystem", STUB_IMAGE: "Stub Image", RAW_HD_IMAGE: "OpenStack/KVM/QEMU/Raw Hard Disk", VMWARE_IMAGE: "VMware(R) Workstation/Fusion / Parallels(R) Virtual Appliance", VMWARE_ESX_IMAGE: "VMware(R) ESX/VCD / Oracle(R) VirtualBox Virtual Appliance", VMWARE_OVF_IMAGE: "VMware(R) Virtual Appliance OVF", LIVE_ISO: "Demo CD/DVD (Live CD/DVD)", TARBALL: "TAR File", VIRTUAL_PC_IMAGE: "VHD for Microsoft(R) Hyper-V(R)", XEN_OVA: "Citrix(R) XenServer(TM) Appliance", VIRTUAL_IRON: "Virtual Iron Virtual Appliance", PARALLELS: "Parallels(R) Virtual Appliance", AMI: "Amazon Machine Image (EC2)", UPDATE_ISO: "Update CD/DVD", APPLIANCE_ISO: "Appliance Installable ISO", DEFERRED_IMAGE: "Layered Image", WINDOWS_ISO: "Installable CD/DVD (ISO)", WINDOWS_WIM: "Windows Imaging Format (WIM)", IMAGELESS: "Online Update", DOCKER_IMAGE: "Docker Image", # flavor flags here XEN_DOMU: "DomU", APPLIANCE: "Appliance", } buildTypeExtra = { APPLIANCE_ISO: "This image type will not work without using " "a version of anaconda-templates based on " "rPath Linux 2.", IMAGELESS: "Select this image type to mark a group for " "later publishing to an Update Service." } buildTypeIcons = { VMWARE_IMAGE: dict( icon="get-vmware-player.png", href="http://www.vmware.com/download/player/", text="Download VMware Player"), RAW_HD_IMAGE: dict( icon="get-parallels.png", href="http://www.parallels.com/", text="Try Parallels Workstation 2.2"), VIRTUAL_IRON: dict( icon="get-virtual-iron.png", href="http://www.virtualiron.com/free", text="Virtual Iron: Download Now"), XEN_OVA: dict( icon="get-xen-express.gif", href="http://www.citrix.com/xenserver/getexpress", text="Citrix XenServer Express Edition: Download Now", ), VIRTUAL_PC_IMAGE: dict( icon="get-hyper-v.png", href="http://www.microsoft.com/Hyper-V", text="Learn more about Microsoft Hyper-V", ), } typeFlavorOverride = { (RAW_HD_IMAGE, XEN_DOMU): dict( marketingName="Raw Hard Disk Image", icon=False, ), } # sizes are listed in bytes... 
discSizes = { 'CD: 650 MB' : '681574400', 'CD: 700 MB' : '734003200', 'DVD: 4.7 GB' : '4700000000', 'DVD: 8.5 GB' : '8500000000', } buildDefinitionFlavorTypes = { 'BD_GENERIC_X86' : 0, 'BD_GENERIC_X86_64' : 1, 'BD_DOM0_X86' : 2, 'BD_DOM0_X86_64' : 3, 'BD_DOMU_X86' : 4, 'BD_DOMU_X86_64' : 5, 'BD_VMWARE_X86' : 6, 'BD_VMWARE_X86_64' : 7, } sys.modules[__name__].__dict__.update(buildDefinitionFlavorTypes) buildDefinitionFlavorMap = { BD_GENERIC_X86 : '!dom0, !domU, !xen, !vmware is: x86', BD_GENERIC_X86_64 : '!dom0, !domU, !xen, !vmware is: x86_64', BD_DOM0_X86 : 'dom0, !domU, xen, !vmware is: x86', BD_DOM0_X86_64 : 'dom0, !domU, xen, !vmware is: x86_64', BD_DOMU_X86 : '!dom0, domU, xen, !vmware is: x86', BD_DOMU_X86_64 : '!dom0, domU, xen, !vmware is: x86_64', BD_VMWARE_X86 : '!dom0, !domU, !xen, vmware is: x86', BD_VMWARE_X86_64 : '!dom0, !domU, !xen, vmware is: x86_64', } def alphabatizeBuildTypes(visibleBuildTypes): sortedList = sorted([x for x in visibleBuildTypes if x != IMAGELESS], key = lambda x: typeNames.get(x)) if IMAGELESS in visibleBuildTypes: sortedList.insert(0, IMAGELESS) return sortedList def makeBuildFlavorMap(prd): baseFlavor = prd.getBaseFlavor() or prd.getPlatformBaseFlavor() or '' baseFlavor = deps.parseFlavor(baseFlavor) flavorSets = prd.getFlavorSets() architectures = prd.getArchitectures() if prd.platform: flavorSets += prd.platform.getFlavorSets() architectures = prd.platform.getArchitectures() res = {} for flavorSet in flavorSets: for architecture in architectures: flv = deps.parseFlavor(flavorSet.flavor) arch = deps.parseFlavor(architecture.flavor) flavor = deps.overrideFlavor(baseFlavor, flv) flavor = deps.overrideFlavor(flavor, arch) res[str(flavor)] = \ "%s %s" % (flavorSet.displayName, architecture.displayName) return res def makeFlavorMap(prd): flavorSets = prd.getFlavorSets() architectures = prd.getArchitectures() if prd.platform: flavorSets += prd.platform.getFlavorSets() architectures += prd.platform.getArchitectures() return dict([("%s %s" % (x.displayName, y.displayName), "%s,%s" % (x.name, y.name)) \ for x in flavorSets for y in architectures]) def makeFlavorsForBuild(prd, key): # compose a flavor map much like above but filter illegal types flavorSets = prd.getFlavorSets() architectures = prd.getArchitectures() buildTemplates = prd.getBuildTemplates() if prd.platform: flavorSets += prd.platform.getFlavorSets() architectures += prd.platform.getArchitectures() buildTemplates += prd.platform.getBuildTemplates() containerTemplateRef = imageTypeXmlTagNameMap.get(key) if not containerTemplateRef: return makeFlavorMap(prd) # for arch and flavorSet, if None is encountered, all available types # are legal arches = set([x.architectureRef for x in buildTemplates \ if x.containerTemplateRef == containerTemplateRef]) arches = [x for x in architectures if None in arches or x.name in arches] flavors = set([x.flavorSetRef for x in buildTemplates \ if x.containerTemplateRef == containerTemplateRef]) flavors = [x for x in flavorSets if None in flavors or x.name in flavors] return dict([("%s %s" % (x.displayName, y.displayName), "%s,%s" % (x.name, y.name)) \ for x in flavors for y in arches]) # generate mapping of flavors to flavor names buildDefinitionFlavorToFlavorMapRev = \ dict((x[1], x[0]) for x in buildDefinitionFlavorMap.iteritems()) buildDefinitionFlavorNameMap = { BD_GENERIC_X86 : 'Generic x86 (32-bit)', BD_GENERIC_X86_64 : 'Generic x86 (64-bit)', BD_DOM0_X86 : 'dom0 x86 (32-bit)', BD_DOM0_X86_64 : 'dom0 x86 (64-bit)', BD_DOMU_X86 : 'domU x86 (32-bit)', 
BD_DOMU_X86_64 : 'domU x86 (64-bit)', BD_VMWARE_X86 : 'VMware x86 (32-bit)', BD_VMWARE_X86_64 : 'VMware x86 (64-bit)', } # a mapping of build types to supported flavors. If a build type does not # exist in this map, it is assumed it supports all flavors. The first flavor # is assumed to be the default. buildDefinitionSupportedFlavorsMap = { VMWARE_IMAGE : [BD_VMWARE_X86, BD_VMWARE_X86_64], VMWARE_ESX_IMAGE : [BD_VMWARE_X86, BD_VMWARE_X86_64], XEN_OVA : [BD_DOMU_X86, BD_DOMU_X86_64], AMI : [BD_DOMU_X86, BD_DOMU_X86_64], } # code generator run by make to generate javascript constants # should only be run by the makefile in mint/web/content/javascript def codegen(): s = "// this Javascript was generated by mint/buildtypes.py\n" s += "// do not edit or check into source control\n" s += "var maxBuildType = %d;" % max(validBuildTypes.values()) s += "var buildTypeNames = {" i = [] for k, v in typeNames.items(): i.append(" '%d': '%s'" % (k, v,)) s += ", ".join(i) s += "};" s += "var buildTypeNamesShort = {" i = [] for k, v in typeNamesShort.items(): i.append(" '%d': '%s'" % (k, v,)) s += ", ".join(i) s += "};" s += "var buildTypeNamesMarketing = {" i = [] for k, v in typeNamesMarketing.items(): i.append(" '%d': '%s'" % (k, v,)) s += ", ".join(i) s += "};" for k, v in validBuildTypes.items(): s += "%s = %d;\n" % (k, v) return s if __name__ == "__main__": #pragma: no cover if len(sys.argv) > 1 and sys.argv[1] == "--genjs": print codegen() sys.exit(0) else: sys.exit(1)
apache-2.0
-8,518,906,341,440,424,000
33.970803
88
0.598066
false
3.110366
false
false
false
KIOS-Research/effinet-smart-water-game
test.py
1
6635
# -*- coding: cp1253 -*- from tkinter import * from time import sleep def create(w, x1, y1): w.place(x=x1, y=y1) def erase(w): w.destroy() def reset(w): w.destroy() start() def exit(w): w.destroy() def e_q1(root, counter, step): TL = Toplevel() w, h = TL.winfo_screenwidth(), TL.winfo_screenheight() TL.overrideredirect(1) TL.geometry("%dx%d+0+0" % (w, h)) a01 = 0 a02 = 0 a03 = 0 if step == 1: question = "Question 1: How much of Earth's water is salty and undrinkable?" a1 = "37%" a2 = "97%" a3 = "67%" backfile = "1.gif" # effinet solution = "1a.gif" a02 = 1 elif step == 2: question = "Question 2: How much water do Europeans use per day on average?" a1 = "50 Liters" a2 = "150 Liters" a3 = "10 Liters" solution = "" backfile = "2.gif" # William Newman a02 = 1 elif step == 3: question = "Question 3: Which substance do water companies use to kill bacteria in water?" a1 = "Soap" a2 = "Citric Acid" a3 = "Chlorine" solution = "" backfile = "3.gif" # Jacob Vanderheyden a03 = 1 elif step == 4: question = "Question 4: How much water is lost due to leakages in Cyprus?" a1 = "Around 20%" a2 = "Around 50%" a3 = "Around 12%" solution = "" backfile = "4.gif" # Pete a01 = 1 elif step == 5: question = "Question 5: What is the energy cost to deliver water to consumers in Barcelona, Spain?" a1 = "7 Million Euros" a2 = "700,000 Euros" a3 = "70 Million Euros" solution = "" backfile = "5.gif" # a01 = 1 elif step == 6: question = "Question 6: How water utilities detect leakages?" a1 = "Using many sensors" a2 = "Monitoring night flow increase" a3 = "Consumer complaints" solution = "" backfile = "6.gif" # a02 = 1 elif step == 7: question = "Question 7: A water tank is equivalent to:" a1 = "A battery" a2 = "A lamp" a3 = "A switch" backfile = "7.gif" # solution = "" a01 = 1 elif step == 8: question = "Question 8: The most energy consumption in a water network goes for" a1 = "Disinfection System" a2 = "ICT Functions" a3 = "Pump operations" solution = "" backfile = "8.gif" # a03 = 1 elif step == 9: question = "Question 9: How can we reduce energy usage in water networks?" 
a1 = "Use pumps during off-peak hours" a2 = "Use ground water" a3 = "Increase water prices" solution = "" backfile = "9.gif" # a01 = 1 elif step == 10: question = "Question 10: In the future, water utilities will" a1 = "Communicate information to the consumers" a2 = "Get information directly from the consumers" a3 = "Both of the above" solution = "" backfile = "10.gif" # a03 = 1 photo = PhotoImage(file=backfile) wback = Label(TL, image=photo) wback.photo = photo wback.place(x=-5, y=-5) photo = PhotoImage(file="logo2.gif") wlogo = Label(TL, image=photo) wlogo.photo = photo wlogo.place(x=1050, y=100) l = Label(TL, text=question, font="Verdana 20", bg="Plum", pady=10) l.pack(side=TOP) b2 = Button(TL, text=a1, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White", command=lambda: e_correct1(root, TL, a01, counter, step,solution)) b2.pack() b2.place(x=500, y=250) b3 = Button(TL, text=a2, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White", command=lambda: e_correct1(root, TL, a02, counter, step,solution)) b3.pack() b3.place(x=500, y=340) b2 = Button(TL, text=a3, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White", command=lambda: e_correct1(root, TL, a03, counter, step, solution)) b2.pack() b2.place(x=500, y=430) # ex = Button(window2, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White", # command=lambda: exit2(window1)) #ex.pack() #ex.place(x=1168, y=725) ex1 = Button(TL, text="RESET", bd=1, width=8, font="Verdana 10 bold", bg="red", fg="White", command=lambda: TL.destroy()) ex1.pack() ex1.place(x=1048, y=725) def e_correct1(root, TL, a, counter, step, solution): #t = Text(TL, text=solution, font="Verdana 20", bg="Plum") #t.place(100,20) #l = Label(TL, text=solution, font="Verdana 20", bg="Plum", pady=10) #l.pack(side=BOTTOM) photo = PhotoImage(file=solution) wsol = Label(TL, image=photo) wsol.photo = photo wsol.place(x=100, y=100) if a == 1: counter += 1 photo = PhotoImage(file="cr.gif") w = Label(TL, image=photo) w.photo = photo w.place(x=570, y=60) else: photo = PhotoImage(file="wr.gif") w = Label(TL, image=photo) w.photo = photo w.place(x=570, y=60) if step < 10: TL.update() sleep(3) e_q1(root, counter, step + 1) TL.destroy() else: sleep(0.5) backfile = '0.gif' photo = PhotoImage(file=backfile) w = Label(TL, image=photo) w.photo = photo w.place(x=-5, y=-5) ex = Button(TL, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White", command=lambda: root.destroy()) ex.pack() ex.place(x=1168, y=725) # t= lambda: reset(w) #window2.after(1500, t) def start(): root = Tk() w, h = root.winfo_screenwidth(), root.winfo_screenheight() root.overrideredirect(1) root.geometry("%dx%d+0+0" % (w, h)) photo = PhotoImage(file="0.gif") w = Label(root, image=photo) w.photo = photo w.place(x=-5, y=-5) photo = PhotoImage(file="logo2.gif") w = Label(root, image=photo) w.photo = photo w.place(x=1050, y=100) counter = 0 step = 1 b2 = Button(root, text='Begin Smart Water Challenge!', bd=10, height=1, font="Verdana 14 bold", bg="Black", fg="White", command=lambda: e_q1(root, counter, step), compound=CENTER) b2.pack() b2.place(x=500, y=350) ex = Button(root, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White", command=lambda: root.destroy()) ex.pack() ex.place(x=1168, y=725) root.mainloop() start()
bsd-2-clause
6,492,672,037,668,850,000
30.009346
111
0.547099
false
3.049173
false
false
false
bbengfort/inigo
inigo/image.py
1
7931
# inigo.image # Handles data dealing with images, particularly EXIF for JPEG # # Author: Benjamin Bengfort <[email protected]> # Created: Sun Jun 14 22:32:17 2015 -0400 # # Copyright (C) 2015 Bengfort.com # For license information, see LICENSE.txt # # ID: image.py [] [email protected] $ """ Handles data dealing with images, particularly EXIF for JPEG """ ########################################################################## ## Imports ########################################################################## from inigo.fs import FileMeta from PIL import Image, ExifTags from datetime import datetime from dateutil.tz import tzutc from inigo.config import settings from inigo.utils.timez import epochptime from inigo.utils.decorators import memoized from inigo.exceptions import PictureNotFound from inigo.models import STYPE, create_session from inigo.models import Picture, Storage from inigo.utils.timez import tzaware_now from sqlalchemy.sql import exists from geopy.geocoders import GoogleV3 ########################################################################## ## Module Constants ########################################################################## EXIF_DATE_FORMAT = "%Y:%m:%d %H:%M:%S" ########################################################################## ## Helper functions ########################################################################## def convert_to_degrees(value): """ Helper function to convert GPS coordinates stored in EXIF degrees to a decimal float format, though this function does not take into account N/S or E/W cardinality of the degree vector. """ deg = float(value[0][0]) / float(value[0][1]) mns = float(value[1][0]) / float(value[1][1]) sec = float(value[2][0]) / float(value[2][1]) return deg + (mns / 60.0) + (sec / 3600.0) ########################################################################## ## Image Node ########################################################################## class ImageMeta(FileMeta): """ Wraps a path and provides image meta data. """ @property def exif(self): """ Uses Pillow to extract the EXIF data """ if not hasattr(self, '_exif'): self.read_image_data() return self._exif @property def dimensions(self): """ Returns a tuple of the width and height of the image. """ if not hasattr(self, '_dimensions'): self.read_image_data() return self._dimensions @memoized def date_taken(self): """ Attempts to find the date taken. Returns any timestamp, even if it is just the date created on the file meta. Current logic for the method: 1. Attempt to parse DateTimeOriginal from EXIF 2. Return st_ctime from os.stat """ dtorig = self.exif.get('DateTimeOriginal', None) if dtorig: return datetime.strptime(dtorig, EXIF_DATE_FORMAT).replace(tzinfo=tzutc()) return epochptime(self.stat().st_ctime) @memoized def coordinates(self): """ Returns the latitude and longitude as a tuple. 
""" lat = lon = None # Decode the GPSInfo tags if "GPSInfo" in self.exif: self.exif["GPSInfo"] = { ExifTags.GPSTAGS[k]: v for k,v in self.exif["GPSInfo"].iteritems() if k in ExifTags.GPSTAGS } # Gather GPS data points gps_info = self.exif["GPSInfo"] gps_lat = gps_info.get("GPSLatitude", None) gps_lon = gps_info.get("GPSLongitude", None) gps_lat_ref = gps_info.get("GPSLatitudeRef", None) gps_lon_ref = gps_info.get("GPSLongitudeRef", None) # Perform GPS conversions if gps_lat and gps_lon and gps_lat_ref and gps_lon_ref: lat = convert_to_degrees(gps_lat) if gps_lat_ref != "N": lat = 0 - lat lon = convert_to_degrees(gps_lon) if gps_lon_ref != "E": lon = 0 - lon return (lat, lon) @memoized def address(self): """ Reverses the address from the coordinates """ if not self.coordinates: return geocoder = GoogleV3(api_key=settings.geocode.apikey) query = "{},{}".format(*self.coordinates) result = geocoder.reverse(query, exactly_one=True, sensor=False) if result: return result.address def read_image_data(self): """ Reads the image data and returns specific information. """ with Image.open(self.path) as img: # Read size data self._dimensions = img.size # Read EXIF data exifdata = img._getexif() if hasattr(img, "_getexif") else {} self._exif = { ExifTags.TAGS[k]: v for k,v in exifdata.iteritems() if k in ExifTags.TAGS } if exifdata else {} def save(self, session=None, commit=False): """ Stores the image information in the database along with the current file path. Pass a session object in to use the same session for multiple saves. This method returns the session object. Will commit if required. """ session = session or create_session() if not session.query(exists().where( Picture.signature == self.signature )).scalar(): session.add(Picture( signature = self.signature, date_taken = self.date_taken, latitude = self.coordinates[0] if self.coordinates else None, longitude = self.coordinates[1] if self.coordinates else None, width = self.dimensions[0], height = self.dimensions[1], mimetype = unicode(self.mimetype), bytes = self.filesize, )) if commit: session.commit() return session def save_storage(self, session=None, commit=False, **skwargs): """ Saves the storage associated with this image and file meta. """ session = session or create_session() # Fetch the picture from the database picture = session.query(Picture) picture = picture.filter(Picture.signature == self.signature).first() if not picture: raise PictureNotFound( "Must save the picture before assigning storages." ) # Create the storage object sdata = { "stype": STYPE.ORIGINAL, "hostname": unicode(self.hostname), "filepath": unicode(self.path), "memo": None, "picture": picture, "modified": tzaware_now(), } sdata.update(skwargs) # Attempt to fetch the storage on the dependent keys storage = session.query(Storage) storage = storage.filter(Storage.stype == sdata['stype']) storage = storage.filter(Storage.hostname == sdata['hostname']) storage = storage.filter(Storage.filepath == sdata['filepath']) storage = storage.filter(Storage.picture == sdata['picture']) storage = storage.first() or Storage() # Set the new values on the storage object for key, val in sdata.iteritems(): setattr(storage, key, val) session.add(storage) if commit: session.commit() return session if __name__ == '__main__': import os from inigo.config import PROJECT img = ImageMeta(os.path.join(PROJECT, "fixtures/animals/land/cats/cat.jpg")) print img.date_taken print img.dimensions
mit
2,563,230,144,697,206,000
30.724
86
0.536628
false
4.31267
false
false
false
macarthur-lab/xbrowse
seqr/views/apis/locus_list_api_tests.py
1
8486
import json import mock from django.test import TransactionTestCase from django.urls.base import reverse from seqr.models import LocusList, Project from seqr.views.apis.locus_list_api import locus_lists, locus_list_info, create_locus_list_handler, \ update_locus_list_handler, delete_locus_list_handler, add_project_locus_lists, delete_project_locus_lists from seqr.views.utils.orm_to_json_utils import get_project_locus_list_models from seqr.views.utils.test_utils import _check_login LOCUS_LIST_GUID = 'LL00049_pid_genes_autosomal_do' PROJECT_GUID = 'R0001_1kg' class LocusListAPITest(TransactionTestCase): fixtures = ['users', '1kg_project', 'reference_data'] def test_locus_lists(self): url = reverse(locus_lists) _check_login(self, url) response = self.client.get(url) self.assertEqual(response.status_code, 200) locus_lists_dict = response.json()['locusListsByGuid'] self.assertSetEqual(set(locus_lists_dict.keys()), {'LL00049_pid_genes_autosomal_do', 'LL00005_retina_proteome'}) locus_list = locus_lists_dict[LOCUS_LIST_GUID] self.assertSetEqual( set(locus_list.keys()), {'locusListGuid', 'description', 'lastModifiedDate', 'numEntries', 'isPublic', 'createdBy', 'createdDate', 'canEdit', 'name'} ) def test_locus_list_info(self): url = reverse(locus_list_info, args=[LOCUS_LIST_GUID]) _check_login(self, url) response = self.client.get(url) self.assertEqual(response.status_code, 200) response_json = response.json() locus_lists_dict = response_json['locusListsByGuid'] self.assertListEqual(locus_lists_dict.keys(), [LOCUS_LIST_GUID]) locus_list = locus_lists_dict[LOCUS_LIST_GUID] self.assertSetEqual( set(locus_list.keys()), {'locusListGuid', 'description', 'lastModifiedDate', 'numEntries', 'isPublic', 'createdBy', 'createdDate', 'canEdit', 'name', 'items', 'intervalGenomeVersion'} ) self.assertSetEqual( {item['geneId'] for item in locus_list['items'] if item.get('geneId')}, set(response_json['genesById'].keys()) ) def test_create_update_and_delete_locus_list(self): create_locus_list_url = reverse(create_locus_list_handler) _check_login(self, create_locus_list_url) # send invalid requests to create locus_list response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({})) self.assertEqual(response.status_code, 400) self.assertEqual(response.reason_phrase, '"Name" is required') response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({ 'name': 'new_locus_list', 'isPublic': True, 'rawItems': 'DDX11L1, foo 10:10-1 chr100:1-10 \n2:1234-5678', })) self.assertEqual(response.status_code, 400) self.assertEqual(response.reason_phrase, 'This list contains invalid genes/ intervals. 
Update them, or select the "Ignore invalid genes and intervals" checkbox to ignore.') self.assertListEqual(response.json()['invalidLocusListItems'], ['chr10:10-1', 'chr100:1-10', 'foo']) # send valid request to create locus_list response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({ 'name': 'new_locus_list', 'isPublic': True, 'ignoreInvalidItems': True, 'rawItems': 'DDX11L1, foo chr100:1-1 \nchr2:1234-5678', })) self.assertEqual(response.status_code, 200) new_locus_list_response = response.json() self.assertEqual(len(new_locus_list_response['locusListsByGuid']), 1) new_locus_list = new_locus_list_response['locusListsByGuid'].values()[0] self.assertEqual(new_locus_list['name'], 'new_locus_list') self.assertEqual(new_locus_list['isPublic'], True) self.assertSetEqual( {item['geneId'] for item in new_locus_list['items'] if item.get('geneId')}, set(new_locus_list_response['genesById'].keys()) ) self.assertListEqual( new_locus_list['items'], [ {'geneId': 'ENSG00000223972'}, {'chrom': '2', 'start': 1234, 'end': 5678, 'genomeVersion': '37', 'locusListIntervalGuid': mock.ANY} ] ) guid = new_locus_list['locusListGuid'] gene_id = new_locus_list['items'][0]['geneId'] new_locus_list_model = LocusList.objects.filter(guid=guid).first() self.assertIsNotNone(new_locus_list_model) self.assertEqual(new_locus_list_model.name, new_locus_list['name']) self.assertEqual(new_locus_list_model.is_public, new_locus_list['isPublic']) self.assertEqual(new_locus_list_model.locuslistgene_set.count(), 1) self.assertEqual(new_locus_list_model.locuslistgene_set.first().gene_id, gene_id) self.assertEqual(new_locus_list_model.locuslistinterval_set.count(), 1) new_interval = new_locus_list_model.locuslistinterval_set.first() self.assertEqual(new_interval.chrom, '2') self.assertEqual(new_interval.start, 1234) # update the locus_list update_locus_list_url = reverse(update_locus_list_handler, args=[guid]) response = self.client.post(update_locus_list_url, content_type='application/json', data=json.dumps( {'name': 'updated_locus_list', 'isPublic': False, 'rawItems': 'DDX11L1 FAM138A'})) self.assertEqual(response.status_code, 200) updated_locus_list_response = response.json() self.assertEqual(len(updated_locus_list_response['locusListsByGuid']), 1) updated_locus_list = updated_locus_list_response['locusListsByGuid'].values()[0] self.assertEqual(updated_locus_list['name'], 'updated_locus_list') self.assertEqual(updated_locus_list['isPublic'], False) self.assertEqual(len(updated_locus_list_response['genesById']), 2) self.assertTrue(gene_id in updated_locus_list_response['genesById']) new_gene_id = next(gid for gid in updated_locus_list_response['genesById'] if gid != gene_id) self.assertSetEqual({item['geneId'] for item in updated_locus_list['items']}, {new_gene_id, gene_id}) updated_locus_list_model = LocusList.objects.filter(guid=guid).first() self.assertIsNotNone(updated_locus_list_model) self.assertEqual(updated_locus_list_model.name, updated_locus_list['name']) self.assertEqual(updated_locus_list_model.is_public, updated_locus_list['isPublic']) self.assertEqual(updated_locus_list_model.locuslistgene_set.count(), 2) self.assertEqual(updated_locus_list_model.locuslistgene_set.last().gene_id, new_gene_id) self.assertEqual(updated_locus_list_model.locuslistinterval_set.count(), 0) # delete the locus_list delete_locus_list_url = reverse(delete_locus_list_handler, args=[guid]) response = self.client.post(delete_locus_list_url, content_type='application/json') 
self.assertEqual(response.status_code, 200) # check that locus_list was deleted new_locus_list = LocusList.objects.filter(guid=guid) self.assertEqual(len(new_locus_list), 0) def test_add_and_remove_project_locus_lists(self): project = Project.objects.get(guid=PROJECT_GUID) self.assertListEqual(list(get_project_locus_list_models(project)), []) # add a locus list url = reverse(add_project_locus_lists, args=[PROJECT_GUID]) _check_login(self, url) response = self.client.post(url, content_type='application/json', data=json.dumps({'locusListGuids': [LOCUS_LIST_GUID]})) self.assertEqual(response.status_code, 200) self.assertListEqual(response.json()['locusListGuids'], [LOCUS_LIST_GUID]) self.assertListEqual(list(get_project_locus_list_models(project)), [LocusList.objects.get(guid=LOCUS_LIST_GUID)]) # remove a locus list url = reverse(delete_project_locus_lists, args=[PROJECT_GUID]) response = self.client.post(url, content_type='application/json', data=json.dumps({'locusListGuids': [LOCUS_LIST_GUID]})) self.assertEqual(response.status_code, 200) self.assertListEqual(response.json()['locusListGuids'], []) self.assertListEqual(list(get_project_locus_list_models(project)), [])
agpl-3.0
3,673,026,669,628,493,000
49.511905
180
0.665213
false
3.397118
true
false
false
bjtox/ec2ssh-manager
ec2ssh/ec2ssh.py
1
11122
import subprocess import boto3 import sys import configparser from codecs import open from os.path import expanduser import os import glob import inquirer import argparse import libtmux import time class Connector: def __init__(self, connection_name, profile): self.hosts_folder = expanduser("~") print(self.hosts_folder) self.profile = profile self.directory_to_save = self.hosts_folder+'/.ec2ssh/hosts/' if not os.path.exists(self.directory_to_save): os.makedirs(self.directory_to_save) if connection_name != None: self.connection_name = connection_name self.config = self.read_config(connection_name) if self.config != False: self.port = self.config['Connection']['connection_port'] self.region_name = self.config['Connection']['region'] def open_tmux(self,selects,connection_name, region, profile, port): server = libtmux.Server() session = server.list_sessions()[0] print(session) window = session.new_window(attach=True, window_name=connection_name+str(round(time.time() * 1000))) instances = len(selects) print(instances) print(instances % 2 == 0) if instances % 2 == 0: count = 1 else: count = 0 while (count < instances): window.split_window() window.select_layout('tiled') count += 1 selection = 1 for pane in window.list_panes(): pane.send_keys('ec2ssh connect -n {} -p {}'.format(connection_name,profile)) pane.send_keys(str(selection)) selection += 1 window.set_window_option('synchronize-panes', True) def printMenu(self): print (30 * '-') print (" M A I N - M E N U") print (30 * '-') print ("1. Direct Connect") print ("2. Pass from Bastion Host") print ("3. Autoscaling") print (30 * '-') def read_config(self,host): if os.path.isfile(self.directory_to_save+host+'.ini'): config = configparser.ConfigParser() config.sections() config.read(self.directory_to_save+host+'.ini') return(config); else: return False def query_yes_no(self,question, default="yes"): valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError("invalid default answer: '%s'" % default) while True: sys.stdout.write(question + prompt) choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") def addConfig(self): config = configparser.ConfigParser() self.printMenu() valid_choise=0 usr_input = '' while usr_input not in ['1', '2', '3']: if valid_choise : print("Not Valid Choise") valid_choise=1 usr_input = input("Input: ") config['Connection']= {} config['Connection']['region'] = input('Specify a Region:\n-> ') config['Connection']['connection_port'] = input('Specify a connection port (for direct or for Bastion):\n-> ') config['Connection']['profile'] = input('Specify which AWS profile use:\n-> ') if not config['Connection']['profile']: config['Connection']['profile'] = 'default' if usr_input == "1": config['Connection']['type'] = "direct" config['EC2INSTANCE'] = {} config['EC2INSTANCE']['pem_path'] = input('Enter a keyPair EC2 file path (absolute path):\n-> ') config['EC2INSTANCE']['user'] = input('Enter a EC2 user (default "ec2-user"):\n-> ') config['EC2INSTANCE']['ec2_instance_id'] = input('Enter a EC2 Instance ID:\n-> ') if not config['EC2INSTANCE']['user']: config['EC2INSTANCE']['user'] = 'ec2-user' elif usr_input == "2": config['Connection']['type'] = "bastion" config['EC2INSTANCE'] = {} config['EC2INSTANCE']['pem_path'] = 
input('Enter a keyPair EC2 file path (absolute path):\n-> ') config['EC2INSTANCE']['user'] = input('Enter a EC2 user (default "ec2-user"):\n-> ') config['EC2INSTANCE']['ec2_instance_id'] = input('Enter a EC2 Instance ID:\n-> ') config['BASTIONHOST'] = {} config['BASTIONHOST']['b_pem_path'] = input('Enter a Bastion pem file path (absolute path):\n-> ') config['BASTIONHOST']['b_user'] = input('Enter a Bastion user:\n-> ') config['BASTIONHOST']['b_ec2_instance_id'] = input('Enter a Bastion Instance ID:\n-> ') if not config['EC2INSTANCE']['user']: config['EC2INSTANCE']['user'] = 'ec2-user' elif usr_input == "3": config['Connection']['type'] = "asg" config['ASG'] = {} config['ASG']['pem_path'] = input('Enter a pem file path (absolute path):\n-> ') config['ASG']['user'] = input('Enter a user (default "ec2-user"):\n-> ') config['ASG']['name'] = input('Enter a ASG Name ID:\n-> ') if not config['ASG']['user']: config['ASG']['user'] = 'ec2-user' questions = self.query_yes_no("ASG allow ssh only from Bastion Host?") if questions == True: config['BASTIONHOST'] = {} config['BASTIONHOST']['b_pem_path'] = input('Enter a Bastion pem file path (absolute path):\n-> ') config['BASTIONHOST']['b_user'] = input('Enter a Bastion user:\n-> ') config['BASTIONHOST']['b_ec2_instance_id'] = input('Enter a Bastion Instance ID:\n-> ') with open(self.directory_to_save+self.connection_name+'.ini', 'w') as configfile: config.write(configfile) print("File Config "+self.connection_name+" created") def direct_connect(self,ec2_instance_config): target = {'key': ec2_instance_config['pem_path'], 'user': ec2_instance_config['user'], 'host': ec2_instance_config['ec2_instance_id']} target_ec2 = self.client target_response = target_ec2.describe_instances(InstanceIds=[target['host']]) target_ip = target_response['Reservations'][0]['Instances'][0]['PublicIpAddress'] subprocess.call("ssh-add {}".format(target['key']), shell=True) subprocess.call("ssh {}@{} -p {}".format(target['user'], target_ip, self.port), shell=True) def bastion_connect(self,ec2_instance_config,bastion_config): target = {'key': ec2_instance_config['pem_path'], 'user': ec2_instance_config['user'], 'host': ec2_instance_config['ec2_instance_id']} target_ec2 = self.client target_response = target_ec2.describe_instances(InstanceIds=[target['host']]) bastion = {'key': bastion_config['b_pem_path'], 'user': bastion_config['b_user'], 'host': bastion_config['b_ec2_instance_id']} bastion_ec2 = self.client bastion_response = bastion_ec2.describe_instances(InstanceIds=[bastion['host']]) bastion_ip = bastion_response['Reservations'][0]['Instances'][0]['PublicIpAddress'] target_ip = target_response['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['PrivateIpAddress'] subprocess.call("ssh-add {} {}".format(bastion['key'], target['key']), shell=True) subprocess.call("ssh -t -A {}@{} -p {} ssh {}@{}".format(bastion['user'], bastion_ip,self.port, target['user'], target_ip), shell=True) def ec2ssh(self): self.session = boto3.Session(profile_name=self.profile) self.client = self.session.client('ec2',region_name=self.config['Connection']['region']) config = self.read_config(self.connection_name) if config['Connection']['type'] == "direct": self.direct_connect(config['EC2INSTANCE']) elif config['Connection']['type'] == "bastion": self.bastion_connect(config['EC2INSTANCE'], config['BASTIONHOST']) elif config['Connection']['type'] == "asg": print ('Please select an option:') print (" 0. 
All") i=1 selects = {} for instance in self.list_instance_in_asg(config['ASG']['name']): print (" "+str(i)+". "+instance['InstanceId']+" - "+instance['LifecycleState']) selects[i]=instance['InstanceId'] i+=1 config_asg = {} choise = input('Enter Value: ') if choise != "0": config_asg['pem_path']=config['ASG']['pem_path'] config_asg['user']=config['ASG']['user'] config_asg['ec2_instance_id']=selects[int(choise)] if config.has_section('BASTIONHOST'): config_asg_bastion = {} config_asg_bastion['b_pem_path']=config['BASTIONHOST']['b_pem_path'] config_asg_bastion['b_user']=config['BASTIONHOST']['b_user'] config_asg_bastion['b_ec2_instance_id']=config['BASTIONHOST']['b_ec2_instance_id'] self.bastion_connect(config_asg, config_asg_bastion) else: self.direct_connect(config_asg) else: self.open_tmux(selects, self.connection_name, self.region_name, self.profile, self.port) def list_avaible_connection(self): print (30 * '-') for file in os.listdir(self.directory_to_save): if file.endswith(".ini"): name_file = file.replace('.ini','') print(" Connection Name: "+name_file) config = self.read_config(name_file) print(" Type: "+config['Connection']['type']) print(" Region Name: "+config['Connection']['region']) print(" Connection Port: "+config['Connection']['connection_port']) if config['Connection']['type'] == "direct": print(" Key Pair: "+config['EC2INSTANCE']['pem_path']) print(" User Pair: "+config['EC2INSTANCE']['user']) print(" Instance Id Pair: "+config['EC2INSTANCE']['ec2_instance_id']) elif config['Connection']['type'] == "bastion": print(" Key Pair: "+config['EC2INSTANCE']['pem_path']) print(" User Pair: "+config['EC2INSTANCE']['user']) print(" Instance Id Pair: "+config['EC2INSTANCE']['ec2_instance_id']) print(" Bastion Id: "+config['BASTIONHOST']['b_ec2_instance_id']) elif config['Connection']['type'] == "asg": print(" Key Pair: "+config['ASG']['pem_path']) print(" User Pair: "+config['ASG']['user']) print(" ASG Name: "+config['ASG']['name']) print(" Bastion Id: "+config['BASTIONHOST']['b_ec2_instance_id']) print (30 * '-') def list_instance_in_asg(self, asg_name): if self.profile!=None: asg_client = self.session.client('autoscaling',region_name=self.region_name) else: asg_client = boto3.client('autoscaling',region_name=self.region_name) response = asg_client.describe_auto_scaling_groups( AutoScalingGroupNames=[ asg_name, ] ) return response['AutoScalingGroups'][0]['Instances'] def rm_connecition(self): try: os.remove(self.directory_to_save+self.connection_name+'.ini') print(self.connection_name+" connection was removed!") except OSError: print(self.connection_name+" connection doesn't exist!") pass
mit
-2,762,748,396,079,273,000
39.155235
139
0.605107
false
3.521849
true
false
false
mmahut/openshift-ansible
roles/openshift_health_checker/action_plugins/openshift_health_check.py
1
5501
""" Ansible action plugin to execute health checks in OpenShift clusters. """ # pylint: disable=wrong-import-position,missing-docstring,invalid-name import sys import os from collections import defaultdict try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() from ansible.plugins.action import ActionBase from ansible.module_utils.six import string_types # Augment sys.path so that we can import checks from a directory relative to # this callback plugin. sys.path.insert(1, os.path.dirname(os.path.dirname(__file__))) from openshift_checks import OpenShiftCheck, OpenShiftCheckException, load_checks # noqa: E402 class ActionModule(ActionBase): def run(self, tmp=None, task_vars=None): result = super(ActionModule, self).run(tmp, task_vars) task_vars = task_vars or {} # vars are not supportably available in the callback plugin, # so record any it will need in the result. result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context') if "openshift" not in task_vars: result["failed"] = True result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?" return result try: known_checks = self.load_known_checks(tmp, task_vars) args = self._task.args requested_checks = normalize(args.get('checks', [])) resolved_checks = resolve_checks(requested_checks, known_checks.values()) except OpenShiftCheckException as e: result["failed"] = True result["msg"] = str(e) return result result["checks"] = check_results = {} user_disabled_checks = normalize(task_vars.get('openshift_disable_check', [])) for check_name in resolved_checks: display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"])) check = known_checks[check_name] if not check.is_active(): r = dict(skipped=True, skipped_reason="Not active for this host") elif check_name in user_disabled_checks: r = dict(skipped=True, skipped_reason="Disabled by user request") else: try: r = check.run() except OpenShiftCheckException as e: r = dict( failed=True, msg=str(e), ) if check.changed: r["changed"] = True check_results[check_name] = r result["changed"] = any(r.get("changed") for r in check_results.values()) if any(r.get("failed") for r in check_results.values()): result["failed"] = True result["msg"] = "One or more checks failed" return result def load_known_checks(self, tmp, task_vars): load_checks() known_checks = {} for cls in OpenShiftCheck.subclasses(): check_name = cls.name if check_name in known_checks: other_cls = known_checks[check_name].__class__ raise OpenShiftCheckException( "non-unique check name '{}' in: '{}.{}' and '{}.{}'".format( check_name, cls.__module__, cls.__name__, other_cls.__module__, other_cls.__name__)) known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars) return known_checks def resolve_checks(names, all_checks): """Returns a set of resolved check names. Resolving a check name expands tag references (e.g., "@tag") to all the checks that contain the given tag. OpenShiftCheckException is raised if names contains an unknown check or tag name. names should be a sequence of strings. all_checks should be a sequence of check classes/instances. 
""" known_check_names = set(check.name for check in all_checks) known_tag_names = set(name for check in all_checks for name in check.tags) check_names = set(name for name in names if not name.startswith('@')) tag_names = set(name[1:] for name in names if name.startswith('@')) unknown_check_names = check_names - known_check_names unknown_tag_names = tag_names - known_tag_names if unknown_check_names or unknown_tag_names: msg = [] if unknown_check_names: msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names)))) if unknown_tag_names: msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names)))) msg.append('Make sure there is no typo in the playbook and no files are missing.') raise OpenShiftCheckException('\n'.join(msg)) tag_to_checks = defaultdict(set) for check in all_checks: for tag in check.tags: tag_to_checks[tag].add(check.name) resolved = check_names.copy() for tag in tag_names: resolved.update(tag_to_checks[tag]) return resolved def normalize(checks): """Return a clean list of check names. The input may be a comma-separated string or a sequence. Leading and trailing whitespace characters are removed. Empty items are discarded. """ if isinstance(checks, string_types): checks = checks.split(',') return [name.strip() for name in checks if name.strip()]
apache-2.0
1,259,086,927,939,239,200
36.168919
109
0.612979
false
4.154834
false
false
false
jakub-m/phantomcurl
phantomcurl/test/test_post_data.py
1
1059
from nose.tools import *

from phantomcurl.utils import split_post_items


def test_post_data_good():
    expected_given = [
        ([('foo', 'bar')], ['foo=bar']),
        ([('foo', '')], ['foo=']),
        ([('foo', '=')], ['foo==']),
        ([('', '')], ['=']),
        ([('', '=')], ['==']),
        ([('', 'bar')], ['=bar'])
    ]
    for expected, given in expected_given:
        yield check_post_data_good, expected, given


def check_post_data_good(expected_dict, post_items):
    post_dict = split_post_items(post_items)
    assert_equals(expected_dict, post_dict)


def test_post_data_bad():
    bad_input = ['foo', '']
    for input_item in bad_input:
        yield check_post_data_bad, input_item


def check_post_data_bad(post_item):
    assert_raises(ValueError, split_post_items, [post_item])


#def test_dict_to_post_string():
#    assert_in(
#        dict_to_post_string({'foo', 'bar'}),
#        ['foo=bar'])
#    assert_in(
#        dict_to_post_string({'foo': '', 'ham': 'spam '}),
#        ['foo=&ham=spam+', 'ham=spam+&foo=']
#    )
gpl-2.0
-7,154,057,688,203,821,000
24.214286
60
0.525024
false
3.105572
false
false
false
richardkiss/pycoinnet
pycoinnet/peer/Fetcher.py
1
4318
import asyncio import logging import weakref from pycoin.serialize import b2h_rev from pycoinnet.InvItem import InvItem, ITEM_TYPE_TX, ITEM_TYPE_BLOCK, ITEM_TYPE_MERKLEBLOCK class Fetcher: """ Fetching a merkleblock also fetches the transactions that follow, and includes them in the message as the "tx" key. """ def __init__(self, peer): self.peer = peer self.request_q = asyncio.Queue() self.futures = weakref.WeakValueDictionary() getdata_loop_future = asyncio.Task(self._getdata_loop()) next_message = peer.new_get_next_message_f( filter_f=lambda name, data: name in ["tx", "block", "merkleblock", "notfound"]) peer.add_task(self._fetch_loop(next_message, getdata_loop_future)) def fetch(self, inv_item, timeout=None): """ Return the fetched object or None if the remote says it doesn't have it, or times out by exceeding `timeout` seconds. """ future = self.futures.get(inv_item) if not future: future = asyncio.Future() self.futures[inv_item] = future self.request_q.put_nowait(inv_item) try: return (yield from asyncio.wait_for(future, timeout=timeout)) except asyncio.TimeoutError: return None def queue_size(self): pass # ## TODO: finish @asyncio.coroutine def _getdata_loop(self): while True: so_far = [] inv_item = yield from self.request_q.get() while True: so_far.append(inv_item) if self.request_q.qsize() == 0 or len(so_far) >= 50000: break inv_item = yield from self.request_q.get() self.peer.send_msg("getdata", items=so_far) @asyncio.coroutine def _fetch_loop(self, next_message, getdata_loop_future): try: while True: name, data = yield from next_message() ITEM_LOOKUP = dict(tx="tx", block="block", merkleblock="header") if name in ITEM_LOOKUP: item = data[ITEM_LOOKUP[name]] the_hash = item.hash() TYPE_DB = {"tx": ITEM_TYPE_TX, "block": ITEM_TYPE_BLOCK, "merkleblock": ITEM_TYPE_MERKLEBLOCK} the_type = TYPE_DB[name] inv_item = InvItem(the_type, the_hash) future = self.futures.get(inv_item) if name == "merkleblock": txs = [] for h in data["tx_hashes"]: name, data = yield from next_message() if name != "tx": logging.error( "insufficient tx messages after merkleblock message: missing %s", b2h_rev(h)) del self.futures[inv_item] future.set_result(None) break tx = data["tx"] if tx.hash() != h: logging.error( "missing tx message after merkleblock message: missing %s", b2h_rev(h)) del self.futures[inv_item] future.set_result(None) break txs.append(tx) item.txs = txs if future is not None: del self.futures[inv_item] if not future.done(): future.set_result(item) else: logging.info("got %s unsolicited", item.id()) if name == "notfound": for inv_item in data["items"]: the_hash = inv_item.data future = self.futures.get(inv_item) if future: del self.futures[inv_item] future.set_result(None) except EOFError: getdata_loop_future.cancel()
mit
-5,082,640,224,147,455,000
40.12381
107
0.46943
false
4.540484
false
false
false
GNOME/orca
src/orca/scripts/apps/Instantbird/chat.py
1
6860
# Orca # # Copyright 2010 Joanmarie Diggs. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., Franklin Street, Fifth Floor, # Boston MA 02110-1301 USA. """Custom chat module for Instantbird.""" __id__ = "$Id$" __version__ = "$Revision$" __date__ = "$Date$" __copyright__ = "Copyright (c) 2010 Joanmarie Diggs." __license__ = "LGPL" import pyatspi import orca.chat as chat ######################################################################## # # # The Instantbird chat class. # # # ######################################################################## class Chat(chat.Chat): def __init__(self, script, buddyListAncestries): chat.Chat.__init__(self, script, buddyListAncestries) ######################################################################## # # # InputEvent handlers and supporting utilities # # # ######################################################################## def getMessageFromEvent(self, event): """Get the actual displayed message. This will almost always be the unaltered any_data from an event of type object:text-changed:insert. Arguments: - event: the Event from which to take the text. Returns the string which should be presented as the newly-inserted text. (Things like chatroom name prefacing get handled elsewhere.) """ string = "" # IMs are written in areas that look like bubbles. When a new bubble # is inserted, we see an embedded object character inserted into the # document frame. The first paragraph is the bubble title; the # rest (usually just one) are the message itself. # if self._script.utilities.isDocument(event.source): bubble = event.source[event.detail1] hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_PARAGRAPH paragraphs = pyatspi.findAllDescendants(bubble, hasRole) # If the user opted the non-default, "simple" appearance, then this # might not be a bubble at all, but a paragraph. # if not paragraphs and bubble.getRole() == pyatspi.ROLE_PARAGRAPH: paragraphs.append(bubble) for paragraph in paragraphs: msg = self._script.utilities.substring(paragraph, 0, -1) if msg == self._script.EMBEDDED_OBJECT_CHARACTER: # This seems to occur for non-focused conversations. # msg = self._script.utilities.substring(paragraph[0], 0, -1) string = self._script.utilities.appendString(string, msg) return string # If we instead have a section, we are writing another message into # the existing bubble. In this case, we get three separate items # inserted: a separator, a paragraph with the desired text, and an # empty section. 
# if event.source.getRole() == pyatspi.ROLE_SECTION: obj = event.source[event.detail1] if obj and obj.getRole() == pyatspi.ROLE_PARAGRAPH: try: text = obj.queryText() except: pass else: string = text.getText(0, -1) return string ######################################################################## # # # Convenience methods for identifying, locating different accessibles # # # ######################################################################## def isChatRoomMsg(self, obj): """Returns True if the given accessible is the text object for associated with a chat room conversation. Arguments: - obj: the accessible object to examine. """ if not obj: return False if self._script.utilities.isDocument(obj): return True return obj.getRole() in [pyatspi.ROLE_SECTION, pyatspi.ROLE_PARAGRAPH] def getChatRoomName(self, obj): """Attempts to find the name of the current chat room. Arguments: - obj: The accessible of interest Returns a string containing what we think is the chat room name. """ name = "" ancestor = self._script.utilities.ancestorWithRole( obj, [pyatspi.ROLE_SCROLL_PANE, pyatspi.ROLE_FRAME], [pyatspi.ROLE_APPLICATION]) if ancestor and ancestor.getRole() == pyatspi.ROLE_SCROLL_PANE: # The scroll pane has a proper labelled by relationship set. # name = self._script.utilities.displayedLabel(ancestor) if not name: try: text = self._script.utilities.displayedText(ancestor) if text.lower().strip() != self._script.name.lower().strip(): name = text except: pass return name def isFocusedChat(self, obj): """Returns True if we plan to treat this chat as focused for the purpose of deciding whether or not a message should be presented to the user. Arguments: - obj: the accessible object to examine. """ # Normally, we'd see if the top level window associated # with this object had STATE_ACTIVE. That doesn't work # here. So see if the script for the locusOfFocus is # this script. If so, the only other possibility is that # we're in the buddy list instead. # if obj and obj.getState().contains(pyatspi.STATE_SHOWING) \ and self._script.utilities.isInActiveApp(obj) \ and not self.isInBuddyList(obj): return True return False
lgpl-2.1
-3,200,580,494,293,113,300
37.757062
79
0.530029
false
4.803922
false
false
false
wfhio/tramcar
job_board/models/site_config.py
1
2504
from __future__ import unicode_literals

from django.db import models
from django.contrib.sites.models import Site


class SiteConfig(models.Model):
    expire_after = models.SmallIntegerField(default=30)
    # NOTE: We set a default here, but we will override this with a more
    # suitable default when we create the SiteConfig instance
    admin_email = models.EmailField(default='admin@site')
    site = models.OneToOneField(Site, on_delete=models.CASCADE)
    remote = models.BooleanField(
        default=False,
        help_text="Select if this job board is for remote jobs only"
    )
    protocol = models.CharField(
        default='http',
        choices=(('http', 'http'), ('https', 'https')),
        max_length=5,
        help_text="The protocol to use when building links in "
                  "e-mail templates, etc."
    )
    google_analytics = models.CharField(
        max_length=20,
        blank=True,
        help_text="Google Analytics Tracking ID"
    )
    twitter_user = models.CharField(
        max_length=15,
        blank=True,
        help_text="Your site's Twitter username, fill in to "
                  "have a Follow icon appear on select pages"
    )
    twitter_consumer_key = models.CharField(max_length=100, blank=True)
    twitter_consumer_secret = models.CharField(max_length=100, blank=True)
    twitter_access_token = models.CharField(max_length=100, blank=True)
    twitter_access_token_secret = models.CharField(max_length=100, blank=True)
    stripe_secret_key = models.CharField(max_length=100, blank=True)
    stripe_publishable_key = models.CharField(max_length=100, blank=True)
    price = models.DecimalField(
        max_digits=5,
        decimal_places=2,
        default=0,
        help_text="Price to charge for posting a job, "
                  "set to 0 to disable charging"
    )
    mailchimp_username = models.CharField(max_length=20, blank=True)
    mailchimp_api_key = models.CharField(max_length=50, blank=True)
    mailchimp_list_id = models.CharField(max_length=20, blank=True)

    def price_in_cents(self):
        # Stripe expects an integer
        return int(self.price * 100)

    def __str__(self):
        return self.site.name
mit
-7,891,469,627,665,054,000
42.929825
78
0.580272
false
4.324698
false
false
false
furthz/colegio
src/discounts/forms.py
1
4824
from django import forms from enrollment.models import Servicio from enrollment.models import TipoServicio from enrollment.models import Matricula from crispy_forms.helper import FormHelper from crispy_forms.layout import Submit from django.forms import ModelForm, Form from utils.models import TiposNivel from django.utils.translation import ugettext_lazy as _ from discounts.models import Descuento from discounts.models import TipoDescuento from utils.middleware import get_current_colegio, get_current_userID ############################################################## # Solicitar Descuentos ############################################################## class SolicitarDescuentoForm(ModelForm): """ Formulario de la clase Descuento Nota: solo se añade como campos los que son definidos por los usuarios """ class Meta: model = Descuento fields = [ 'matricula', 'tipo_descuento', 'numero_expediente', 'comentario', ] labels = { 'matricula':_('Solicitante'), 'tipo_descuento':_('Descuento'), 'numero_expediente':_('Nro. Expediente'), 'comentario':_('Comentario'), } def ChoiceNiveles(self): MY_CHOICES = ( ('1', 'Inicial'), ('2', 'Primaria'), ('3', 'Secundaria'), ) return MY_CHOICES def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) #self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles()) #self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados()) self.fields['matricula'].widget.attrs.update({'class': 'form-control'}) self.fields['tipo_descuento'].widget.attrs.update({'class': 'form-control'}) self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'}) self.fields['comentario'].widget.attrs.update({'class': 'form-control'}) self.fields['matricula'].widget.attrs['editable'] = False class TipoDescuentForm(ModelForm): """ Formulario de la clase Descuento Nota: solo se añade como campos los que son definidos por los usuarios """ servicio = forms.ModelChoiceField(queryset=Servicio.objects.filter(activo=True)) class Meta: model = TipoDescuento fields = [ 'servicio', 'descripcion', 'porcentaje', ] labels = { 'servicio': _('Servicio'), 'descripcion': _('Descripción'), 'porcentaje': _('Porcentaje'), } def __init__(self, *args, **kwargs): colegio = kwargs.pop('colegio', None) super(TipoDescuentForm, self).__init__(*args, **kwargs) # self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles()) # self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados()) self.fields['servicio'].widget.attrs.update({'class': 'form-control'}) self.fields['descripcion'].widget.attrs.update({'class': 'form-control'}) self.fields['porcentaje'].widget.attrs.update({'class': 'form-control'}) if colegio: self.fields['servicio'].queryset = Servicio.objects.filter(activo=True,tipo_servicio__colegio__id_colegio=colegio) ############################################################## # Aprobar Descuentos ############################################################## class DetalleDescuentosForm(forms.Form): """ Formulario para filtar los detalles de Control de ingresos Nota: solo se añaden com campos los que son definidos por los usuarios """ alumno = forms.CharField(required=False) anio = forms.CharField() numero_expediente = forms.CharField(required=False) estado = forms.CharField() def ChoiceAnio(self): MY_CHOICES = ( ('2017', '2017'), ('2016', '2016'), ) return MY_CHOICES def ChoiceEstado(self): MY_CHOICES = ( ('Todos', 'Todos'), ('Aprobado', 'Aprobado'), ('No_aprobado', 'No aprobado'), ('Pendiente', 'Pendiente'), ) return MY_CHOICES def __init__(self, 
*args, **kwargs): super().__init__(*args, **kwargs) self.fields['anio'] = forms.ChoiceField(choices=self.ChoiceAnio()) self.fields['estado'] = forms.ChoiceField(choices=self.ChoiceEstado()) self.fields['alumno'].widget.attrs.update({'class': 'form-control'}) self.fields['anio'].widget.attrs.update({'class': 'form-control'}) self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'}) self.fields['estado'].widget.attrs.update({'class': 'form-control'})
mit
4,555,708,221,566,041,000
35.793893
126
0.578423
false
3.632253
false
false
false
daviddeng/azrael
demos/ctrl_swarm.py
1
4132
# Copyright 2014, Oliver Nagy <[email protected]>
#
# This file is part of Azrael (https://github.com/olitheolix/azrael)
#
# Azrael is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Azrael is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Azrael. If not, see <http://www.gnu.org/licenses/>.
"""
Manoeuvre the swarm of cubes in an orchestrated fashion.

Due to the lack of any feedback control the cubes may not move too orderly
but it suffices to demonstrate the principle.
"""
import os
import sys
import time
import setproctitle
import multiprocessing

# Augment the Python path so that we can include the main project.
p = os.path.dirname(os.path.abspath(__file__))
p = os.path.join(p, '..')
sys.path.insert(0, p)
del p

import azrael.client
import azrael.config as config
import azrael.types as types  # assumed import: 'types.CmdBooster' is used below but no 'types' import survives in this record


class ControllerCubeLeft(multiprocessing.Process):
    def __init__(self, objID, ip=config.addr_clerk, port=config.port_clerk):
        super().__init__()
        self.left = 0
        self.right = 1
        self.ip = ip
        self.port = port
        self.objID = objID

    def run(self):
        client = azrael.client.Client(ip=self.ip, port_clerk=self.port)

        # ---------------------------------------------------------------------
        # Edit here to change the force of boosters.
        # ---------------------------------------------------------------------

        # Turn both boosters on after 2s.
        left = types.CmdBooster(self.left, force=0.1)
        right = types.CmdBooster(self.right, force=0.1)
        client.controlParts(self.objID, [right, left], [])
        print('{0:02d}: Manoeuvre 1'.format(self.objID))
        time.sleep(2)

        # Fire the booster asymmetrically to make the cube turn.
        left = types.CmdBooster(self.left, force=0)
        right = types.CmdBooster(self.right, force=1)
        client.controlParts(self.objID, [right, left], [])
        print('{0:02d}: Manoeuvre 2'.format(self.objID))
        time.sleep(2)

        # Reverse the force settings to stop the spinning.
        left = types.CmdBooster(self.left, force=1)
        right = types.CmdBooster(self.right, force=0)
        client.controlParts(self.objID, [right, left], [])
        print('{0:02d}: Manoeuvre 3'.format(self.objID))
        time.sleep(2)

        # Use the same force on both boosters to just move forward without
        # inducing any more spinning.
        left = types.CmdBooster(self.left, force=0.1)
        right = types.CmdBooster(self.right, force=0.1)
        client.controlParts(self.objID, [right, left], [])
        time.sleep(4)

        # Done.
        print('{0:02d}: Manoeuvre 4'.format(self.objID))


class ControllerCubeRight(ControllerCubeLeft):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Swap the index for left/right compared to the base class.
        self.left = 1
        self.right = 0


def main():
    addr = config.addr_clerk

    # Controllers for columns 1, 2, 3, 4.
    CCL, CCR = ControllerCubeLeft, ControllerCubeRight
    group_1 = [CCL(4 * _ + 0, addr) for _ in range(1, 5)]
    group_2 = [CCL(4 * _ + 1, addr) for _ in range(1, 5)]
    group_3 = [CCR(4 * _ + 2, addr) for _ in range(1, 5)]
    group_4 = [CCR(4 * _ + 3, addr) for _ in range(1, 5)]

    # Start the cubes in the two outer columns.
    time.sleep(0.5)
    for p0, p1 in zip(group_1, group_4):
        p0.start()
        p1.start()
        time.sleep(0.5)

    # Start the cubes in the two inner columns.
    time.sleep(1)
    for p0, p1 in zip(group_2, group_3):
        p0.start()
        p1.start()
        time.sleep(0.5)

    print('done')


if __name__ == '__main__':
    main()
agpl-3.0
8,619,977,157,674,257,000
32.056
79
0.616167
false
3.364821
false
false
false
scott-maddox/simplepl
src/simplepl/dialogs/lockin_config_dialog.py
1
4145
#
#   Copyright (c) 2013-2014, Scott J Maddox
#
#   This file is part of SimplePL.
#
#   SimplePL is free software: you can redistribute it and/or modify
#   it under the terms of the GNU Affero General Public License as
#   published by the Free Software Foundation, either version 3 of the
#   License, or (at your option) any later version.
#
#   SimplePL is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#   GNU Affero General Public License for more details.
#
#   You should have received a copy of the GNU Affero General Public
#   License along with SimplePL. If not, see
#   <http://www.gnu.org/licenses/>.
#
#######################################################################

# third party imports
from PySide import QtGui, QtCore


class LockinConfigDialog(QtGui.QDialog):
    def __init__(self, lockin, parent=None):
        super(LockinConfigDialog, self).__init__(parent)
        self.setModal(True)

        settings = QtCore.QSettings()
        timeConstantIndex = int(settings.value('lockin/time_constant_index',
                                               9))  # 300 ms default
        reserveModeIndex = int(settings.value('lockin/reserve_mode_index',
                                              0))  # High reserve default
        inputLineFilterIndex = int(settings.value('lockin/input_line_filter_index',
                                                  3))  # both filters default

        self.timeConstantComboBox = QtGui.QComboBox()
        for text in lockin.getTimeConstantLabelsList():
            self.timeConstantComboBox.addItem(text)
        self.timeConstantComboBox.setCurrentIndex(timeConstantIndex)

        self.reserveModeComboBox = QtGui.QComboBox()
        self.reserveModeComboBox.addItem('High Reserve')
        self.reserveModeComboBox.addItem('Normal')
        self.reserveModeComboBox.addItem('Low Noise (minimum)')
        self.reserveModeComboBox.setCurrentIndex(reserveModeIndex)

        self.inputLineFilterComboBox = QtGui.QComboBox()
        self.inputLineFilterComboBox.addItem('no filters')
        self.inputLineFilterComboBox.addItem('line notch filter')
        self.inputLineFilterComboBox.addItem('2x line notch filter')
        self.inputLineFilterComboBox.addItem('both notch filters')
        self.inputLineFilterComboBox.setCurrentIndex(inputLineFilterIndex)

        layout = QtGui.QVBoxLayout(self)

        form = QtGui.QFormLayout()
        form.addRow('Time Constant', self.timeConstantComboBox)
        form.addRow('Reserve Mode', self.reserveModeComboBox)
        form.addRow('Input Line Filter', self.inputLineFilterComboBox)
        layout.addLayout(form)

        # OK and Cancel buttons
        self.buttons = QtGui.QDialogButtonBox(
            QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal, self)
        layout.addWidget(self.buttons)

        # Connect buttons
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    @staticmethod
    def getLockinConfig(lockin, parent=None):
        '''
        Returns (timeConstantIndex, reserveModeIndex, inputLineFilterIndex,
        accepted), and changes the corresponding values in the settings.
        '''
        dialog = LockinConfigDialog(lockin, parent)
        result = dialog.exec_()
        accepted = (result == QtGui.QDialog.Accepted)

        timeConstantIndex = dialog.timeConstantComboBox.currentIndex()
        reserveModeIndex = dialog.reserveModeComboBox.currentIndex()
        inputLineFilterIndex = dialog.inputLineFilterComboBox.currentIndex()

        settings = QtCore.QSettings()
        settings.setValue('lockin/time_constant_index', timeConstantIndex)
        settings.setValue('lockin/reserve_mode_index', reserveModeIndex)
        settings.setValue('lockin/input_line_filter_index', inputLineFilterIndex)
        settings.sync()

        return timeConstantIndex, reserveModeIndex, \
            inputLineFilterIndex, accepted
agpl-3.0
-6,380,274,891,226,863,000
42.177083
83
0.666104
false
4.456989
false
false
false
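A minimal usage sketch for the dialog above (not part of the original file): it assumes SimplePL is importable, a Qt display is available, and that the lockin argument can be any driver object exposing getTimeConstantLabelsList(); the FakeLockin stub below is purely hypothetical.

from PySide import QtGui
from simplepl.dialogs.lockin_config_dialog import LockinConfigDialog  # import path assumed from the record's path field

class FakeLockin(object):
    # Stand-in for a real lock-in driver; only the method the dialog needs.
    def getTimeConstantLabelsList(self):
        return ['100 ms', '300 ms', '1 s']

if __name__ == '__main__':
    app = QtGui.QApplication([])
    # Shows the modal dialog, persists the chosen indices via QSettings,
    # and returns them together with an 'accepted' flag.
    tc, reserve, line_filter, accepted = LockinConfigDialog.getLockinConfig(FakeLockin())
    print(tc, reserve, line_filter, accepted)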
eawag-rdm/xlsxtocsv
xlsxtocsv/xlsxtocsv.py
1
3605
# -*- coding: utf-8 -*-

from __future__ import absolute_import, division, print_function, unicode_literals
import datetime as dt
from Tkinter import Tk
import tkFileDialog
import openpyxl as op
import argparse
import os.path
import sys
import re
import csv

__metaclass__ = type


class RFC4180(csv.Dialect):
    def __init__(self):
        csv.Dialect.__init__(self)
    delimiter = b','
    doublequote = True
    escapechar = None
    lineterminator = b'\r\n'
    quotechar = b'"'
    quoting = csv.QUOTE_MINIMAL
    skipinitialspace = False
    strict = True


def parseargs():
    pa = argparse.ArgumentParser(description=
        'Exports multiple CSV files from an Excel *.xlsx Workbook')
    pa.add_argument('-f', metavar='EXCELFILE',
                    help='The Excel file to export. ' +
                         'If omitted, a graphical file chooser will be used.')
    pa.add_argument('-o', metavar='OUTPUTDIRECTORY',
                    help='The output directory. Default is the current ' +
                         'directory if EXCELFILE was given, otherwise a ' +
                         'file chooser will be used as well.')
    args = pa.parse_args(sys.argv[1:])
    return vars(args)


def _stringify(dat):
    if not isinstance(dat, basestring):
        return str(dat).encode('utf-8')
    else:
        return dat.encode('utf-8')


def _transmap(dat):
    transmap = {
        # empty cells are going to be empty strings
        None: '',
        # workaround for bug in openpyxl
        # https://bitbucket.org/openpyxl/openpyxl/issues/674/
        dt.datetime(1899, 12, 30, 0, 0): dt.time(0, 0),
        dt.datetime(1899, 12, 31, 0, 0): dt.datetime(1900, 1, 1, 0, 0),
    }
    return transmap[dat] if dat in transmap else dat


def _datefix(dat):
    # if typ is datetime.datetime and time-part is 0:0:0,
    # convert to datetime.date (assume xlsx cell-type is "Date").
    if (type(dat) == dt.datetime and
            (dat.hour, dat.minute, dat.second) == (0, 0, 0)):
        dat = dat.date()
    return dat


def transform(l):
    l = [_transmap(f) for f in l]
    l = [_datefix(f) for f in l]
    l = [_stringify(f) for f in l]
    return l


def write_csv(data, outfile):
    with open(outfile, 'wb') as fout:
        writer = csv.writer(fout, dialect='RFC4180')
        writer.writerows(data)


def main():
    csv.register_dialect(u'RFC4180', RFC4180)
    home = os.path.expanduser('~')
    xlsxfile = parseargs()['f']
    out_dir = parseargs()['o']
    if xlsxfile is None:
        root = Tk()
        root.withdraw()
        f = tkFileDialog.askopenfile(title='Choose file to convert',
                                     filetypes=[('xlsx', '*.xlsx')],
                                     initialdir=home)
        if f:
            xlsxfile = f.name
            f.close()
        else:
            sys.exit()
        if out_dir is None:
            out_dir = tkFileDialog.askdirectory(title='Choose output directory',
                                                initialdir=home)
            if not out_dir:
                sys.exit()
        root.destroy()
    if not out_dir:
        out_dir = os.getcwd()
    out_prefix = os.path.splitext(os.path.basename(xlsxfile))[0]
    wb = op.load_workbook(xlsxfile, data_only=True)
    for sn in wb.sheetnames:
        outfile = os.path.join(out_dir,
                               out_prefix + '_' + re.sub(r'\s+', '_', sn) + '.csv')
        data = []
        sheet = wb.get_sheet_by_name(sn)
        for l in sheet.values:
            data.append(transform(l))
        write_csv(data, outfile)


if __name__ == '__main__':
    main()
agpl-3.0
8,526,891,086,674,498,000
30.347826
82
0.561165
false
3.601399
false
false
false
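A short illustration (not part of the original module) of what the transform() helper above does to one row of cell values; it assumes the module is importable as xlsxtocsv.xlsxtocsv and runs under Python 2, which the module targets (Tkinter, basestring).

import datetime as dt
from xlsxtocsv.xlsxtocsv import transform  # import path assumed from the record's path field

row = [None, u'text', 3.14, dt.datetime(2016, 5, 1, 0, 0)]
# Empty cells become '', midnight datetimes collapse to dates, and every
# value ends up as a UTF-8 encoded string ready for the csv writer:
# ['', 'text', '3.14', '2016-05-01']
print(transform(row))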
rboman/progs
bin/powergrep.py
1
3561
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# try to replace "old_div(a,b)" by "a/b"
# with a and b being complex expressions involving brackets, etc.
# processes all the python files recursively from the current folder
#
# you must use the script several times
# (it processes 1 "old_div" per line at a time)
# Does not process old_divs spanning several lines such as
#    old_div(a,
#            b)

import sys, os
import fnmatch, re
import subprocess


def all_files(root,
              patterns='*',
              skips='*.svn*;*.git*;*build*',
              single_level=False,
              yield_folders=False):
    #self.checkPath(root)
    patterns = patterns.split(';')
    skips = skips.split(';')
    for path, subdirs, files in os.walk(root):
        # print('processing folder', path)
        if yield_folders:
            files.extend(subdirs)
        files.sort()
        for name in files:
            for pattern in patterns:
                if fnmatch.fnmatch(name, pattern):
                    fullname = os.path.join(path, name)
                    ok = True
                    for skip in skips:
                        if fnmatch.fnmatch(fullname, skip):
                            ok = False
                    if ok:
                        yield fullname
                    break
        if single_level:
            break


def paren_matcher (n):
    # poor man's matched paren scanning, gives up
    # after n+1 levels.  Matches any string with balanced
    # parens inside; add the outer parens yourself if needed.
    # Nongreedy.
    # https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
    return r"[^()]*?(?:\("*n+r"[^()]*?"+r"\)[^()]*?)*?"*n


if __name__ == '__main__':

    # the regexp
    reg = re.compile("old_div\s*\(("+paren_matcher(5)+'),('+paren_matcher(5)+')\)')

    # loop recursively on all files with a given extension
    for f in all_files(os.getcwd(), patterns='*.py;*.pyw'):
        #print('f=',f)

        # read the whole file
        file = open(f, mode='r', encoding='utf-8')
        try:
            alllines = file.readlines()
        except:
            print(f'\nERROR: file {f} contains non-unicode characters!\n')
            raise
        file.close()

        newlines = []
        modified = False
        for l in alllines:
            m = reg.search(l)
            if m:
                print(f"match found in {f}")
                g = m.groups()
                if len(g)!=2:
                    raise Exception(f"=> ERROR: {len(g)} arguments found instead of 2!")
                else:
                    #print(f'\t{m.group(0)} => {g[0].strip()}/{g[1].strip()}')
                    newl = l.replace(m.group(0), f'{g[0].strip()}/{g[1].strip()}')
                    print("\told string:", l.rstrip())
                    print("\tnew string:", newl.rstrip())
                    newlines.append(newl)
                    modified = True
            else:
                newlines.append(l)

        if modified:
            file = open(f, mode='w', encoding='utf-8')
            for l in newlines:
                file.write(l)
            file.close()

    """
    with open(f, "rb") as source:
        m = reg.search(s1)
        # print(m)
        if m:
            g = m.groups()
            if len(g)!=2:
                print ("error:")
                print (g)
            else:
                print(f'{m.group(0)} => {g[0].strip()}/{g[1].strip()}')
                print("old string:", s1)
                print("new string:", s1.replace(m.group(0), f'{g[0].strip()}/{g[1].strip()}'))
    """
apache-2.0
5,097,906,595,243,001,000
30.236842
99
0.495366
false
3.764271
false
false
false
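A self-contained check (not in the original script) of the substitution the script above performs, rebuilding the same bounded nested-parentheses pattern and applying it to one sample line:

import re

def paren_matcher(n):
    # Same nested-parentheses pattern used by the script above.
    return r"[^()]*?(?:\("*n + r"[^()]*?" + r"\)[^()]*?)*?"*n

reg = re.compile(r"old_div\s*\((" + paren_matcher(5) + r"),(" + paren_matcher(5) + r")\)")

line = "x = old_div((a + b) * f(c), 2.0)\n"
m = reg.search(line)
g = m.groups()
# Prints: x = (a + b) * f(c)/2.0
print(line.replace(m.group(0), '{}/{}'.format(g[0].strip(), g[1].strip())).rstrip())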
soupmonkey/pushcoin
PoS/payment-processor/settings.py
1
1339
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2012 Slawomir Lisznianski <[email protected]>
#
# GNU General Public Licence (GPL)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
#

# PushCoin Error Codes from
#   https://pushcoin.com/Pub/SDK/ErrorCodes
#
ERR_ACCOUNT_NOT_FOUND=201
ERR_INVALID_CURRENCY=202
ERR_PAYMENT_SIGNATURE_CHECK_FAILED=203
ERR_CRYPTO_FAILURE=204
ERR_INVALID_GRATUITY_TYPE=205
ERR_VALUE_OUT_OF_RANGE=206
ERR_INVALID_RECIPIENT=207
ERR_EXPIRED_PTA=208
ERR_DUPLICATE_PTA=209
ERR_INSUFFICIENT_FUNDS=300

MAX_SCALE_VAL = 6
MERCHANT_MAT = '5bf54dd118bc866567061a2be41860f7b5389f7c'
CURRENCY_CODE = 'USD'
PUSHCOIN_SERVER_URL = 'https://api.pushcoin.com:20001/pcos/'
gpl-3.0
-4,831,188,054,566,369,000
33.333333
79
0.765497
false
3.099537
false
false
false
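A tiny sketch (not part of the original file) showing one way the error-code constants above could be turned into a reverse lookup table for log messages, assuming the module is importable as settings:

import settings  # the payment-processor settings module above

# Map numeric PushCoin error codes back to their symbolic names.
ERROR_NAMES = {v: k for k, v in vars(settings).items() if k.startswith('ERR_')}
print(ERROR_NAMES[208])  # -> ERR_EXPIRED_PTA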
PuzzleboxIO/brainstorms-python
setup.py2app.py
1
3818
""" This is a setup.py script generated by py2applet Usage: python2.7 setup.py py2app """ from setuptools import setup APP = ['brainstorms-local.py'] data_files=[ \ (".", \ #("Content/Resources", \ ["puzzlebox_brainstorms_configuration.ini"]), ("images", \ ["images/puzzlebox.ico", \ "images/puzzlebox.icns", \ "images/puzzlebox_logo.png", \ "images/1-upper_left-orange.png", \ "images/1-upper_left-white.png", \ "images/2-up-orange.png", \ "images/2-up-white.png", \ "images/3-upper_right-orange.png", \ "images/3-upper_right-white.png", \ "images/7-lower_left-orange.png", \ "images/7-lower_left-white.png", \ "images/8-down-orange.png", \ "images/8-down-white.png", \ "images/9-lower_right-orange.png", \ "images/9-lower_right-white.png", \ "images/brainstorms-aileron_left.svg", \ "images/brainstorms-aileron_right.svg", \ "images/brainstorms-elevator_forward.svg", \ "images/brainstorms-elevator_reverse.svg", \ "images/brainstorms-fly_forward.svg", \ "images/brainstorms-hover.svg", \ "images/brainstorms-land_arrow.svg", \ "images/brainstorms-rudder-left.svg", \ "images/brainstorms-rudder-right.svg", \ "images/brainstorms_stop.svg", \ "images/brainstorms_wheelchair_forward.svg", \ "images/brainstorms_wheelchair_left.svg", \ "images/brainstorms_wheelchair_reverse.svg", \ "images/brainstorms_wheelchair_right.svg", \ "images/braintorms-throttle_up.svg", \ "images/puzzlebox_helicopter.svg", \ ]), \ #("qt_menu.nib", \ #["/opt/local/lib/Resources/qt_menu.nib/classes.nib", \ #"/opt/local/lib/Resources/qt_menu.nib/info.nib", \ #"/opt/local/lib/Resources/qt_menu.nib/keyedobjects.nib", \ #]), \ ] data_files=[] OPTIONS = { \ #'argv_emulation': True, \ 'argv_emulation': False, \ 'iconfile': 'images/puzzlebox.icns', \ 'strip': True, \ # Semi-standalone is an option you can enable with py2app that makes # your code reliant on the version of Python that is installed with the OS. 
# You also need to enable site-packages, as well (which apparently encourages # py2app to create the links to Python necessary for getting the bundle up # and running, although it's only supposed to tell it to include the # system and user site-packages in the system path) # http://beckism.com/2009/03/pyobjc_tips/ #'semi_standalone': True, \ #'site_packages': True, \ 'includes': [ \ 'PySide.QtSvg', \ ], \ 'excludes': ['PyQt4', 'sip'], \ 'frameworks': [ \ "/opt/local/share/qt4/plugins/imageformats/libqjpeg.dylib", \ "/opt/local/share/qt4/plugins/imageformats/libqgif.dylib", \ "/opt/local/share/qt4/plugins/imageformats/libqico.dylib", \ "/opt/local/share/qt4/plugins/imageformats/libqmng.dylib", \ "/opt/local/share/qt4/plugins/imageformats/libqsvg.dylib", \ "/opt/local/share/qt4/plugins/imageformats/libqtiff.dylib", \ ], \ "resources": [ \ "puzzlebox_brainstorms_configuration.ini", \ #"images/puzzlebox.ico", \ #"/opt/local/lib/Resources/qt_menu.nib/classes.nib", \ #"/opt/local/lib/Resources/qt_menu.nib/info.nib", \ #"/opt/local/lib/Resources/qt_menu.nib/keyedobjects.nib", \ ], \ } setup( name='Puzzlebox Brainstorms', version='0.8.0', description='Puzzlebox Brainstorms provides Brain-Computer Interface (BCI) controls for robots and devices', author='Steve Castellotti', author_email='[email protected]', url='http://brainstorms.puzzlebox.info', classifiers=[ \ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop', 'Programming Language :: Python', 'Operating System :: OS Independent', 'License :: Commercial', 'Topic :: Scientific/Engineering :: Human Machine Interfaces', ], app=APP, data_files=data_files, options={'py2app': OPTIONS}, setup_requires=['py2app'], )
agpl-3.0
-7,486,098,163,901,852,000
30.04065
109
0.680723
false
2.666201
false
false
false
nmih/ssbio
ssbio/databases/pdb.py
1
34959
""" PDBProp ======= """ import gzip import json import logging import os.path as op import mmtf import os from cobra.core import DictList import pandas as pd import requests import deprecation from Bio.PDB import PDBList from lxml import etree from six.moves.urllib_error import URLError from six.moves.urllib.request import urlopen, urlretrieve import ssbio.databases.pisa as pisa import ssbio.utils from ssbio.protein.structure.structprop import StructProp try: from StringIO import StringIO except ImportError: from io import StringIO log = logging.getLogger(__name__) class PDBProp(StructProp): """Store information about a protein structure from the Protein Data Bank. Extends the :class:`~ssbio.protein.structure.structprop.StructProp` class to allow initialization of the structure by its PDB ID, and then enabling downloads of the structure file as well as parsing its metadata. Args: ident (str): description (str): chains (str): mapped_chains (str): structure_path (str): file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB """ def __init__(self, ident, description=None, chains=None, mapped_chains=None, structure_path=None, file_type=None): StructProp.__init__(self, ident, description=description, chains=chains, mapped_chains=mapped_chains, is_experimental=True, structure_path=structure_path, file_type=file_type) self.experimental_method = None self.resolution = None self.date = None self.taxonomy_name = None self.biological_assemblies = DictList() """DictList: A list for storing Bioassembly objects related to this PDB ID""" def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False): """Download a structure file from the PDB, specifying an output directory and a file type. Optionally download the mmCIF header file and parse data from it to store within this object. Args: outdir (str): Path to output directory file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files force_rerun (bool): If structure file should be downloaded even if it already exists """ ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type', custom_error_text='Please set file type to be downloaded from the PDB: ' 'pdb, mmCif, xml, or mmtf') # XTODO: check if outfile exists using ssbio.utils.force_rerun, pdblist seems to take long if it exists # I know why - it's because we're renaming the ent to pdb. 
need to have mapping from file type to final extension # Then check if file exists, if not then download again p = PDBList() with ssbio.utils.suppress_stdout(): structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun) if not op.exists(structure_file): log.debug('{}: {} file not available'.format(self.id, file_type)) raise URLError('{}.{}: file not available to download'.format(self.id, file_type)) else: log.debug('{}: {} file saved'.format(self.id, file_type)) # Rename .ent files to .pdb if file_type == 'pdb': new_name = structure_file.replace('pdb', '').replace('ent', 'pdb') os.rename(structure_file, new_name) structure_file = new_name self.load_structure_path(structure_file, file_type) if load_header_metadata and file_type == 'mmtf': self.update(parse_mmtf_header(structure_file)) if load_header_metadata and file_type != 'mmtf': self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun))) def get_pisa_complex_predictions(self, outdir, existing_pisa_multimer_xml=None): if not existing_pisa_multimer_xml: pisa_xmls = pisa.download_pisa_multimers_xml(pdb_ids=self.id, outdir=outdir, save_single_xml_files=True) else: pisa_xmls = {} pisa_xmls[self.id] = existing_pisa_multimer_xml pisa_dict = pisa.parse_pisa_multimers_xml(pisa_xmls[self.id], download_structures=True, outdir=outdir) def __json_encode__(self): # TODO: investigate why saving with # does not work! to_return = {} for x in self.__dict__.keys(): if x == 'pdb_title' or x == 'description': sanitized = ssbio.utils.force_string(getattr(self, x)).replace('#', '-') else: to_return.update({x: getattr(self, x)}) return to_return def parse_mmtf_header(infile): """Parse an MMTF file and return basic header-like information. Args: infile (str): Path to MMTF file Returns: dict: Dictionary of parsed header Todo: - Can this be sped up by not parsing the 3D coordinate info somehow? - OR just store the sequences when this happens since it is already being parsed. """ infodict = {} mmtf_decoder = mmtf.parse(infile) infodict['date'] = mmtf_decoder.deposition_date infodict['release_date'] = mmtf_decoder.release_date try: infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods] except AttributeError: infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods] infodict['resolution'] = mmtf_decoder.resolution infodict['description'] = mmtf_decoder.title group_name_exclude = ['HOH'] chem_comp_type_exclude = ['l-peptide linking', 'peptide linking'] chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude])) infodict['chemicals'] = chemicals return infodict def download_mmcif_header(pdb_id, outdir='', force_rerun=False): """Download a mmCIF header file from the RCSB PDB by ID. Args: pdb_id: PDB ID outdir: Optional output directory, default is current working directory force_rerun: If the file should be downloaded again even if it exists Returns: str: Path to outfile """ # TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this # method in biopython. 
extra file types have not been added to biopython download yet pdb_id = pdb_id.lower() file_type = 'cif' folder = 'header' outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type) urlretrieve(download_link, outfile) log.debug('{}: saved header file'.format(outfile)) else: log.debug('{}: header file already saved'.format(outfile)) return outfile def parse_mmcif_header(infile): """Parse a couple important fields from the mmCIF file format with some manual curation of ligands. If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython. Args: infile: Path to mmCIF file Returns: dict: Dictionary of parsed header """ from Bio.PDB.MMCIF2Dict import MMCIF2Dict newdict = {} try: mmdict = MMCIF2Dict(infile) except ValueError as e: log.exception(e) return newdict chemical_ids_exclude = ['HOH'] chemical_types_exclude = ['l-peptide linking','peptide linking'] if '_struct.title' in mmdict: newdict['pdb_title'] = mmdict['_struct.title'] else: log.debug('{}: No title field'.format(infile)) if '_struct.pdbx_descriptor' in mmdict: newdict['description'] = mmdict['_struct.pdbx_descriptor'] else: log.debug('{}: no description field'.format(infile)) if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict: newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date'] elif '_database_PDB_rev.date' in mmdict: newdict['date'] = mmdict['_database_PDB_rev.date'] else: log.debug('{}: no date field'.format(infile)) if '_exptl.method' in mmdict: newdict['experimental_method'] = mmdict['_exptl.method'] else: log.debug('{}: no experimental method field'.format(infile)) # TODO: refactor how to get resolutions based on experimental method if '_refine.ls_d_res_high' in mmdict: try: if isinstance(mmdict['_refine.ls_d_res_high'], list): newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']] else: newdict['resolution'] = float(mmdict['_refine.ls_d_res_high']) except: try: newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution']) except: log.debug('{}: no resolution field'.format(infile)) else: log.debug('{}: no resolution field'.format(infile)) if '_chem_comp.id' in mmdict: chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'], ssbio.utils.not_find(mmdict['_chem_comp.type'], chemical_types_exclude, case_sensitive=False)) chemicals_fitered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True) newdict['chemicals'] = chemicals_fitered else: log.debug('{}: no chemical composition field'.format(infile)) if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict: newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name'] else: log.debug('{}: no organism field'.format(infile)) return newdict def download_sifts_xml(pdb_id, outdir='', force_rerun=False): """Download the SIFTS file for a PDB ID. Args: pdb_id (str): PDB ID outdir (str): Output directory, current working directory if not specified. 
force_rerun (bool): If the file should be downloaded again even if it exists Returns: str: Path to downloaded file """ baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/' filename = '{}.xml.gz'.format(pdb_id.lower()) outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml') if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): response = urlopen(baseURL + filename) with open(outfile, 'wb') as f: f.write(gzip.decompress(response.read())) return outfile def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file): """Map a UniProt residue number to its corresponding PDB residue number. This function requires that the SIFTS file be downloaded, and also a chain ID (as different chains may have different mappings). Args: uniprot_resnum (int): integer of the residue number you'd like to map chain_id (str): string of the PDB chain to map to sifts_file (str): Path to the SIFTS XML file Returns: (tuple): tuple containing: mapped_resnum (int): Mapped residue number is_observed (bool): Indicates if the 3D structure actually shows the residue """ # Load the xml with lxml parser = etree.XMLParser(ns_clean=True) tree = etree.parse(sifts_file, parser) root = tree.getroot() my_pdb_resnum = None # TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that my_pdb_annotation = False # Find the right chain (entities in the xml doc) ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity' for chain in root.findall(ent): # TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order! if chain.attrib['entityId'] == chain_id: # Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here" # Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum" # Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue) ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum my_uniprot_residue = chain.findall(ures) if len(my_uniprot_residue) == 1: # Get crossRefDb dbSource="PDB" parent = my_uniprot_residue[0].getparent() pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]' my_pdb_residue = parent.findall(pres) my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum']) # Get <residueDetail dbSource="PDBe" property="Annotation"> # Will be Not_Observed if it is not seen in the PDB anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]' my_pdb_annotation = parent.findall(anno) if len(my_pdb_annotation) == 1: my_pdb_annotation = my_pdb_annotation[0].text if my_pdb_annotation == 'Not_Observed': my_pdb_annotation = False else: my_pdb_annotation = True else: return None, False return my_pdb_resnum, my_pdb_annotation def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False): """Use the PDBe REST service to query for the best PDB structures for a UniProt ID. More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution. 
Here is the ranking algorithm described by the PDB paper: https://nar.oxfordjournals.org/content/44/D1/D385.full "Finally, a single quality indicator is also calculated for each entry by taking the harmonic average of all the percentile scores representing model and model-data-fit quality measures and then subtracting 10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays a role in characterising the quality of a structure. This single empirical 'quality measure' value is used by the PDBe query system to sort results and identify the 'best' structure in a given context. At present, entries determined by methods other than X-ray crystallography do not have similar data quality information available and are not considered as 'best structures'." Args: uniprot_id (str): UniProt Accession ID outname (str): Basename of the output file of JSON results outdir (str): Path to output directory of JSON results seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form) force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results Returns: list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are: * pdb_id: the PDB ID which maps to the UniProt ID * chain_id: the specific chain of the PDB which maps to the UniProt ID * coverage: the percent coverage of the entire UniProt sequence * resolution: the resolution of the structure * start: the structure residue number which maps to the start of the mapped sequence * end: the structure residue number which maps to the end of the mapped sequence * unp_start: the sequence residue number which maps to the structure start * unp_end: the sequence residue number which maps to the structure end * experimental_method: type of experiment used to determine structure * tax_id: taxonomic ID of the protein's original organism """ outfile = '' if not outdir: outdir = '' # if output dir is specified but not outname, use the uniprot if not outname and outdir: outname = uniprot_id if outname: outname = op.join(outdir, outname) outfile = '{}.json'.format(outname) # Load a possibly existing json file if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): with open(outfile, 'r') as f: raw_data = json.load(f) log.debug('{}: loaded existing json file'.format(uniprot_id)) # Otherwise run the web request else: # TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id), data={'key': 'value'}) if response.status_code == 404: log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id)) raw_data = {uniprot_id: {}} else: log.debug('{}: Obtained best structures'.format(uniprot_id)) raw_data = response.json() # Write the json file if specified if outfile: with open(outfile, 'w') as f: json.dump(raw_data, f) log.debug('{}: Saved json file of best structures'.format(uniprot_id)) data = dict(raw_data)[uniprot_id] # Filter for sequence identity percentage if seq_ident_cutoff != 0: for result in data: if result['coverage'] < seq_ident_cutoff: data.remove(result) return data def blast_pdb(seq, outfile='', outdir='', evalue=0.0001, seq_ident_cutoff=0.0, link=False, force_rerun=False): """Returns a list of BLAST hits of a sequence to available structures in the PDB. 
Args: seq (str): Your sequence, in string format outfile (str): Name of output file outdir (str, optional): Path to output directory. Default is the current directory. evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default). seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form) link (bool, optional): Set to True if a link to the HTML results should be displayed force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False Returns: list: Rank ordered list of BLAST hits in dictionaries. """ if len(seq) < 12: raise ValueError('Sequence must be at least 12 residues long.') if link: page = 'PDB results page: http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=HTML'.format(seq, evalue) print(page) parser = etree.XMLParser(ns_clean=True) outfile = op.join(outdir, outfile) if ssbio.utils.force_rerun(force_rerun, outfile): # Load the BLAST XML results if force_rerun=True page = 'http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=XML'.format( seq, evalue) req = requests.get(page) if req.status_code == 200: response = req.text # Save the XML file if outfile: with open(outfile, 'w') as f: f.write(response) # Parse the XML string tree = etree.ElementTree(etree.fromstring(response, parser)) log.debug('Loaded BLAST results from REST server') else: log.error('BLAST request timed out') return [] else: tree = etree.parse(outfile, parser) log.debug('{}: Loaded existing BLAST XML results'.format(outfile)) # Get length of original sequence to calculate percentages len_orig = float(len(seq)) root = tree.getroot() hit_list = [] for hit in root.findall('BlastOutput_iterations/Iteration/Iteration_hits/Hit'): info = {} hitdef = hit.find('Hit_def') if hitdef is not None: info['hit_pdb'] = hitdef.text.split('|')[0].split(':')[0].lower() info['hit_pdb_chains'] = hitdef.text.split('|')[0].split(':')[2].split(',') # One PDB can align to different parts of the sequence # Will just choose the top hit for this single PDB hsp = hit.findall('Hit_hsps/Hsp')[0] # Number of identical residues hspi = hsp.find('Hsp_identity') if hspi is not None: info['hit_num_ident'] = int(hspi.text) info['hit_percent_ident'] = int(hspi.text)/len_orig if int(hspi.text)/len_orig < seq_ident_cutoff: log.debug('{}: does not meet sequence identity cutoff'.format(hitdef.text.split('|')[0].split(':')[0])) continue # Number of similar residues (positive hits) hspp = hsp.find('Hsp_positive') if hspp is not None: info['hit_num_similar'] = int(hspp.text) info['hit_percent_similar'] = int(hspp.text) / len_orig # Total number of gaps (unable to align in either query or subject) hspg = hsp.find('Hsp_gaps') if hspg is not None: info['hit_num_gaps'] = int(hspg.text) info['hit_percent_gaps'] = int(hspg.text) / len_orig # E-value of BLAST hspe = hsp.find('Hsp_evalue') if hspe is not None: info['hit_evalue'] = float(hspe.text) # Score of BLAST hsps = hsp.find('Hsp_score') if hsps is not None: info['hit_score'] = float(hsps.text) hit_list.append(info) log.debug("{}: Number of BLAST hits".format(len(hit_list))) return hit_list def blast_pdb_df(blast_results): """Make a dataframe of BLAST results""" cols = ['hit_pdb', 'hit_pdb_chains', 'hit_evalue', 'hit_score', 'hit_num_ident', 'hit_percent_ident', 'hit_num_similar', 'hit_percent_similar', 'hit_num_gaps', 'hit_percent_gaps'] return 
pd.DataFrame.from_records(blast_results, columns=cols) def _property_table(): """Download the PDB -> resolution table directly from the RCSB PDB REST service. See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do Returns: Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns """ url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv' r = requests.get(url) p = pd.read_csv(StringIO(r.text)).set_index('structureId') return p def get_resolution(pdb_id): """Quick way to get the resolution of a PDB ID using the table of results from the REST service Returns infinity if the resolution is not available. Returns: float: resolution of a PDB ID in Angstroms TODO: - Unit test """ pdb_id = pdb_id.upper() if pdb_id not in _property_table().index: raise ValueError('PDB ID not in property table') else: resolution = _property_table().ix[pdb_id, 'resolution'] if pd.isnull(resolution): log.debug('{}: no resolution available, probably not an X-ray crystal structure') resolution = float('inf') return resolution def get_release_date(pdb_id): """Quick way to get the release date of a PDB ID using the table of results from the REST service Returns None if the release date is not available. Returns: str: Organism of a PDB ID """ pdb_id = pdb_id.upper() if pdb_id not in _property_table().index: raise ValueError('PDB ID not in property table') else: release_date = _property_table().ix[pdb_id, 'releaseDate'] if pd.isnull(release_date): log.debug('{}: no release date available') release_date = None return release_date def get_num_bioassemblies(pdb_id, cache=False, outdir=None, force_rerun=False): """Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies available. See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies' Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to recreate a biological assembly from the asymmetric unit can be accessed from the following requests. - Number of biological assemblies associated with a PDB entry - Access the transformation information needed to generate a biological assembly (nr=0 will return information for the asymmetric unit, nr=1 will return information for the first assembly, etc.) 
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this:: <nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/> Args: pdb_id (str): PDB ID cache (bool): If the XML file should be downloaded outdir (str): If cache, then specify the output directory force_rerun (bool): If cache, and if file exists, specify if API should be queried again """ parser = etree.XMLParser(ns_clean=True) if not outdir: outdir = os.getcwd() outfile = op.join(outdir, '{}_nrbiomols.xml'.format(pdb_id)) if ssbio.utils.force_rerun(force_rerun, outfile): page = 'https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId={}'.format(pdb_id) req = requests.get(page) if req.status_code == 200: response = req.text # Save the XML file if cache: with open(outfile, 'w') as f: f.write(response) # Parse the XML string tree = etree.ElementTree(etree.fromstring(response, parser)) log.debug('Loaded bioassembly information from REST server') else: log.error('Request timed out') req.raise_for_status() else: tree = etree.parse(outfile, parser) log.debug('{}: Loaded existing XML results'.format(outfile)) r = tree.getroot() has_biomols = r.get('hasAssemblies') if has_biomols == 'true': has_biomols = True else: has_biomols = False if has_biomols: num_biomols = r.get('count') else: num_biomols = 0 num_biomols = int(num_biomols) return num_biomols def get_bioassembly_info(pdb_id, biomol_num, cache=False, outdir=None, force_rerun=False): """Get metadata about a bioassembly from the RCSB PDB's REST API. See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1 The API returns an XML file containing the information on a biological assembly that looks like this:: <bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly"> <transformations operator="1" chainIds="A,B,C,D"> <transformation index="1"> <matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/> <shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/> </transformation> </transformations> </bioassembly> Args: pdb_id (str): PDB ID biomol_num (int): Biological assembly number you are interested in cache (bool): If the XML file should be downloaded outdir (str): If cache, then specify the output directory force_rerun (bool): If cache, and if file exists, specify if API should be queried again """ parser = etree.XMLParser(ns_clean=True) # # if not outdir: # outdir = os.getcwd() # outfile = op.join(outdir, '{}.xml'.format(self.id)) # # if ssbio.utils.force_rerun(force_rerun, outfile): # page = 'https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId={}&nr={}'.format( # self.original_pdb_id, biomol_num) # req = requests.get(page) # # if req.status_code == 200: # response = req.text # # # Save the XML file # if cache: # with open(outfile, 'w') as f: # f.write(response) # # # Parse the XML string # r = xmltodict.parse(response) # log.debug('Loaded bioassembly information from REST server') # else: # log.error('Request timed out') # req.raise_for_status() # else: # with open(outfile, 'r') as f: # r = xmltodict.parse(f.read()) # log.debug('{}: Loaded existing XML results'.format(outfile)) # # self.biomol_to_chain_dict[biomol_num] = {'chains': r['bioassembly']['transformations']['@chainIds'], # 'multiplier': len(r['bioassembly']['transformations']['transformation'])} # # TODO: figure out how to store matrices etc. 
# # log.info('{}_{}: ') def download_biomol(pdb_id, biomol_num, outdir, file_type='pdb', force_rerun=False): import zlib from six.moves.urllib_error import URLError from six.moves.urllib.request import urlopen, urlretrieve import contextlib ssbio.utils.make_dir(outdir) server_folder = pdb_id[1:3] if file_type == 'pdb': # server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/divided/{}/'.format(server_folder) server = 'https://files.rcsb.org/download/' server_filename = pdb_id + '.pdb%i.gz' % biomol_num local_filename = pdb_id + '_bio%i.pdb' % biomol_num outfile = op.join(outdir, local_filename) elif file_type.lower() == 'mmcif' or file_type.lower() == 'cif': server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/mmCIF/divided/{}/'.format(server_folder) server_filename = pdb_id + '-assembly%i.cif.gz' % biomol_num local_filename = pdb_id + '_bio%i.cif' % biomol_num outfile = op.join(outdir, local_filename) else: raise ValueError('Biological assembly only available in PDB or mmCIF file types.') if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): download_link = op.join(server, server_filename) try: with contextlib.closing(urlopen(download_link)) as f: decompressed_data = zlib.decompress(f.read(), 16 + zlib.MAX_WBITS) with open(op.join(outdir, local_filename), 'wb') as f: f.write(decompressed_data) except URLError as e: print(e) return None return outfile ######################################################################################################################## ######################################################################################################################## # DEPRECATED FUNCTIONS ######################################################################################################################## ######################################################################################################################## @deprecation.deprecated(deprecated_in="1.0", removed_in="2.0", details="Use Biopython's PDBList.retrieve_pdb_file function instead") def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False): """Download a structure from the RCSB PDB by ID. Specify the file type desired. Args: pdb_id: PDB ID file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz outdir: Optional output directory only_header: If only the header file should be downloaded force_rerun: If the file should be downloaded again even if it exists Returns: str: Path to outfile """ # method in biopython. 
extra file types have not been added to biopython download yet pdb_id = pdb_id.lower() file_type = file_type.lower() file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz'] if file_type not in file_types: raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz') if file_type == 'mmtf': file_type = 'mmtf.gz' if file_type.endswith('.gz'): gzipped = True else: gzipped = False if file_type == 'mmcif': file_type = 'cif' if only_header: folder = 'header' outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type)) else: folder = 'download' outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): if file_type == 'mmtf.gz' or file_type == 'mmtf': mmtf_api = '1.0' download_link = 'http://mmtf.rcsb.org/v{}/full/{}.mmtf.gz'.format(mmtf_api, pdb_id) else: download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type) urlretrieve(download_link, outfile) if gzipped: outfile = ssbio.utils.gunzip_file(infile=outfile, outfile=outfile.strip('.gz'), outdir=outdir, delete_original=False, force_rerun_flag=force_rerun) log.debug('{}: saved structure file'.format(outfile)) else: if file_type == 'mmtf.gz': outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf')) log.debug('{}: structure file already saved'.format(outfile)) return outfile
mit
1,119,046,539,905,341,600
40.225236
268
0.615778
false
3.782623
false
false
false
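A brief usage sketch for the module above (illustrative only): it assumes ssbio is installed and network access is available, and uses '1kf6' and 'P0ABB0' purely as example PDB and UniProt identifiers.

from ssbio.databases.pdb import PDBProp, best_structures

# Download a structure in mmtf format and parse basic header metadata.
struct = PDBProp(ident='1kf6', file_type='mmtf')
struct.download_structure_file(outdir='/tmp', file_type='mmtf', load_header_metadata=True)
print(struct.experimental_method, struct.resolution, struct.date)

# Rank-ordered chain-level mapping of a UniProt accession to PDB entries.
for hit in best_structures('P0ABB0', outdir='/tmp')[:3]:
    print(hit['pdb_id'], hit['chain_id'], hit['coverage'], hit['resolution'])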
tensorflow/probability
tensorflow_probability/python/optimizer/linesearch/hager_zhang.py
1
30378
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Implements the Hager-Zhang inexact line search algorithm. Line searches are a central component for many optimization algorithms (e.g. BFGS, conjugate gradient etc). Most of the sophisticated line search methods aim to find a step length in a given search direction so that the step length satisfies the [Wolfe conditions](https://en.wikipedia.org/wiki/Wolfe_conditions). [Hager-Zhang 2006](https://epubs.siam.org/doi/abs/10.1137/030601880) algorithm is a refinement of the commonly used [More-Thuente](https://dl.acm.org/citation.cfm?id=192132) algorithm. This module implements the Hager-Zhang algorithm. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import prefer_static from tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl __all__ = [ 'hager_zhang', ] def _machine_eps(dtype): """Returns the machine epsilon for the supplied dtype.""" dtype = dtype_util.as_numpy_dtype(tf.as_dtype(dtype)) return np.finfo(dtype).eps HagerZhangLineSearchResult = collections.namedtuple( 'HagerZhangLineSearchResults', [ 'converged', # Whether a point satisfying Wolfe/Approx wolfe was found. 'failed', # Whether the line search failed. It can fail if either the # objective function or the gradient are not finite at # an evaluation point. 'func_evals', # Number of function evaluations made. 'iterations', # Number of line search iterations made. 'left', # The left end point of the final bracketing interval. # If converged is True, it is equal to `right`. # Otherwise, it corresponds to the last interval computed. 'right' # The right end point of the final bracketing interval. # If converged is True, it is equal to `left`. # Otherwise, it corresponds to the last interval computed. ]) def hager_zhang(value_and_gradients_function, initial_step_size=None, value_at_initial_step=None, value_at_zero=None, converged=None, threshold_use_approximate_wolfe_condition=1e-6, shrinkage_param=0.66, expansion_param=5.0, sufficient_decrease_param=0.1, curvature_param=0.9, max_iterations=50, name=None): """The Hager Zhang line search algorithm. Performs an inexact line search based on the algorithm of [Hager and Zhang (2006)][2]. The univariate objective function `value_and_gradients_function` is typically generated by projecting a multivariate objective function along a search direction. Suppose the multivariate function to be minimized is `g(x1,x2, .. xn)`. Let (d1, d2, ..., dn) be the direction along which we wish to perform a line search. 
Then the projected univariate function to be used for line search is ```None f(a) = g(x1 + d1 * a, x2 + d2 * a, ..., xn + dn * a) ``` The directional derivative along (d1, d2, ..., dn) is needed for this procedure. This also corresponds to the derivative of the projected function `f(a)` with respect to `a`. Note that this derivative must be negative for `a = 0` if the direction is a descent direction. The usual stopping criteria for the line search is the satisfaction of the (weak) Wolfe conditions. For details of the Wolfe conditions, see ref. [3]. On a finite precision machine, the exact Wolfe conditions can be difficult to satisfy when one is very close to the minimum and as argued by [Hager and Zhang (2005)][1], one can only expect the minimum to be determined within square root of machine precision. To improve the situation, they propose to replace the Wolfe conditions with an approximate version depending on the derivative of the function which is applied only when one is very close to the minimum. The following algorithm implements this enhanced scheme. ### Usage: Primary use of line search methods is as an internal component of a class of optimization algorithms (called line search based methods as opposed to trust region methods). Hence, the end user will typically not want to access line search directly. In particular, inexact line search should not be confused with a univariate minimization method. The stopping criteria of line search is the satisfaction of Wolfe conditions and not the discovery of the minimum of the function. With this caveat in mind, the following example illustrates the standalone usage of the line search. ```python # Define value and gradient namedtuple ValueAndGradient = namedtuple('ValueAndGradient', ['x', 'f', 'df']) # Define a quadratic target with minimum at 1.3. def value_and_gradients_function(x): return ValueAndGradient(x=x, f=(x - 1.3) ** 2, df=2 * (x-1.3)) # Set initial step size. step_size = tf.constant(0.1) ls_result = tfp.optimizer.linesearch.hager_zhang( value_and_gradients_function, initial_step_size=step_size) # Evaluate the results. with tf.Session() as session: results = session.run(ls_result) # Ensure convergence. assert results.converged # If the line search converged, the left and the right ends of the # bracketing interval are identical. assert results.left.x == result.right.x # Print the number of evaluations and the final step size. print ("Final Step Size: %f, Evaluations: %d" % (results.left.x, results.func_evals)) ``` ### References: [1]: William Hager, Hongchao Zhang. A new conjugate gradient method with guaranteed descent and an efficient line search. SIAM J. Optim., Vol 16. 1, pp. 170-172. 2005. https://www.math.lsu.edu/~hozhang/papers/cg_descent.pdf [2]: William Hager, Hongchao Zhang. Algorithm 851: CG_DESCENT, a conjugate gradient method with guaranteed descent. ACM Transactions on Mathematical Software, Vol 32., 1, pp. 113-137. 2006. http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf [3]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in Operations Research. pp 33-36. 2006 Args: value_and_gradients_function: A Python callable that accepts a real scalar tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that correspond to scalar tensors of real dtype containing the point at which the function was evaluated, the value of the function, and its derivative at that point. The other namedtuple fields, if present, should be tensors or sequences (possibly nested) of tensors. 
In usual optimization application, this function would be generated by projecting the multivariate objective function along some specific direction. The direction is determined by some other procedure but should be a descent direction (i.e. the derivative of the projected univariate function must be negative at 0.). Alternatively, the function may represent the batching of `n` such line functions (e.g. projecting a single multivariate objective function along `n` distinct directions at once) accepting n points as input, i.e. a tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned namedtuple should each be a tensor of shape [n], with the corresponding input points, function values, and derivatives at those input points. initial_step_size: (Optional) Scalar positive `Tensor` of real dtype, or a tensor of shape [n] in batching mode. The initial value (or values) to try to bracket the minimum. Default is `1.` as a float32. Note that this point need not necessarily bracket the minimum for the line search to work correctly but the supplied value must be greater than 0. A good initial value will make the search converge faster. value_at_initial_step: (Optional) The full return value of evaluating value_and_gradients_function at initial_step_size, i.e. a namedtuple with 'x', 'f', 'df', if already known by the caller. If supplied the value of `initial_step_size` will be ignored, otherwise the tuple will be computed by evaluating value_and_gradients_function. value_at_zero: (Optional) The full return value of value_and_gradients_function at `0.`, i.e. a namedtuple with 'x', 'f', 'df', if already known by the caller. If not supplied the tuple will be computed by evaluating value_and_gradients_function. converged: (Optional) In batching mode a tensor of shape [n], indicating batch members which have already converged and no further search should be performed. These batch members are also reported as converged in the output, and both their `left` and `right` are set to the `value_at_initial_step`. threshold_use_approximate_wolfe_condition: Scalar positive `Tensor` of real dtype. Corresponds to the parameter 'epsilon' in [Hager and Zhang (2006)][2]. Used to estimate the threshold at which the line search switches to approximate Wolfe conditions. shrinkage_param: Scalar positive Tensor of real dtype. Must be less than `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2]. If the secant**2 step does not shrink the bracketing interval by this proportion, a bisection step is performed to reduce the interval width. expansion_param: Scalar positive `Tensor` of real dtype. Must be greater than `1.`. Used to expand the initial interval in case it does not bracket a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2]. sufficient_decrease_param: Positive scalar `Tensor` of real dtype. Bounded above by the curvature param. Corresponds to `delta` in the terminology of [Hager and Zhang (2006)][2]. curvature_param: Positive scalar `Tensor` of real dtype. Bounded above by `1.`. Corresponds to 'sigma' in the terminology of [Hager and Zhang (2006)][2]. max_iterations: Positive scalar `Tensor` of integral dtype or None. The maximum number of iterations to perform in the line search. The number of iterations used to bracket the minimum are also counted against this parameter. name: (Optional) Python str. The name prefixed to the ops created by this function. If not supplied, the default name 'hager_zhang' is used. 
Returns: results: A namedtuple containing the following attributes. converged: Boolean `Tensor` of shape [n]. Whether a point satisfying Wolfe/Approx wolfe was found. failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g. if either the objective function or the gradient are not finite at an evaluation point. iterations: Scalar int32 `Tensor`. Number of line search iterations made. func_evals: Scalar int32 `Tensor`. Number of function evaluations made. left: A namedtuple, as returned by value_and_gradients_function, of the left end point of the final bracketing interval. Values are equal to those of `right` on batch members where converged is True. Otherwise, it corresponds to the last interval computed. right: A namedtuple, as returned by value_and_gradients_function, of the right end point of the final bracketing interval. Values are equal to those of `left` on batch members where converged is True. Otherwise, it corresponds to the last interval computed. """ with tf.name_scope(name or 'hager_zhang'): val_0, val_initial, f_lim, prepare_evals = _prepare_args( value_and_gradients_function, initial_step_size, value_at_initial_step, value_at_zero, threshold_use_approximate_wolfe_condition) valid_inputs = (hzl.is_finite(val_0) & (val_0.df < 0) & tf.math.is_finite(val_initial.x) & (val_initial.x > 0)) if converged is None: init_converged = tf.zeros_like(valid_inputs) # i.e. all false. else: init_converged = tf.convert_to_tensor(converged) failed = ~init_converged & ~valid_inputs init_interval = HagerZhangLineSearchResult( converged=init_converged, failed=failed, func_evals=prepare_evals, iterations=tf.convert_to_tensor(0), left=val_0, right=hzl.val_where(init_converged, val_0, val_initial)) def _apply_bracket_and_search(): """Bracketing and searching to do for valid inputs.""" return _bracket_and_search( value_and_gradients_function, init_interval, f_lim, max_iterations, shrinkage_param, expansion_param, sufficient_decrease_param, curvature_param) init_active = ~init_interval.failed & ~init_interval.converged return prefer_static.cond( tf.reduce_any(init_active), _apply_bracket_and_search, lambda: init_interval) _LineSearchInnerResult = collections.namedtuple('_LineSearchInnerResult', [ 'iteration', 'found_wolfe', 'failed', 'num_evals', 'left', 'right']) def _bracket_and_search( value_and_gradients_function, init_interval, f_lim, max_iterations, shrinkage_param, expansion_param, sufficient_decrease_param, curvature_param): """Brackets the minimum and performs a line search. Args: value_and_gradients_function: A Python callable that accepts a real scalar tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that correspond to scalar tensors of real dtype containing the point at which the function was evaluated, the value of the function, and its derivative at that point. The other namedtuple fields, if present, should be tensors or sequences (possibly nested) of tensors. In usual optimization application, this function would be generated by projecting the multivariate objective function along some specific direction. The direction is determined by some other procedure but should be a descent direction (i.e. the derivative of the projected univariate function must be negative at 0.). Alternatively, the function may represent the batching of `n` such line functions (e.g. projecting a single multivariate objective function along `n` distinct directions at once) accepting n points as input, i.e. 
a tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned namedtuple should each be a tensor of shape [n], with the corresponding input points, function values, and derivatives at those input points. init_interval: Instance of `HagerZhangLineSearchResults` containing the initial line search interval. The gradient of init_interval.left must be negative (i.e. must be a descent direction), while init_interval.right must be positive and finite. f_lim: Scalar `Tensor` of float dtype. max_iterations: Positive scalar `Tensor` of integral dtype. The maximum number of iterations to perform in the line search. The number of iterations used to bracket the minimum are also counted against this parameter. shrinkage_param: Scalar positive Tensor of real dtype. Must be less than `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2]. expansion_param: Scalar positive `Tensor` of real dtype. Must be greater than `1.`. Used to expand the initial interval in case it does not bracket a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2]. sufficient_decrease_param: Positive scalar `Tensor` of real dtype. Bounded above by the curvature param. Corresponds to `delta` in the terminology of [Hager and Zhang (2006)][2]. curvature_param: Positive scalar `Tensor` of real dtype. Bounded above by `1.`. Corresponds to 'sigma' in the terminology of [Hager and Zhang (2006)][2]. Returns: A namedtuple containing the following fields. converged: Boolean `Tensor` of shape [n]. Whether a point satisfying Wolfe/Approx wolfe was found. failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g. if either the objective function or the gradient are not finite at an evaluation point. iterations: Scalar int32 `Tensor`. Number of line search iterations made. func_evals: Scalar int32 `Tensor`. Number of function evaluations made. left: A namedtuple, as returned by value_and_gradients_function, of the left end point of the updated bracketing interval. right: A namedtuple, as returned by value_and_gradients_function, of the right end point of the updated bracketing interval. """ bracket_result = hzl.bracket(value_and_gradients_function, init_interval, f_lim, max_iterations, expansion_param) converged = init_interval.converged | _very_close( bracket_result.left.x, bracket_result.right.x) # We fail if we have not yet converged but already exhausted all iterations. exhausted_iterations = ~converged & ( bracket_result.iteration >= max_iterations) line_search_args = HagerZhangLineSearchResult( converged=converged, failed=bracket_result.failed | exhausted_iterations, iterations=bracket_result.iteration, func_evals=bracket_result.num_evals, left=bracket_result.left, right=bracket_result.right) return _line_search_after_bracketing( value_and_gradients_function, line_search_args, init_interval.left, f_lim, max_iterations, sufficient_decrease_param, curvature_param, shrinkage_param) def _line_search_after_bracketing( value_and_gradients_function, search_interval, val_0, f_lim, max_iterations, sufficient_decrease_param, curvature_param, shrinkage_param): """The main loop of line search after the minimum has been bracketed. Args: value_and_gradients_function: A Python callable that accepts a real scalar tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that correspond to scalar tensors of real dtype containing the point at which the function was evaluated, the value of the function, and its derivative at that point. 
The other namedtuple fields, if present, should be tensors or sequences (possibly nested) of tensors. In usual optimization application, this function would be generated by projecting the multivariate objective function along some specific direction. The direction is determined by some other procedure but should be a descent direction (i.e. the derivative of the projected univariate function must be negative at 0.). Alternatively, the function may represent the batching of `n` such line functions (e.g. projecting a single multivariate objective function along `n` distinct directions at once) accepting n points as input, i.e. a tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned namedtuple should each be a tensor of shape [n], with the corresponding input points, function values, and derivatives at those input points. search_interval: Instance of `HagerZhangLineSearchResults` containing the current line search interval. val_0: A namedtuple as returned by value_and_gradients_function evaluated at `0.`. The gradient must be negative (i.e. must be a descent direction). f_lim: Scalar `Tensor` of float dtype. max_iterations: Positive scalar `Tensor` of integral dtype. The maximum number of iterations to perform in the line search. The number of iterations used to bracket the minimum are also counted against this parameter. sufficient_decrease_param: Positive scalar `Tensor` of real dtype. Bounded above by the curvature param. Corresponds to `delta` in the terminology of [Hager and Zhang (2006)][2]. curvature_param: Positive scalar `Tensor` of real dtype. Bounded above by `1.`. Corresponds to 'sigma' in the terminology of [Hager and Zhang (2006)][2]. shrinkage_param: Scalar positive Tensor of real dtype. Must be less than `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2]. Returns: A namedtuple containing the following fields. converged: Boolean `Tensor` of shape [n]. Whether a point satisfying Wolfe/Approx wolfe was found. failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g. if either the objective function or the gradient are not finite at an evaluation point. iterations: Scalar int32 `Tensor`. Number of line search iterations made. func_evals: Scalar int32 `Tensor`. Number of function evaluations made. left: A namedtuple, as returned by value_and_gradients_function, of the left end point of the updated bracketing interval. right: A namedtuple, as returned by value_and_gradients_function, of the right end point of the updated bracketing interval. 
""" def _loop_cond(curr_interval): """Loop condition.""" active = ~(curr_interval.converged | curr_interval.failed) return (curr_interval.iterations < max_iterations) & tf.reduce_any(active) def _loop_body(curr_interval): """The loop body.""" secant2_raw_result = hzl.secant2( value_and_gradients_function, val_0, curr_interval, f_lim, sufficient_decrease_param, curvature_param) secant2_result = HagerZhangLineSearchResult( converged=secant2_raw_result.converged, failed=secant2_raw_result.failed, iterations=curr_interval.iterations + 1, func_evals=secant2_raw_result.num_evals, left=secant2_raw_result.left, right=secant2_raw_result.right) should_check_shrinkage = ~(secant2_result.converged | secant2_result.failed) def _do_check_shrinkage(): """Check if interval has shrinked enough.""" old_width = curr_interval.right.x - curr_interval.left.x new_width = secant2_result.right.x - secant2_result.left.x sufficient_shrinkage = new_width < old_width * shrinkage_param func_is_flat = ( _very_close(curr_interval.left.f, curr_interval.right.f) & _very_close(secant2_result.left.f, secant2_result.right.f)) new_converged = ( should_check_shrinkage & sufficient_shrinkage & func_is_flat) needs_inner_bisect = should_check_shrinkage & ~sufficient_shrinkage inner_bisect_args = secant2_result._replace( converged=secant2_result.converged | new_converged) def _apply_inner_bisect(): return _line_search_inner_bisection( value_and_gradients_function, inner_bisect_args, needs_inner_bisect, f_lim) return prefer_static.cond( tf.reduce_any(needs_inner_bisect), _apply_inner_bisect, lambda: inner_bisect_args) next_args = prefer_static.cond( tf.reduce_any(should_check_shrinkage), _do_check_shrinkage, lambda: secant2_result) interval_shrunk = ( ~next_args.failed & _very_close(next_args.left.x, next_args.right.x)) return [next_args._replace(converged=next_args.converged | interval_shrunk)] return tf.while_loop( cond=_loop_cond, body=_loop_body, loop_vars=[search_interval], parallel_iterations=1)[0] def _line_search_inner_bisection( value_and_gradients_function, search_interval, active, f_lim): """Performs bisection and updates the interval.""" midpoint = (search_interval.left.x + search_interval.right.x) / 2 val_mid = value_and_gradients_function(midpoint) is_valid_mid = hzl.is_finite(val_mid) still_active = active & is_valid_mid new_failed = active & ~is_valid_mid next_inteval = search_interval._replace( failed=search_interval.failed | new_failed, func_evals=search_interval.func_evals + 1) def _apply_update(): update_result = hzl.update( value_and_gradients_function, next_inteval.left, next_inteval.right, val_mid, f_lim, active=still_active) return HagerZhangLineSearchResult( converged=next_inteval.converged, failed=next_inteval.failed | update_result.failed, iterations=next_inteval.iterations + update_result.iteration, func_evals=next_inteval.func_evals + update_result.num_evals, left=update_result.left, right=update_result.right) return prefer_static.cond( tf.reduce_any(still_active), _apply_update, lambda: next_inteval) def _prepare_args(value_and_gradients_function, initial_step_size, val_initial, val_0, approximate_wolfe_threshold): """Prepares the arguments for the line search initialization. Args: value_and_gradients_function: A Python callable that accepts a real scalar tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that correspond to scalar tensors of real dtype containing the point at which the function was evaluated, the value of the function, and its derivative at that point. 
The other namedtuple fields, if present, should be tensors or sequences (possibly nested) of tensors. In usual optimization application, this function would be generated by projecting the multivariate objective function along some specific direction. The direction is determined by some other procedure but should be a descent direction (i.e. the derivative of the projected univariate function must be negative at 0.). Alternatively, the function may represent the batching of `n` such line functions (e.g. projecting a single multivariate objective function along `n` distinct directions at once) accepting n points as input, i.e. a tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned namedtuple should each be a tensor of shape [n], with the corresponding input points, function values, and derivatives at those input points. initial_step_size: Scalar positive `Tensor` of real dtype, or a tensor of shape [n] in batching mode. The initial value (or values) to try to bracket the minimum. Default is `1.` as a float32. Note that this point need not necessarily bracket the minimum for the line search to work correctly but the supplied value must be greater than 0. A good initial value will make the search converge faster. val_initial: The full return value of evaluating value_and_gradients_function at initial_step_size, i.e. a namedtuple with 'x', 'f', 'df', if already known by the caller. If not None the value of `initial_step_size` will be ignored, otherwise the tuple will be computed by evaluating value_and_gradients_function. val_0: The full return value of value_and_gradients_function at `0.`, i.e. a namedtuple with 'x', 'f', 'df', if already known by the caller. If None the tuple will be computed by evaluating value_and_gradients_function. approximate_wolfe_threshold: Scalar positive `Tensor` of real dtype. Corresponds to the parameter 'epsilon' in [Hager and Zhang (2006)][2]. Used to estimate the threshold at which the line search switches to approximate Wolfe conditions. Returns: left: A namedtuple, as returned by value_and_gradients_function, containing the value and derivative of the function at `0.`. val_initial: A namedtuple, as returned by value_and_gradients_function, containing the value and derivative of the function at `initial_step_size`. f_lim: Real `Tensor` of shape [n]. The function value threshold for the approximate Wolfe conditions to be checked. eval_count: Scalar int32 `Tensor`. The number of target function evaluations made by this function. """ eval_count = 0 if val_initial is None: if initial_step_size is not None: initial_step_size = tf.convert_to_tensor(initial_step_size) else: initial_step_size = np.float32(1.) val_initial = value_and_gradients_function(initial_step_size) eval_count += 1 if val_0 is None: x_0 = tf.zeros_like(val_initial.x) val_0 = value_and_gradients_function(x_0) eval_count += 1 f_lim = val_0.f + (approximate_wolfe_threshold * tf.math.abs(val_0.f)) return val_0, val_initial, f_lim, tf.convert_to_tensor(eval_count) def _very_close(x, y): return tf.math.nextafter(x, y) >= y def _to_str(x): """Converts a bool tensor to a string with True/False values.""" x = tf.convert_to_tensor(x) if x.dtype == tf.bool: return tf.where(x, 'True', 'False') return x # A convenience function useful while debugging in the graph mode. def _print(pass_through_tensor, values): """Wrapper for tf.Print which supports lists and namedtuples for printing.""" flat_values = [] for value in values: # Checks if it is a namedtuple. 
if hasattr(value, '_fields'): for field in value._fields: flat_values.extend([field, _to_str(getattr(value, field))]) continue if isinstance(value, (list, tuple)): for v in value: flat_values.append(_to_str(v)) continue flat_values.append(_to_str(value)) return tf.Print(pass_through_tensor, flat_values)
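The line search above is easiest to see with a small usage sketch. The following is a minimal, hypothetical example assuming the public `tfp.optimizer.linesearch.hager_zhang` entry point and the 'x'/'f'/'df' namedtuple contract described in the docstrings; it minimizes a simple quadratic along a single direction (the function and step values are illustrative, not from the source).

import collections
import tensorflow as tf
import tensorflow_probability as tfp

# Namedtuple matching the 'x', 'f', 'df' contract described in the docstrings above.
ValueAndGradient = collections.namedtuple('ValueAndGradient', ['x', 'f', 'df'])

def quadratic(t):
  # f(t) = (t - 1.3)^2, so the minimum along this line is at t = 1.3 and the
  # derivative at t = 0 is negative (a valid descent direction).
  return ValueAndGradient(x=t, f=(t - 1.3) ** 2, df=2.0 * (t - 1.3))

results = tfp.optimizer.linesearch.hager_zhang(
    quadratic, initial_step_size=tf.constant(0.1))

print(results.converged.numpy())  # expected: True
print(results.left.x.numpy())     # a step length satisfying the (approx.) Wolfe conditions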
apache-2.0
6,853,210,817,681,248,000
46.539906
94
0.697511
false
3.993427
false
false
false
tonyduckles/svn2svn
svn2svn/run/parse.py
1
2731
""" optparser helper functions """ import optparse import textwrap class HelpFormatter(optparse.IndentedHelpFormatter): """ Modified version of certain optparse.IndentedHelpFormatter methods: * Respect line-breaks in parser.desription and option.help_text * Vertically-align long_opts Inspired by: http://groups.google.com/group/comp.lang.python/browse_thread/thread/6df6e6b541a15bc2/09f28e26af0699b1?pli=1 """ def format_description(self, description): if not description: return "" desc_width = self.width - self.current_indent indent = " "*self.current_indent bits = description.split('\n') formatted_bits = [ textwrap.fill(bit, desc_width, initial_indent=indent, subsequent_indent=indent) for bit in bits] result = "\n".join(formatted_bits) + "\n" return result def format_option_strings(self, option): """Return a comma-separated list of option strings & metavariables.""" if option.takes_value(): metavar = option.metavar or option.dest.upper() short_opts = [("%s" % (sopt)) if option._long_opts else \ (self._short_opt_fmt % (sopt, metavar)) for sopt in option._short_opts] long_opts = [self._long_opt_fmt % (lopt, metavar) for lopt in option._long_opts] else: short_opts = option._short_opts long_opts = option._long_opts return (" " if not short_opts else "")+(", ".join(short_opts + long_opts)) def format_option(self, option): result = [] opts = self.option_strings[option] opt_width = self.help_position - self.current_indent - 2 if len(opts) > opt_width: opts = "%*s%s\n" % (self.current_indent, "", opts) indent_first = self.help_position else: # start help on same line as opts opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) indent_first = 0 result.append(opts) if option.help: help_text = self.expand_default(option) help_lines = [] for para in help_text.split("\n"): help_lines.extend(textwrap.wrap(para, self.help_width)) result.append("%*s%s\n" % (indent_first, "", help_lines[0])) result.extend(["%*s%s\n" % (self.help_position, "", line) for line in help_lines[1:]]) elif opts[-1] != "\n": result.append("\n") return "".join(result) def format_usage(self, usage): return usage
gpl-3.0
-6,258,201,666,401,967,000
40.378788
125
0.556573
false
3.912607
false
false
false
tensorflow/tpu
models/official/detection/utils/config_utils.py
1
2218
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config utils."""

import os

import tensorflow.compat.v1 as tf

from hyperparameters import params_dict

_PARSERS = [
    'classification_parser',
    'retinanet_parser',
    'maskrcnn_parser',
    'segmentation_parser',
    'shapemask_parser',
]

_BACKBONES = [
    'resnet',
    'spinenet',
    'spinenet_mbconv',
]

_MULTILEVEL_FEATURES = [
    'fpn',
    'nasfpn',
]


def filter_unused_blocks(params):
  """Filters unused architecture params blocks."""
  filtered_params = params_dict.ParamsDict(params)
  if 'parser' in params.architecture.as_dict().keys():
    for parser in _PARSERS:
      if (parser in params.as_dict().keys() and
          parser != params.architecture.parser):
        delattr(filtered_params, parser)
  if 'backbone' in params.architecture.as_dict().keys():
    for backbone in _BACKBONES:
      if (backbone in params.as_dict().keys() and
          backbone != params.architecture.backbone):
        delattr(filtered_params, backbone)
  if 'multilevel_features' in params.architecture.as_dict().keys():
    for features in _MULTILEVEL_FEATURES:
      if (features in params.as_dict().keys() and
          features != params.architecture.multilevel_features):
        delattr(filtered_params, features)
  return filtered_params


def save_config(params, model_dir):
  if model_dir:
    params = filter_unused_blocks(params)
    if not tf.gfile.Exists(model_dir):
      tf.gfile.MakeDirs(model_dir)
    params_dict.save_params_dict_to_yaml(
        params, os.path.join(model_dir, 'params.yaml'))
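A rough illustration of how these helpers might be used. All parameter names below are hypothetical, and the sketch assumes `params_dict.ParamsDict` accepts a nested plain dict (real detection configs define many more fields):

params = params_dict.ParamsDict({
    'architecture': {'parser': 'retinanet_parser', 'backbone': 'resnet',
                     'multilevel_features': 'fpn'},
    'retinanet_parser': {'output_size': [640, 640]},
    'maskrcnn_parser': {'output_size': [1024, 1024]},  # unused, will be dropped
    'resnet': {'resnet_depth': 50},
    'spinenet': {'model_id': '49'},                    # unused, will be dropped
    'fpn': {'fpn_feat_dims': 256},
})

filtered = filter_unused_blocks(params)
# 'maskrcnn_parser' and 'spinenet' are removed because they do not match the
# blocks selected under params.architecture.
print(sorted(filtered.as_dict().keys()))

# save_config() additionally writes the filtered params to <model_dir>/params.yaml:
# save_config(params, '/tmp/model_dir')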
apache-2.0
-6,733,611,204,582,500,000
30.239437
80
0.670875
false
3.797945
false
false
false
WillsB3/glue
glue/formats/jsonformat.py
1
2704
import os import json import codecs from base import BaseJSONFormat class JSONFormat(BaseJSONFormat): extension = 'json' build_per_ratio = True @classmethod def populate_argument_parser(cls, parser): group = parser.add_argument_group("JSON format options") group.add_argument("--json", dest="json_dir", nargs='?', const=True, default=os.environ.get('GLUE_JSON', False), metavar='DIR', help="Generate JSON files and optionally where") group.add_argument("--json-format", dest="json_format", metavar='NAME', type=unicode, default=os.environ.get('GLUE_JSON_FORMAT', 'array'), choices=['array', 'hash'], help=("JSON structure format (array, hash)")) def get_context(self, *args, **kwargs): context = super(JSONFormat, self).get_context(*args, **kwargs) frames = dict([[i['filename'], {'filename': i['filename'], 'frame': {'x': i['x'], 'y': i['y'], 'w': i['width'], 'h': i['height']}, 'rotated': False, 'trimmed': False, 'spriteSourceSize': {'x': i['x'], 'y': i['y'], 'w': i['width'], 'h': i['height']}, 'sourceSize': {'w': i['original_width'], 'h': i['original_height']}}] for i in context['images']]) data = dict(frames=None, meta={'version': context['version'], 'hash': context['hash'], 'name': context['name'], 'sprite_path': context['sprite_path'], 'sprite_filename': context['sprite_filename'], 'width': context['width'], 'height': context['height']}) if self.sprite.config['json_format'] == 'array': data['frames'] = frames.values() else: data['frames'] = frames return data
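The get_context() method above effectively produces a TexturePacker-style document. As a rough illustration (all field values are hypothetical), the data returned for a sprite with a single image and json_format == 'hash' has this shape before serialization:

# Illustrative shape of the dict built by JSONFormat.get_context().
data = {
    "frames": {
        "icon.png": {
            "filename": "icon.png",
            "frame": {"x": 0, "y": 0, "w": 32, "h": 32},
            "rotated": False,
            "trimmed": False,
            "spriteSourceSize": {"x": 0, "y": 0, "w": 32, "h": 32},
            "sourceSize": {"w": 32, "h": 32},
        },
    },
    "meta": {
        "version": "0.9", "hash": "abc123", "name": "icons",
        "sprite_path": "sprites/icons.png", "sprite_filename": "icons.png",
        "width": 32, "height": 32,
    },
}
# With json_format == 'array', "frames" is a list of the same per-image dicts.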
bsd-3-clause
3,899,344,654,583,642,000
41.920635
112
0.366864
false
5.716702
false
false
false
LennonChin/Django-Practices
MxShop/apps/utils/alipay.py
1
6122
# _*_ coding: utf-8 _*_ __author__ = 'LennonChin' __date__ = '2017/10/23 21:37' # pip install pycryptodome __author__ = 'bobby' from datetime import datetime from Crypto.PublicKey import RSA from Crypto.Signature import PKCS1_v1_5 from Crypto.Hash import SHA256 from base64 import b64encode, b64decode from urllib.parse import quote_plus from urllib.parse import urlparse, parse_qs from urllib.request import urlopen from base64 import decodebytes, encodebytes import json class AliPay(object): """ 支付宝支付接口 """ def __init__(self, appid, app_notify_url, app_private_key_path, alipay_public_key_path, return_url, debug=False): self.appid = appid self.app_notify_url = app_notify_url self.app_private_key_path = app_private_key_path self.app_private_key = None self.return_url = return_url with open(self.app_private_key_path) as fp: self.app_private_key = RSA.importKey(fp.read()) self.alipay_public_key_path = alipay_public_key_path with open(self.alipay_public_key_path) as fp: self.alipay_public_key = RSA.import_key(fp.read()) if debug is True: self.__gateway = "https://openapi.alipaydev.com/gateway.do" else: self.__gateway = "https://openapi.alipay.com/gateway.do" def direct_pay(self, subject, out_trade_no, total_amount, return_url=None, **kwargs): biz_content = { "subject": subject, "out_trade_no": out_trade_no, "total_amount": total_amount, "product_code": "FAST_INSTANT_TRADE_PAY", # "qr_pay_mode":4 } biz_content.update(kwargs) data = self.build_body("alipay.trade.page.pay", biz_content, self.return_url) return self.sign_data(data) def build_body(self, method, biz_content, return_url=None): data = { "app_id": self.appid, "method": method, "charset": "utf-8", "sign_type": "RSA2", "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "version": "1.0", "biz_content": biz_content } if return_url is not None: data["notify_url"] = self.app_notify_url data["return_url"] = self.return_url return data def sign_data(self, data): data.pop("sign", None) # 排序后的字符串 unsigned_items = self.ordered_data(data) unsigned_string = "&".join("{0}={1}".format(k, v) for k, v in unsigned_items) sign = self.sign(unsigned_string.encode("utf-8")) ordered_items = self.ordered_data(data) quoted_string = "&".join("{0}={1}".format(k, quote_plus(v)) for k, v in ordered_items) # 获得最终的订单信息字符串 signed_string = quoted_string + "&sign=" + quote_plus(sign) return signed_string def ordered_data(self, data): complex_keys = [] for key, value in data.items(): if isinstance(value, dict): complex_keys.append(key) # 将字典类型的数据dump出来 for key in complex_keys: data[key] = json.dumps(data[key], separators=(',', ':')) return sorted([(k, v) for k, v in data.items()]) def sign(self, unsigned_string): # 开始计算签名 key = self.app_private_key signer = PKCS1_v1_5.new(key) signature = signer.sign(SHA256.new(unsigned_string)) # base64 编码,转换为unicode表示并移除回车 sign = encodebytes(signature).decode("utf8").replace("\n", "") return sign def _verify(self, raw_content, signature): # 开始计算签名 key = self.alipay_public_key signer = PKCS1_v1_5.new(key) digest = SHA256.new() digest.update(raw_content.encode("utf8")) if signer.verify(digest, decodebytes(signature.encode("utf8"))): return True return False def verify(self, data, signature): if "sign_type" in data: sign_type = data.pop("sign_type") # 排序后的字符串 unsigned_items = self.ordered_data(data) message = "&".join(u"{}={}".format(k, v) for k, v in unsigned_items) return self._verify(message, signature) if __name__ == "__main__": return_url = 
'http://47.92.87.172:8000/?total_amount=0.01&timestamp=2017-08-15+17%3A15%3A13&sign=jnnA1dGO2iu2ltMpxrF4MBKE20Akyn%2FLdYrFDkQ6ckY3Qz24P3DTxIvt%2BBTnR6nRk%2BPAiLjdS4sa%2BC9JomsdNGlrc2Flg6v6qtNzTWI%2FEM5WL0Ver9OqIJSTwamxT6dW9uYF5sc2Ivk1fHYvPuMfysd90lOAP%2FdwnCA12VoiHnflsLBAsdhJazbvquFP%2Bs1QWts29C2%2BXEtIlHxNgIgt3gHXpnYgsidHqfUYwZkasiDGAJt0EgkJ17Dzcljhzccb1oYPSbt%2FS5lnf9IMi%2BN0ZYo9%2FDa2HfvR6HG3WW1K%2FlJfdbLMBk4owomyu0sMY1l%2Fj0iTJniW%2BH4ftIfMOtADHA%3D%3D&trade_no=2017081521001004340200204114&sign_type=RSA2&auth_app_id=2016080600180695&charset=utf-8&seller_id=2088102170208070&method=alipay.trade.page.pay.return&app_id=2016080600180695&out_trade_no=201702021222&version=1.0' alipay = AliPay( appid="2016080600180695", app_notify_url="http://projectsedus.com/", app_private_key_path=u"../trade/keys/private_2048.txt", alipay_public_key_path="../trade/keys/alipay_pub_key.txt", # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥, debug=True, # 默认False, return_url="http://47.92.87.172:8000/" ) o = urlparse(return_url) query = parse_qs(o.query) processed_query = {} ali_sign = query.pop("sign")[0] for key, value in query.items(): processed_query[key] = value[0] print (alipay.verify(processed_query, ali_sign)) url = alipay.direct_pay( subject="测试订单", out_trade_no="201702021222", total_amount=0.01 ) re_url = "https://openapi.alipaydev.com/gateway.do?{data}".format(data=url) print(re_url)
apache-2.0
472,593,492,612,720,300
36.454545
699
0.607264
false
2.880779
false
false
false
edermartioli/ExoplanetLight
src/spectrum.py
1
2235
# -*- coding: utf-8 -*-
"""
Created on Nov 25 2016
@author: Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil
spectrum.py is a library of classes and functions to handle spectral data.
"""

import numpy as np
from scipy import constants

########## SPECTRUM CLASS ############

class Spectrum :
    'Common base class for a spectrum'

    def __init__(self, Filename):
        """
        Create a Spectrum object.

        Parameters
        ----------
        filename : string
            File to read the spectrum from.

        Examples
        --------
        >>> sp = Spectrum("spectrumfile.1d.spc")
        """
        self.filename = Filename
        self.load_spectrum(self.filename)

    def load_spectrum(self, Filename):
        try:
            self.wl, self.flux, self.var = np.loadtxt(Filename, unpack=True, comments='#', usecols=(0, 1, 2), delimiter=' ')
        except:
            print "Error: could not open file:", Filename
            exit()

    def getdata(self, wl0=0., wlf=0.) :
        """
        Retrieve data for a given wavelength range

        Parameters
        ----------
        wl0 : initial wavelength [nm]
        wlf : final wavelength [nm]

        Return : wl[], flux[], variance[]
        """
        if (wl0 == 0.) :
            wl0 = self.wl[0]
        if (wlf == 0.) :
            wlf = self.wl[-1]
        mask = np.where((self.wl > wl0) & (self.wl < wlf))
        return self.wl[mask], self.flux[mask], self.var[mask]

    def applyRVShift(self, RVshift, interp=False) :
        """
        Apply radial velocity shift to the wavelength data.

        Parameters
        ----------
        RVshift : radial velocity shift [m/s]
        interp : interpolate shifted data to keep original wavelength sampling? [boolean]
        """
        self.rvshit = RVshift
        if interp == True :
            wl_tmp = self.wl*(1.0 + self.rvshit/constants.c)
            flux_tmp = np.interp(self.wl, wl_tmp, self.flux)
            self.flux = flux_tmp
        else :
            self.wl *= (1.0 + self.rvshit/constants.c)
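Typical usage of the class above, assuming a whitespace-separated three-column spectrum file (the filename and wavelength window are hypothetical):

# sp = Spectrum("spectrumfile.1d.spc")        # loads wavelength, flux, variance columns
#
# # Restrict to a wavelength window (nm) and inspect the data.
# wl, flux, var = sp.getdata(wl0=500.0, wlf=600.0)
#
# # Apply a +1 km/s radial velocity shift, resampling back onto the original grid.
# sp.applyRVShift(1000.0, interp=True)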
mit
5,748,290,067,744,352,000
26.256098
120
0.499329
false
4.019784
false
false
false
dpgaspar/Flask-AppBuilder
examples/quickactions/config.py
1
1945
import os from flask_appbuilder.security.manager import ( AUTH_OID, AUTH_REMOTE_USER, AUTH_DB, AUTH_LDAP, AUTH_OAUTH, ) basedir = os.path.abspath(os.path.dirname(__file__)) CSRF_ENABLED = True SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h" OPENID_PROVIDERS = [ {"name": "Yahoo", "url": "https://me.yahoo.com"}, {"name": "AOL", "url": "http://openid.aol.com/<username>"}, {"name": "Flickr", "url": "http://www.flickr.com/<username>"}, {"name": "MyOpenID", "url": "https://www.myopenid.com"}, ] SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db") # SQLALCHEMY_DATABASE_URI = 'mysql://root:password@localhost/quickhowto' # SQLALCHEMY_DATABASE_URI = 'postgresql://fab:password@localhost:5432/quickhowto2' # SQLALCHEMY_ECHO = True BABEL_DEFAULT_LOCALE = "en" BABEL_DEFAULT_FOLDER = "translations" LANGUAGES = { "en": {"flag": "gb", "name": "English"}, "pt": {"flag": "pt", "name": "Portuguese"}, "es": {"flag": "es", "name": "Spanish"}, "de": {"flag": "de", "name": "German"}, "zh": {"flag": "cn", "name": "Chinese"}, "ru": {"flag": "ru", "name": "Russian"}, } # ------------------------------ # GLOBALS FOR GENERAL APP's # ------------------------------ UPLOAD_FOLDER = basedir + "/app/static/uploads/" IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/" IMG_UPLOAD_URL = "/static/uploads/" AUTH_TYPE = AUTH_DB AUTH_ROLE_ADMIN = "Admin" AUTH_ROLE_PUBLIC = "Public" APP_NAME = "F.A.B. Example" APP_ICON = "/static/img/brand.jpg" # APP_THEME = "bootstrap-theme.css" # default # APP_THEME = "cerulean.css" # COOL # APP_THEME = "amelia.css" # APP_THEME = "cosmo.css" # APP_THEME = "cyborg.css" # COOL # APP_THEME = "flatly.css" # APP_THEME = "journal.css" # APP_THEME = "readable.css" # APP_THEME = "simplex.css" # APP_THEME = "slate.css" # COOL # APP_THEME = "spacelab.css" # NICE # APP_THEME = "united.css" # APP_THEME = "yeti.css"
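For reference, a config module like this is typically consumed by the example's application factory roughly as follows (a sketch of the standard Flask-AppBuilder skeleton; the quickactions example's actual app module may differ):

from flask import Flask
from flask_appbuilder import AppBuilder, SQLA

app = Flask(__name__)
app.config.from_object("config")   # loads the settings defined above
db = SQLA(app)                      # uses SQLALCHEMY_DATABASE_URI
appbuilder = AppBuilder(app, db.session)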
bsd-3-clause
-8,113,568,028,232,835,000
28.923077
82
0.594859
false
2.649864
false
false
false
chipaca/snapcraft
snapcraft/project/_project_options.py
1
12984
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2016-2019 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import multiprocessing import os import platform import sys from typing import Set from snapcraft import file_utils from snapcraft.internal import common, errors, os_release logger = logging.getLogger(__name__) _ARCH_TRANSLATIONS = { "aarch64": { "kernel": "arm64", "deb": "arm64", "uts_machine": "aarch64", "cross-compiler-prefix": "aarch64-linux-gnu-", "cross-build-packages": ["gcc-aarch64-linux-gnu", "libc6-dev-arm64-cross"], "triplet": "aarch64-linux-gnu", "core-dynamic-linker": "lib/ld-linux-aarch64.so.1", }, "armv7l": { "kernel": "arm", "deb": "armhf", "uts_machine": "arm", "cross-compiler-prefix": "arm-linux-gnueabihf-", "cross-build-packages": ["gcc-arm-linux-gnueabihf", "libc6-dev-armhf-cross"], "triplet": "arm-linux-gnueabihf", "core-dynamic-linker": "lib/ld-linux-armhf.so.3", }, "i686": { "kernel": "x86", "deb": "i386", "uts_machine": "i686", "triplet": "i386-linux-gnu", }, "ppc": { "kernel": "powerpc", "deb": "powerpc", "uts_machine": "powerpc", "cross-compiler-prefix": "powerpc-linux-gnu-", "cross-build-packages": ["gcc-powerpc-linux-gnu", "libc6-dev-powerpc-cross"], "triplet": "powerpc-linux-gnu", }, "ppc64le": { "kernel": "powerpc", "deb": "ppc64el", "uts_machine": "ppc64el", "cross-compiler-prefix": "powerpc64le-linux-gnu-", "cross-build-packages": [ "gcc-powerpc64le-linux-gnu", "libc6-dev-ppc64el-cross", ], "triplet": "powerpc64le-linux-gnu", "core-dynamic-linker": "lib64/ld64.so.2", }, "riscv64": { "kernel": "riscv64", "deb": "riscv64", "uts_machine": "riscv64", "cross-compiler-prefix": "riscv64-linux-gnu-", "cross-build-packages": ["gcc-riscv64-linux-gnu", "libc6-dev-riscv64-cross"], "triplet": "riscv64-linux-gnu", "core-dynamic-linker": "lib/ld-linux-riscv64-lp64d.so.1", }, "s390x": { "kernel": "s390", "deb": "s390x", "uts_machine": "s390x", "cross-compiler-prefix": "s390x-linux-gnu-", "cross-build-packages": ["gcc-s390x-linux-gnu", "libc6-dev-s390x-cross"], "triplet": "s390x-linux-gnu", "core-dynamic-linker": "lib/ld64.so.1", }, "x86_64": { "kernel": "x86", "deb": "amd64", "uts_machine": "x86_64", "triplet": "x86_64-linux-gnu", "core-dynamic-linker": "lib64/ld-linux-x86-64.so.2", }, } _32BIT_USERSPACE_ARCHITECTURE = { "aarch64": "armv7l", "armv8l": "armv7l", "ppc64le": "ppc", "x86_64": "i686", } _WINDOWS_TRANSLATIONS = {"AMD64": "x86_64"} _HOST_CODENAME_FOR_BASE = {"core18": "bionic", "core": "xenial"} _HOST_COMPATIBILITY = { "xenial": ["trusty", "xenial"], "bionic": ["trusty", "xenial", "bionic"], } _STATIC_BASES = ["bare"] # TODO: just check the base. _LINKER_VERSION_FOR_BASE = {"core20": "2.31", "core18": "2.27", "core": "2.23"} def _get_platform_architecture(): architecture = platform.machine() # Translate the windows architectures we know of to architectures # we can work with. 
if sys.platform == "win32": architecture = _WINDOWS_TRANSLATIONS.get(architecture) if platform.architecture()[0] == "32bit": userspace = _32BIT_USERSPACE_ARCHITECTURE.get(architecture) if userspace: architecture = userspace return architecture class ProjectOptions: @property def parallel_build_count(self) -> int: try: build_count = len(os.sched_getaffinity(0)) except AttributeError: # Fall back to multiprocessing.cpu_count()... try: build_count = multiprocessing.cpu_count() except NotImplementedError: logger.warning( "Unable to determine CPU count; disabling parallel builds" ) build_count = 1 return build_count @property def is_cross_compiling(self): return self.__target_machine != self.__platform_arch @property def target_arch(self): return self.__target_arch @property def cross_compiler_prefix(self): try: # cross-compilation of x86 32bit binaries on a x86_64 host is # possible by reusing the native toolchain - let Kbuild figure # it out by itself and pass down an empty cross-compiler-prefix # to start the build if self.__platform_arch == "x86_64" and self.__target_machine == "i686": return "" return self.__machine_info["cross-compiler-prefix"] except KeyError: raise errors.SnapcraftEnvironmentError( "Cross compilation not supported for target arch {!r}".format( self.__target_machine ) ) @property def additional_build_packages(self): packages = [] if self.is_cross_compiling: packages.extend(self.__machine_info.get("cross-build-packages", [])) return packages @property def arch_triplet(self): return self.__machine_info["triplet"] @property def deb_arch(self): return self.__machine_info["deb"] @property def kernel_arch(self): return self.__machine_info["kernel"] @property def parts_dir(self) -> str: return self._parts_dir @property def stage_dir(self) -> str: return self._stage_dir @property def prime_dir(self) -> str: return self._prime_dir @property def debug(self): return self._debug def __init__( self, target_deb_arch=None, debug=False, *, work_dir: str = None ) -> None: # Here for backwards compatibility. project_dir = os.getcwd() if work_dir is None: work_dir = project_dir self._debug = debug self._parts_dir = os.path.join(work_dir, "parts") self._stage_dir = os.path.join(work_dir, "stage") self._prime_dir = os.path.join(work_dir, "prime") logger.debug("Parts dir {}".format(self._parts_dir)) logger.debug("Stage dir {}".format(self._stage_dir)) logger.debug("Prime dir {}".format(self._prime_dir)) self._set_machine(target_deb_arch) def _get_content_snaps(self) -> Set[str]: """Temporary shim for unit tests using ProjectOptions where Project is really required. Will be removed in future convergence work. """ return set() def _get_provider_content_dirs(self) -> Set[str]: """Temporary shim for unit tests using ProjectOptions where Project is really required. Will be removed in future convergence work. """ return set() def _get_stage_packages_target_arch(self) -> str: """Stub for 'Project' interface for tests using ProjectOptions().""" return self.deb_arch def is_static_base(self, base: str) -> bool: """Return True if a base that is intended to be static is used. Static bases require all their necessary components to live within the snap. """ return base in _STATIC_BASES def is_host_compatible_with_base(self, base: str) -> bool: """Determines if the host is compatible with the GLIBC of the base. 
The system should warn early on when building using a host that does not match the intended base, this mechanism here enables additional logic when that is ignored to determine built projects will actually run. :param str base: the base core snap to search for linker. :returns: True if there are no GLIBC incompatibilities with the chosen build host, else it returns False. :rtype: bool """ try: codename = os_release.OsRelease().version_codename() except errors.OsReleaseCodenameError: return False logger.debug("Running on {!r}".format(codename)) build_host_for_base = _HOST_CODENAME_FOR_BASE.get(base) if build_host_for_base is None: return False compatible_hosts = _HOST_COMPATIBILITY.get(build_host_for_base, []) return codename in compatible_hosts # This is private to not make the API public given that base # will be part of the new Project. def _get_linker_version_for_base(self, base: str) -> str: """Returns the linker version for base.""" try: return _LINKER_VERSION_FOR_BASE[base] except KeyError: linker_file = os.path.basename(self.get_core_dynamic_linker(base)) return file_utils.get_linker_version_from_file(linker_file) def get_core_dynamic_linker(self, base: str, expand: bool = True) -> str: """Returns the dynamic linker used for the targeted core. :param str base: the base core snap to search for linker. :param bool expand: expand the linker to the actual linker if True, else the main entry point to the linker for the projects architecture. :return: the absolute path to the linker :rtype: str :raises snapcraft.internal.errors.SnapcraftMissingLinkerInBaseError: if the linker cannot be found in the base. :raises snapcraft.internal.errors.SnapcraftEnvironmentError: if a loop is found while resolving the real path to the linker. """ core_path = common.get_installed_snap_path(base) dynamic_linker_path = os.path.join( core_path, self.__machine_info.get("core-dynamic-linker", "lib/ld-linux.so.2"), ) # return immediately if we do not need to expand if not expand: return dynamic_linker_path # We can't use os.path.realpath because any absolute symlinks # have to be interpreted relative to core_path, not the real # root. seen_paths = set() # type: Set[str] while True: if dynamic_linker_path in seen_paths: raise errors.SnapcraftEnvironmentError( "found symlink loop resolving dynamic linker path" ) seen_paths.add(dynamic_linker_path) if not os.path.lexists(dynamic_linker_path): raise errors.SnapcraftMissingLinkerInBaseError( base=base, linker_path=dynamic_linker_path ) if not os.path.islink(dynamic_linker_path): return dynamic_linker_path link_contents = os.readlink(dynamic_linker_path) if os.path.isabs(link_contents): dynamic_linker_path = os.path.join(core_path, link_contents.lstrip("/")) else: dynamic_linker_path = os.path.join( os.path.dirname(dynamic_linker_path), link_contents ) def _set_machine(self, target_deb_arch): self.__platform_arch = _get_platform_architecture() if not target_deb_arch: self.__target_machine = self.__platform_arch else: self.__target_machine = _find_machine(target_deb_arch) logger.info("Setting target machine to {!r}".format(target_deb_arch)) self.__machine_info = _ARCH_TRANSLATIONS[self.__target_machine] # Set target arch to match the host if unspecified. 
if target_deb_arch is None: self.__target_arch = self.__machine_info.get("deb") else: self.__target_arch = target_deb_arch def _get_deb_arch(machine): return _ARCH_TRANSLATIONS[machine].get("deb", None) def _find_machine(deb_arch): for machine in _ARCH_TRANSLATIONS: if _ARCH_TRANSLATIONS[machine].get("deb", "") == deb_arch: return machine elif _ARCH_TRANSLATIONS[machine].get("uts_machine", "") == deb_arch: return machine raise errors.SnapcraftEnvironmentError( "Cannot set machine from deb_arch {!r}".format(deb_arch) )
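A small usage sketch of the class above (illustrative only; actual values depend on the build host):

# Cross-compiling for arm64 on an x86_64 host.
options = ProjectOptions(target_deb_arch="arm64")

print(options.deb_arch)               # 'arm64'
print(options.arch_triplet)           # 'aarch64-linux-gnu'
print(options.is_cross_compiling)     # True on an x86_64 host
print(options.cross_compiler_prefix)  # 'aarch64-linux-gnu-'
print(options.additional_build_packages)
# ['gcc-aarch64-linux-gnu', 'libc6-dev-arm64-cross']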
gpl-3.0
-6,421,764,754,426,104,000
32.900783
88
0.59658
false
3.788737
false
false
false
eroicaleo/LearningPython
interview/leet/146_LRU_Cache.py
1
2568
#!/usr/bin/env python class LRUCache: class Node: def __init__(self, key, val): self.val, self.key = val, key self.prev = None self.next = None def __init__(self, capacity): """ :type capacity: int """ self.capacity = capacity self.dict = dict() self.head, self.tail = None, None def get(self, key): """ :type key: int :rtype: int """ if key in self.dict: node = self.dict[key] if node == self.head: return node.val node.prev.next = node.next if node == self.tail: self.tail = node.prev else: node.next.prev = node.prev print('In get node: %d' % node.key) print('In get self.head: %d' % self.head.key) self.head.prev = node node.next, self.head, = self.head, node print('In get after swapping node: %d' % node.key) print('In get after swapping self.head: %d' % self.head.key) print('In get after swapping self.head.next.prev: %d' % self.head.next.prev.key) return node.val return -1 def put(self, key, value): """ :type key: int :type value: int :rtype: void """ if self.get(key) != -1: self.head.val = value elif len(self.dict) < self.capacity: print("I am inserting new node: %d" % (key)) node = self.Node(key, value) if len(self.dict) == 0: self.tail = node else: self.head.prev = node node.next, self.head = self.head, node print("new head: %d" % self.head.key) self.dict[key] = node else: self.get(self.tail.key) node = self.head node.val = value print('Prepare to delete key %d' % node.key) del self.dict[node.key] node.key = key self.dict[key] = node cache = LRUCache(2) print(cache.get(1)) cache.put(2, 6) print(cache.get(1)) cache.put(1, 5) cache.put(1, 2) print(cache.get(1)) print(cache.get(2)) quit() cache = LRUCache(2) cache.put(1, 1) print(cache.get(1)) print("now head: ", cache.head.key) print(cache.get(2)) cache.put(2, 2) print("now head: ", cache.head.key) print(cache.get(1)) print("now head: ", cache.head.key) print(cache.get(2)) cache.put(3, 3) print(cache.get(2)) print(cache.get(1))
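The hand-rolled dict-plus-doubly-linked-list above can be compared against a much shorter variant built on collections.OrderedDict, which gives the same O(1) get/put behavior. This is an alternative sketch, not the author's implementation:

from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        if key not in self.cache:
            return -1
        self.cache.move_to_end(key)        # mark as most recently used
        return self.cache[key]

    def put(self, key, value):
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = value
        if len(self.cache) > self.capacity:
            self.cache.popitem(last=False)  # evict the least recently used entry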
mit
2,415,132,396,107,739,600
27.21978
92
0.503505
false
3.396825
false
false
false
martinsch/vigra
vigranumpy/lib/pyqt/imagewindow.py
1
23939
####################################################################### # # Copyright 2009-2010 by Ullrich Koethe # # This file is part of the VIGRA computer vision library. # The VIGRA Website is # http://hci.iwr.uni-heidelberg.de/vigra/ # Please direct questions, bug reports, and contributions to # [email protected] or # [email protected] # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # ####################################################################### import math, os, numpy, PyQt4 import PyQt4.QtCore as qcore import PyQt4.QtGui as qt from PyQt4.QtCore import SIGNAL import vigra import vigra.ufunc try: from VigraQt import OverlayViewer, ImageCursor except Exception, e: vigra._fallbackModule('VigraQt', ''' %s If VigraQt is missing on your system, you can download it from http://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/.''' % str(e)) from VigraQt import OverlayViewer, ImageCursor import quickdialog import weakref import viewer2svg class Crosshair(ImageCursor): def __init__(self, *args): ImageCursor.__init__(self, *args) self.visible = False self.position = qcore.QPoint(-1, -1) def setVisible(self, what=True): self.visible = what if what: ImageCursor.setPosition(self, self.position) else: ImageCursor.setPosition(self, qcore.QPoint(-1, -1)) def setPosition(self, pos): self.position = pos if self.visible: ImageCursor.setPosition(self, self.position) class ImageViewer(OverlayViewer): activeViewers = weakref.WeakValueDictionary() def __init__(self, image, normalize=True, title=None, parent=None): OverlayViewer.__init__(self, parent) self.setImage(image, normalize) self._savedExpression = "x" self._lastSaveType = 2 self.overlays = [] if title is not None: self.setWindowTitle(title) elif hasattr(image, "name"): self.setWindowTitle(image.name) else: for k in xrange(1, 10000): if not ImageViewer.activeViewers.has_key(k): break ImageViewer.activeViewers[k] = self self.setWindowTitle("Image %d" % k) #self.imageCursor = ImageCursor(self) # doesn't work anymore - setVisible() is gone self.imageCursor = Crosshair(self) self.imageCursor.setVisible(False) self.imageCursor.setPosition(qcore.QPoint(self.image.width // 2, self.image.height // 2)) OverlayViewer.addOverlay(self, self.imageCursor) self.zoomInAction = qt.QAction("Zoom in", self) self.zoomInAction.setShortcut("+") self.connect(self.zoomInAction, SIGNAL("triggered()"), self.zoomInPopup) self.zoomOutAction = qt.QAction("Zoom out", self) self.zoomOutAction.setShortcut("-") self.connect(self.zoomOutAction, SIGNAL("triggered()"), 
self.zoomOutPopup) self.saveAction = qt.QAction("Save image...", self) self.saveAction.setShortcut("S") self.connect(self.saveAction, SIGNAL("triggered()"), self.writeImage) self.svgAction = qt.QAction("Save as SVG...", self) self.svgAction.setShortcut("V") self.connect(self.svgAction, SIGNAL("triggered()"), self.writeSVG) self.expressionAction = qt.QAction("Apply expression...", self) self.expressionAction.setShortcut("E") self.connect(self.expressionAction, SIGNAL("triggered()"), self.applyExpression) self.cursorAction = qt.QAction("Line cursor", self) self.cursorAction.setShortcut("L") self.cursorAction.setCheckable(True) self.cursorAction.setChecked(False) self.connect(self.cursorAction, SIGNAL("triggered()"), self._toggleImageCursor) self.popup = qt.QMenu(self) self.popup.addAction(self.zoomInAction) self.popup.addAction(self.zoomOutAction) self.popup.addAction(self.saveAction) self.popup.addAction(self.svgAction) self.popup.addAction(self.expressionAction) self.popup.addAction(self.cursorAction) self.overlayMenu = self.popup.addMenu("Overlays") self.connect(self.overlayMenu, SIGNAL("aboutToShow()"), self.overlayPopup) def setImage(self, image, normalize=True): if not hasattr(image, "qimage"): image = image.view(vigra.Image) self.image = image self._normalized = normalize OverlayViewer.setImage(self, image.qimage(normalize)) def showImageCursor(self, yesOrNo=True): if yesOrNo != self.cursorAction.isChecked(): self.cursorAction.trigger() def _toggleImageCursor(self): self.imageCursor.activateTool(self.cursorAction.isChecked()) self.imageCursor.setVisible(self.cursorAction.isChecked()) def addOverlay(self, overlay): if not hasattr(overlay, "draw"): raise TypeError("addOverlay: " + str(overlay) + "is no valid overlay with 'draw' method!") if overlay.parent() is None: overlay.setParent(self) overlay.visible = True if not hasattr(overlay, "name") or not overlay.name: overlay.name = self._defaultOverlayName(overlay) self.overlays.append(overlay) OverlayViewer.addOverlay(self, overlay) self.update() return len(self.overlays) - 1 def removeOverlay(self, overlay): if type(overlay) == int: try: OverlayViewer.removeOverlay(self, self.overlays[overlay]) self.overlays.pop(overlay) self.update() except IndexError, e: print "No such overlay." else: try: self.overlays.remove(overlay) OverlayViewer.removeOverlay(self, overlay) self.update() except ValueError, e: print "No such overlay." 
def _slideAfterZoom(self, shift): if self.zoomLevel() > 0: shift *= 1 + self.zoomLevel() elif self.zoomLevel() < 0: shift /= 1 - self.zoomLevel() self.slideBy(shift) def zoomInPopup(self): beforePos = self.imageCoordinate(self.mousepos) self.zoomUp() afterPos = self.imageCoordinate(self.mousepos) self._slideAfterZoom(afterPos - beforePos) def zoomOutPopup(self): beforePos = self.imageCoordinate(self.mousepos) self.zoomDown() afterPos = self.imageCoordinate(self.mousepos) self._slideAfterZoom(afterPos - beforePos) def _defaultOverlayName(self, o): name = str(o.__class__) if name[:8] == "<class '": name = name[8:-2] try: name = name[name.rindex(".") + 1:] except ValueError: pass return name def overlayPopup(self): self.overlayMenu.clear() index = 0 hideable = False showable = False for o in self.overlays: overlayName = o.name text = "[%d] %s" % (index, overlayName) color = None if hasattr(o, "color") and isinstance(o.color, qt.QColor): color = o.color pmHeight = 5 elif hasattr(o, "fillColor") and isinstance(o.fillColor, qt.QColor): color = o.fillColor pmHeight = 16 if color: colorPM = qt.QPixmap(16, pmHeight) colorPM.fill(color) icon = qt.QIcon(colorPM) id = qt.QAction(icon, text, self) else: id = qt.QAction(text, self) self.overlayMenu.addAction(id) id.setCheckable(True) self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(o)) id.setChecked(o.isVisible()) if o.isVisible(): hideable = True else: showable = True index += 1 id = qt.QAction("&Hide all", self) self.overlayMenu.addAction(id) self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(False)) id.setEnabled(hideable) id = qt.QAction("&Show all", self) self.overlayMenu.addAction(id) self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(True)) id.setEnabled(showable) def toggleOverlayVisibilityWithParam(self, o): return lambda: self.toggleOverlayVisibility(o) def toggleOverlayVisibility(self, o=None): '''Toggle or set visibility of given overlay and update view. The parameter can be a boolean - which sets the visibility of all overlays accordingly - an overlay object or the index of the overlay to be hidden/re-shown. If it is omitted, all overlays will be toggled. ''' if o is None: for k in self.overlays: k.setVisible(not k.isVisible()) elif type(o) is bool: for k in self.overlays: k.setVisible(o) else: if type(o) is int: o = self.overlays[o] o.setVisible(not o.isVisible()) self.update() def applyExpression(self, expr=None, normalized=None): if expr is not None: self._savedExpression = expr else: d = quickdialog.QuickDialog(self, "Enter Expression") d.expression = quickdialog.OptionalStringInput(d, "Execute 'lambda x: ") d.expression.setText(self._savedExpression) d.expression.setFocus() d.addSpacing(10) d.norm = quickdialog.CheckBox(d, "Normalize intensity to range 0...255") d.norm.setChecked(self._normalized) if d.exec_() == 0: return self._savedExpression = d.expression.text() self._normalized = True if d.norm.selection() else False if normalized is not None: self._normalized = normalized try: image, normalized = self.getDisplayedImage() except Exception, e: qt.QMessageBox.critical(self, "Error Applying Expression", str(e)) return OverlayViewer.setImage(self, image.qimage(normalized)) def getDisplayedImage(self): """Returns the displayed image and the normalize flag (BYTE or NBYTE) as tuple/pair. Note that the returned image is the original image if no expression is applied, i.e. you should not change the returned object. 
If active, the expression is applied via eval() on every call of getDisplayedImage().""" if not self._savedExpression or self._savedExpression == "x": self._savedExpression = "x" image = self.image else: for f in vigra.ufunc.__all__: exec 'from vigra.ufunc import %s' % f for f in dir(vigra.colors): if not f.startswith('__'): exec 'from vigra.colors import %s' % f x = self.image image = eval(self._savedExpression) return image, self._normalized def writeImage(self): d = quickdialog.QuickDialog(self, "Write Image") imageFileExtensions = '*.' + ' *.'.join(vigra.impex.listExtensions().split(' ')) d.filedialog = quickdialog.OutputFile( d, "Output filename:", "Image Files (" + imageFileExtensions + ")") d.filedialog.setFocus() d.choices = quickdialog.HDialogGroup(d) d.type = quickdialog.VChoice(d.choices, "Output Pixel Type") d.type.addButton("Byte", "UINT8") d.type.addButton("Normalized to byte", "NBYTE") d.type.addButton("Keep type", "NATIVE") d.type.selectButton(1 if self._normalized else 0) d.type.buttonBox.setEnabled(self._lastSaveType) d.choices.addStretch(1) d.which = quickdialog.VChoice(d.choices, "Save ...") d.which.addButton("displayed image (zoomed, overlays)", 0) d.which.addButton("displayed image (1:1)", 1) d.which.addButton("original image", 2) d.connect(d.which.buttonBox, SIGNAL("clicked(int)"), \ d.type.buttonBox.setEnabled) d.which.selectButton(self._lastSaveType) while True: if d.exec_() == 0: return filename = d.filedialog.text() pixelType = d.type.selection() self._lastSaveType = d.which.selection() if d.which.selection(): if d.which.selection() == 2: image = self.image else: image = self.getDisplay()[0] try: image.writeImage(filename, pixelType) except RuntimeError, e: qt.QMessageBox.critical(self, "Error", str(e)) else: return else: formats = {"png": "PNG", \ "bmp": "BMP", \ "xbm": "XBM", \ "xpm": "XPM", \ "pnm": "PPM", \ "ppm": "PPM", \ "png": "PNG", \ "jpg": "JPEG", \ "jpeg": "JPEG", \ "tif": "TIF"} _, ext = os.path.splitext(filename) if not formats.has_key(ext[1:]): f = " ".join(formats.keys()) qt.QMessageBox.critical(self, "Error", \ "Displayed image with overlays can only be stored as\n" + f) else: pixmap = self.getContentsPixmap() pixmap.save(filename, formats[ext[1:]]) return def writeSVG(self): d = quickdialog.QuickDialog(self, "Write Viewer Contents to SVG") d.filedialog = quickdialog.OutputFile( d, "Output filename:", "SVG Files (*.svg)") d.filedialog.setFocus() d.choices = quickdialog.HDialogGroup(d) d.which = quickdialog.VChoice(d.choices, "Save ...") d.which.addButton("all overlays", 0) d.which.addButton("only displayed overlays", 1) d.which.selectButton(self._lastSaveType) while True: if d.exec_() == 0: return self._lastSaveType = d.which.selection() allOVs = (d.which.selection() == 0) filename = d.filedialog.text() basename, ext = os.path.splitext(filename) try: if ext == ".SVG" or ext == ".svg": viewer2svg.viewer2svg(self, basename, not allOVs) else: viewer2svg.viewer2svg(self, filename, not allOVs) except RuntimeError, e: qt.QMessageBox.critical(self, "Error", str(e)) return def contextMenuEvent(self, e): "handles pop-up menu" self.overlayMenu.setEnabled(len(self.overlays) > 0) self.mousepos = e.pos() self.popup.exec_(e.globalPos()) def keyPressEvent(self, e): "handles keys [S], [E], and possibly [Q] (for toplevel-windows)" if e.key() == qcore.Qt.Key_Q and not self.parent(): self.close() elif e.key() == qcore.Qt.Key_S: self.writeImage() elif e.key() == qcore.Qt.Key_E: self.applyExpression() elif e.key() == qcore.Qt.Key_L: self.cursorAction.trigger() elif 
e.key() == qcore.Qt.Key_Right or e.key() == qcore.Qt.Key_Left or \ e.key() == qcore.Qt.Key_Up or e.key() == qcore.Qt.Key_Down: OverlayViewer.keyPressEvent(self, e) elif e.key() == qcore.Qt.Key_Plus or e.key() == qcore.Qt.Key_Greater: OverlayViewer.zoomUp(self) elif e.key() == qcore.Qt.Key_Minus or e.key() == qcore.Qt.Key_Less: OverlayViewer.zoomDown(self) else: self.emit(qcore.SIGNAL("keyPressed"), (e.key())) e.ignore() def keyReleaseEvent(self, e): self.emit(qcore.SIGNAL("keyReleased"), (e.key())) e.ignore() def mousePressEvent(self, e): imagePos = OverlayViewer.imageCoordinateF(self, qcore.QPoint(e.x(), e.y())) self.emit(qcore.SIGNAL("mousePressed"), (imagePos.x(), imagePos.y(), e.button())) OverlayViewer.mousePressEvent(self, e) e.ignore() class CaptionImageViewer(qt.QFrame): def __init__(self, image, normalize=True, title=None, parent=None): qt.QFrame.__init__(self, parent) self.viewer = ImageViewer(image, normalize, title, parent=self) self.setWindowTitle(self.viewer.windowTitle()) self._captionCoords = 0, 0 self._xplaces = int(math.log10(self.viewer.image.width) + 1.0) self._yplaces = int(math.log10(self.viewer.image.height) + 1.0) self._valueplaces = self.viewer.image.channels * 5 self.label = qt.QLabel(self) font = qt.QFont() font.setPointSize(10) font.setStyleHint(qt.QFont.TypeWriter) self.label.setFont(font) self._layout = qt.QVBoxLayout(self) self._layout.setSpacing(5) self._layout.addWidget(self.viewer, 1) self._layout.addWidget(self.label) self.connect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption) self.connect(self.viewer.cursorAction, SIGNAL('triggered()'), self._toggleCaptionSignals) self.updateCaption() def updateCaption(self, x=None, y=None): x = int(round(x)) if x is not None else self._captionCoords[0] y = int(round(y)) if y is not None else self._captionCoords[1] if x < 0 or x >= self.viewer.image.width or \ y < 0 or y >= self.viewer.image.height: return self._captionCoords = x, y label = str(x).rjust(self._xplaces) + " x " + str(y).rjust(self._yplaces) +\ " = " + str(self.viewer.image[x, y]).ljust(self._valueplaces) self.label.setText(label) self.emit(SIGNAL('captionChanged'), self.label.text()) def updateCaptionP(self, point): self.updateCaption(point.x(), point.y()) def _toggleCaptionSignals(self): if self.viewer.cursorAction.isChecked(): self.disconnect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption) self.connect(self.viewer.imageCursor, SIGNAL('positionChanged(QPoint)'), self.updateCaptionP) else: self.connect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption) self.disconnect(self.viewer.imageCursor, SIGNAL('positionChanged(QPoint)'), self.updateCaptionP) def setImage(self, image, normalize=None): """imageWindow.setImage(image, normalize = None) Replace the current image with the given one. 
If normalized is not given (or None), the normalized state is not changed.""" self.viewer.setImage(image, normalize) self.updateCaption() class CursorAction(qt.QAction): def __init__(self, name, parent): qt.QAction.__init__(self, name, parent) self.x, self.y = -1, -1 self.zoomLevel = 0 def trigger(self): qt.QAction.trigger(self) for v in self.viewers: v.viewer.cursorAction.setChecked(self.isChecked()) v.viewer._toggleImageCursor() v._toggleCaptionSignals() def broadcastPosition(self, pos): if self.x == pos.x() and self.y == pos.y(): return self.x, self.y = pos.x(), pos.y() for v in self.viewers: v.viewer.imageCursor.setPosition(pos) def broadcastZoom(self, level): if self.zoomLevel == level: return self.zoomLevel = level for v in self.viewers: v.viewer.setZoomLevel(level) class ImageWindow(qt.QFrame): '''Display one or more images in a grid-like layout. ''' def __init__(self, parent=None): qt.QFrame.__init__(self, parent) self.cursorAction = CursorAction("Connected line cursors", self) self.cursorAction.setCheckable(True) self.cursorAction.setChecked(False) self.addAction(self.cursorAction) self.cursorAction.viewers = [] self.layout = qt.QGridLayout(self) def setImage(self, image, x=0, y=0, normalize=True, title=None): """Place the given image at the given position of this window's grid layout. If an image already exists at this position, it is replaced. """ if self.layout.itemAtPosition(y, x): self.layout.itemAtPosition(y, x).widget().setImage(image, normalize) else: CIviewer = CaptionImageViewer(image, normalize, title, parent=self) self.layout.addWidget(CIviewer, y, x) self.cursorAction.viewers.append(CIviewer) if len(self.cursorAction.viewers) == 1: self.setWindowTitle(CIviewer.windowTitle()) if self.cursorAction.x != -1: CIviewer.viewer.imageCursor.setPosition( qcore.QPoint(self.cursorAction.x, self.cursorAction.y)) CIviewer.viewer.setZoomLevel(self.cursorAction.zoomLevel) if self.cursorAction.isChecked(): CIviewer.viewer.cursorAction.trigger() self.disconnect(CIviewer.viewer.cursorAction, SIGNAL("triggered()"), CIviewer.viewer._toggleImageCursor) self.connect(CIviewer.viewer.cursorAction, SIGNAL("triggered()"), self.cursorAction.trigger) self.connect(CIviewer.viewer.imageCursor, SIGNAL("positionChanged(QPoint)"), self.cursorAction.broadcastPosition) self.connect(CIviewer.viewer, SIGNAL("zoomLevelChanged(int)"), self.cursorAction.broadcastZoom) self.updateGeometry() # this call is necessary to update the sizeHint() before adjustSize() is called qcore.QCoreApplication.processEvents() self.adjustSize() def viewer(self, x=0, y=0): if self.layout.itemAtPosition(y, x): return self.layout.itemAtPosition(y, x).widget().viewer raise ValueError("ImageWindow.viewer(): viewer at (%d, %d) is undefined." % (x, y)) def showImage(image, normalize=True, title=None): if isinstance(image, str): image = vigra.impex.readImage(image) v = ImageWindow() v.setImage(image, normalize=normalize, title=title) v.show() return v
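# A minimal driver sketch for the showImage() helper defined above, not part
# of the original file.  It assumes the Qt bindings this module imports expose
# QApplication under the same `qt` name (consistent with the PyQt4-era style
# used here) and that vigra can read the placeholder filename.
def _demo_viewer(filename="example.png"):
    app = qt.QApplication([])
    viewer = showImage(filename, normalize=True, title="demo")
    # Handy keys once the window has focus: [S] save, [E] apply an expression,
    # [L] linked cursor, [+]/[-] zoom, [Q] close a top-level viewer.
    app.exec_()
    return viewer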
mit
4,147,204,187,256,811,500
37.799028
97
0.588329
false
3.981207
false
false
false
GSA/PricesPaidAPI
SolrLodr.py
1
4723
#!/usr/local/bin/python import solr import sys, traceback # This file is for (for example) Apache with mod_wsgi. import sys, os # import sys # sys.path.insert(0, '../configuration/') # The purpose of this file is to take the standard # datafiles and load them into SOLR in such a way that they # will be searchable. # This is meant to be run from a command line because # I assume it is to be invoked when you change the # source data directory, which implies you are changing # files and it will be easy to run it from a command line. # Later, we can wrap this into something that allows # a file to be uploaded through the API. # We may someday need to manage the SOLR index with # an administrative interface, but for now the goal is # just to make it reflect the directory. I'm assuming # those are the simplest way to do these things. import Transaction import time from configs.ppApiConfig import PathToDataFiles, MAXIMUM_NUMBER_TO_LOAD, SolrDeleteExistingData, PathToActualInputFiles # Note: For now, these are explict imports. # Evntually, we want to make this automatic, and essentially # create a dynamic array of adapters and loaders based on # what we find in some directory so that it is easily # extendable. But that would be over-engineering if we did it now. from RevAucAdapter import getDictionaryFromRevAuc,loadRevAucFromCSVFile from OS2Adapter import getDictionaryFromOS2,loadOS2FromCSVFile from GSAAdvAdapter import getDictionaryFromGSAAdv,loadGSAAdvFromCSVFile from LabEquipAdapter import getDictionaryFromLabEquipment,loadLabequipmentFromCSVFile from USASpendingAdapter import getDictionaryFromUSASpending,loadUSASpendingFromCSVFile from EDWGSAAdvAdapter import getDictionaryFromEDWGSAAdv,loadEDWGSAAdvFromCSVFile from csv_rename import splitfiles from os import listdir from os.path import isfile, join import re import logging import SearchApi logger = logging.getLogger('PPSolrLodr') hdlr = logging.FileHandler('../logs/PPSolrLodr.log') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.ERROR) LIMIT_NUM_MATCHING_TRANSACTIONS = 5000*1000*100; # create a connection to a solr server # This needs to come from ppconfig solrCon = solr.SolrConnection('http://localhost:8983/solr') def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in xrange(0, len(l), n): yield l[i:i+n] idcnt = 0; def loadChunk(filename,chunk): global idcnt l = [] for t in chunk: d = {} # we need to look at the dictionary and map # non standard fields to those matching our "dynamic field" name # in the schema. for key, value in t.dict.items(): v = unicode(value, errors='ignore') # This unicode stuff needs to be changed at the source.. # We should not carry around bad data and then cover it up like this! if (key in Transaction.STANDARD_FIELDS): d[unicode(key,errors='ignore')] = v; else: # I think _txt might be clearer! d[key+"_t"] = v; # possibly the addtion of this id field should actually be done # when we create the objects! That would make the class useful! 
d['id'] = filename+"_"+str(idcnt); idcnt = idcnt+1; l.append(d); try: print "about to add "+str(len(l)) solrCon.add_many(l) solrCon.commit() print "success" except: print "failure" exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stderr) logger.error("don't know what went wrong here") def loadSolr(filename,transactions): global idcnt chunkedTransactions = list(chunks(transactions, 1000)) for chunk in chunkedTransactions: loadChunk(filename,chunk) # Before we load, we need to delete! # This seems a little dangerous, but there is not much we can do. # We really want to make this a command-line argument so # that we can load one data file at a time. # Default param for SolrDeleteExistingData in ppGuiConfig is F if SolrDeleteExistingData=='T': response = solrCon.delete_query('*:*') solrCon.commit() print "Solr Loader Starts" onlyfiles = [ f for f in listdir(PathToActualInputFiles) if isfile(join(PathToActualInputFiles,f)) ] onlycsvfiles = [ f for f in onlyfiles if re.search(".csv$",f)] for filename in onlycsvfiles: splitfiles(filename) SearchApi.applyToLoadedFiles(filename,PathToDataFiles,None,loadSolr,MAXIMUM_NUMBER_TO_LOAD) print "Solr Loader Ends"
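# A standalone sketch of the two ideas the loader above relies on -- batch the
# documents into fixed-size chunks and rename non-standard fields onto the
# schema's *_t dynamic fields -- so they can be sanity-checked without a
# running Solr.  Not part of the original file; _DEMO_STANDARD_FIELDS is a
# stand-in for Transaction.STANDARD_FIELDS.
def _chunks(items, n):
    """Yield successive n-sized slices, same idea as chunks() above."""
    for i in range(0, len(items), n):
        yield items[i:i + n]

_DEMO_STANDARD_FIELDS = set(["price", "vendor"])   # placeholder field names

def _to_solr_doc(record, doc_id):
    """Known fields keep their name; everything else gets the '_t' suffix."""
    doc = {"id": doc_id}
    for key, value in record.items():
        doc[key if key in _DEMO_STANDARD_FIELDS else key + "_t"] = value
    return doc

def _demo_chunking():
    rows = [{"price": "10", "color": "red"}, {"price": "12", "vendor": "acme"}]
    docs = [_to_solr_doc(r, "demo_%d" % i) for i, r in enumerate(rows)]
    # a real run would call solrCon.add_many(batch) and solrCon.commit() per chunk
    return list(_chunks(docs, 1000))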
unlicense
4,258,783,536,388,830,700
34.246269
119
0.712683
false
3.652746
false
false
false
ntoll/code-dojo
adventure/week3/team3/adventure.py
1
6088
from cmd import Cmd import re DIRECTIONS = 'N', 'E', 'S', 'W' NORTH, EAST, SOUTH, WEST = DIRECTIONS class Player(object): def __init__(self, location, name='Player'): assert isinstance(location, Location) self.location = location self.name = name class Location(object): def __init__(self, name, description=""): self.name = name self.description = description self.exits = dict() self.props = [] def __str__(self): return self.name def add_direction(self, direction, other_location): assert direction in DIRECTIONS self.exits[direction] = other_location def describe(self): out = '' out += "Current location: %s\n%s\n\n" % (self.name, self.description) for direction, location in self.exits.items(): out += "\t%s (%s)\n" % (location, direction) if self.props: plural = len(self.props) > 1 out += "\n%s item%s may come in handy (hint hint):\n\t%s" \ % (['This', 'These'][plural], ['', 's'][plural], '\n\t'.join(prop.aliases[0] for prop in self.props)) return out class Prop(object): def __init__(self, name): self.description = None self.location = None self.aliases = [name] def test_location(): startroot = Location('Start room') kitchen = Location('Kitchen') startroot.add_direction(NORTH, kitchen) def test_player(): lobby = Location('Lobby') john = Player(lobby, 'John') def load_universe(content): location = first_location = None locations = {} props = {} #parts = re.split(r"(?:\n|\r\n|\r){2,}", content.read()) parts = content.read().split('\r\n\r\n') import pdb for part in parts: location = None prop = None for line in part.splitlines(): line = line.strip() if not line or line.startswith('#'): continue #if line == 'N:Hall': # pdb.set_trace() if not location and not prop: # first line if line.startswith(':'): location = Location(line[1:]) locations[line[1:]] = location if not first_location: first_location = location if line.startswith('*'): prop = Prop(line[1:]) props[line[1:]] = prop else: if location: #print 'line', line if not location.description or line[1] != ':': location.description+= line else: direction, destination = line.split(':', 1) #print 'direction, destination', direction, destination location.add_direction(direction, destination) else: if not prop.location: items_location = locations[line] prop.location = items_location items_location.props.append(prop) elif not prop.description: prop.description = line elif line.startswith("A:"): # aliases #A:flashlight prop.aliases = [x.strip() for x in line[2:].split(',')] for location in locations.values(): for direction, destination in location.exits.items(): try: location.add_direction(direction, locations[destination]) except KeyError: raise SystemError("Your universe file sucks! %s" % destination) return locations, first_location class Game(Cmd): def __init__(self, gamefile, player_name): Cmd.__init__(self) self.locations, self.start_room = load_universe(file(gamefile)) self.player = Player(self.start_room, player_name) print self.player.location.describe() def do_move(self, direction): direction = direction.upper() newroom = self.player.location.exits.get(direction,None) if newroom == None: print "No pass around!" return self.player.location = self.player.location.exits[direction] def do_look(self, where): if where == "": self.player.location.describe() else: # TODO validate where newroom = self.player.location.exits.get(where,None) print newroom.describe() pass def do_joke(self, ok): print "that is not funny. What don't you try a pun?" 
if hasattr(self, 'joke'): print 'this is funny:%s' % self.joke self.joke = ok def postcmd(self, stop, x): #pass if not hasattr(self, 'joke'): print self.player.location.describe() #print self.player.location.describe() def play(gamefile): #start_room = _create_universe() player_name = raw_input('Player name?: ') or 'No name' g = Game(gamefile, player_name) g.cmdloop() ''' while True: if not player.location.exits: print "No more exits! GAME OVER!" break next_direction = raw_input('Where to next? ').upper() while next_direction not in player.location.exits.keys(): next_direction = raw_input('Where to next? (%s) ' %\ ', '.join(player.location.exits.keys())).upper() player.location = player.location.exits[next_direction] ''' if __name__ == '__main__': import sys if sys.argv[1] == 'test': test_location() test_player() sys.exit(0) try: play(sys.argv[1]) except KeyboardInterrupt: pass
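# A tiny universe file in the format load_universe() above expects, plus a
# helper showing how it would be parsed.  Not part of the original game; room
# and prop names are invented.  Records are separated by blank '\r\n' lines,
# ':Name' opens a location, '*name' opens a prop, and 'N:Other'/'S:Other'
# lines wire up exits (description lines must come before the exit lines).
SAMPLE_UNIVERSE = (
    ":Lobby\r\n"
    "A draughty entrance hall.\r\n"
    "N:Hall\r\n"
    "\r\n"
    ":Hall\r\n"
    "A long corridor.\r\n"
    "S:Lobby\r\n"
    "\r\n"
    "*torch\r\n"
    "Lobby\r\n"
    "A battered electric torch.\r\n"
    "A:torch, flashlight\r\n"
)

def _demo_universe():
    """Parse SAMPLE_UNIVERSE and describe the starting room (Python 2 era)."""
    from StringIO import StringIO
    locations, start = load_universe(StringIO(SAMPLE_UNIVERSE))
    return start.describe()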
mit
1,014,150,448,326,991,400
29.813472
113
0.508377
false
4.272281
false
false
false
amanzi/ats-dev
tools/meshing_ats/meshing_ats/meshing_ats.py
1
34933
"""Extrudes a 2D mesh to generate an ExodusII 3D mesh. Works with and assumes all polyhedra cells (and polygon faces). To see usage, run: ------------------------------------------------------------ python meshing_ats.py -h Example distributed with this source, to run: ------------------------------------------------------------ $> cd four-polygon-test $> python ../meshing_ats.py -n 10 -d 1 ./four_polygon.vtk $> mkdir run0 $> cd run0 $> ats --xml_file=../test1-fv-four-polygon.xml Requires building the latest version of Exodus ------------------------------------------------------------ Note that this is typically done in your standard ATS installation, assuming you have built your Amanzi TPLs with shared libraries (the default through bootstrap). In that case, simply ensure that ${AMANZI_TPLS_DIR}/SEACAS/lib is in your PYTHONPATH. """ from __future__ import print_function import sys,os import numpy as np import collections import argparse try: import exodus except ImportError: sys.path.append(os.path.join(os.environ["SEACAS_DIR"],"lib")) import exodus class SideSet(object): def __init__(self, name, setid, elem_list, side_list): assert(type(setid) == int) assert(type(elem_list) == list or type(elem_list) == np.ndarray) assert(type(side_list) == list or type(side_list) == np.ndarray) self.name = name self.setid = setid self.elem_list = elem_list self.side_list = side_list class LabeledSet(object): def __init__(self, name, setid, entity, ent_ids): assert entity in ['CELL', 'FACE', 'NODE'] assert(type(setid) == int) assert(type(ent_ids) == list or type(ent_ids) == np.ndarray) self.name = name self.setid = setid self.entity = entity self.ent_ids = np.array(ent_ids) class Mesh2D(object): def __init__(self, coords, connectivity, labeled_sets=None, check_handedness=True): """ Creates a 2D mesh from coordinates and a list cell-to-node connectivity lists. 
coords : numpy array of shape (NCOORDS, NDIMS) connectivity : list of lists of integer indices into coords specifying a (clockwise OR counterclockwise) ordering of the nodes around the 2D cell labeled_sets : list of LabeledSet objects """ assert type(coords) == np.ndarray assert len(coords.shape) == 2 self.dim = coords.shape[1] self.coords = coords self.conn = connectivity if labeled_sets is not None: self.labeled_sets = labeled_sets else: self.labeled_sets = [] self.validate() self.edge_counts() if check_handedness: self.check_handedness() def validate(self): assert self.coords.shape[1] == 2 or self.coords.shape[1] == 3 assert type(self.conn) is list for f in self.conn: assert type(f) is list assert len(set(f)) == len(f) for i in f: assert i < self.coords.shape[0] for ls in self.labeled_sets: if ls.entity == "NODE": size = len(self.coords) elif ls.entity == "CELL": size = len(self.conn) for i in ls.ent_ids: assert i < size return True def num_cells(self): return len(self.conn) def num_nodes(self): return self.coords.shape[0] def num_edges(self): return len(self.edges()) @staticmethod def edge_hash(i,j): return tuple(sorted((i,j))) def edges(self): return self.edge_counts().keys() def edge_counts(self): try: return self._edges except AttributeError: self._edges = collections.Counter(self.edge_hash(f[i], f[(i+1)%len(f)]) for f in self.conn for i in range(len(f))) return self._edges def check_handedness(self): for conn in self.conn: points = np.array([self.coords[c] for c in conn]) cross = 0 for i in range(len(points)): im = i - 1 ip = i + 1 if ip == len(points): ip = 0 p = points[ip] - points[i] m = points[i] - points[im] cross = cross + p[1] * m[0] - p[0] * m[1] if cross < 0: conn.reverse() def plot(self, color=None, ax=None): if color is None: import colors cm = colors.cm_mapper(0,self.num_cells()-1) colors = [cm(i) for i in range(self.num_cells())] else: colors = color verts = [[self.coords[i,0:2] for i in f] for f in self.conn] from matplotlib import collections gons = collections.PolyCollection(verts, facecolors=colors) from matplotlib import pyplot as plt if ax is None: fig,ax = plt.subplots(1,1) ax.add_collection(gons) ax.autoscale_view() @classmethod def read_VTK(cls, filename): try: return cls.read_VTK_Simplices(filename) except AssertionError: return cls.read_VTK_Unstructured(filename) @classmethod def read_VTK_Unstructured(cls, filename): with open(filename,'r') as fid: points_found = False polygons_found = False while True: line = fid.readline().decode('utf-8') if not line: # EOF break line = line.strip() if len(line) == 0: continue split = line.split() section = split[0] if section == 'POINTS': ncoords = int(split[1]) points = np.fromfile(fid, count=ncoords*3, sep=' ', dtype='d') points = points.reshape(ncoords, 3) points_found = True elif section == 'POLYGONS': ncells = int(split[1]) n_to_read = int(split[2]) gons = [] data = np.fromfile(fid, count=n_to_read, sep=' ', dtype='i') idx = 0 for i in range(ncells): n_in_gon = data[idx] gon = list(data[idx+1:idx+1+n_in_gon]) # check handedness -- need normals to point up! 
cross = [] for i in range(len(gon)): if i == len(gon)-1: ip = 0 ipp = 1 elif i == len(gon)-2: ip = i+1 ipp = 0 else: ip = i+1 ipp = i+2 d2 = points[gon[ipp]] - points[gon[ip]] d1 = points[gon[i]] - points[gon[ip]] cross.append(np.cross(d2, d1)) if (np.array([c[2] for c in cross]).mean() < 0): gon.reverse() gons.append(gon) idx += n_in_gon + 1 assert(idx == n_to_read) polygons_found = True if not points_found: raise RuntimeError("Unstructured VTK must contain sections 'POINTS'") if not polygons_found: raise RuntimeError("Unstructured VTK must contain sections 'POLYGONS'") return cls(points, gons) @classmethod def read_VTK_Simplices(cls, filename): """Stolen from meshio, https://github.com/nschloe/meshio/blob/master/meshio/vtk_io.py""" import vtk_io with open(filename,'r') as fid: data = vtk_io.read_buffer(fid) points = data[0] if len(data[1]) != 1: raise RuntimeError("Simplex VTK file is readable by vtk_io but not by meshing_ats. Includes: %r"%data[1].keys()) gons = [v for v in data[1].itervalues()][0] gons = gons.tolist() # check handedness for gon in gons: cross = [] for i in range(len(gon)): if i == len(gon)-1: ip = 0 ipp = 1 elif i == len(gon)-2: ip = i+1 ipp = 0 else: ip = i+1 ipp = i+2 d2 = points[gon[ipp]] - points[gon[ip]] d1 = points[gon[i]] - points[gon[ip]] cross.append(np.cross(d2, d1)) if (np.array([c[2] for c in cross]).mean() < 0): gon.reverse() return cls(points, gons) @classmethod def from_Transect(cls, x, z, width=1): """Creates a 2D surface strip mesh from transect data""" # coordinates if (type(width) is list or type(width) is np.ndarray): variable_width = True y = np.array([0,1]) else: variable_width = False y = np.array([0,width]) Xc, Yc = np.meshgrid(x, y) if variable_width: assert(Yc.shape[1] == 2) assert(len(width) == Yc.shape[0]) assert(min(width) > 0.) Yc[:,0] = -width/2. Yc[:,1] = width/2. 
Xc = Xc.flatten() Yc = Yc.flatten() Zc = np.concatenate([z,z]) # connectivity nsurf_cells = len(x)-1 conn = [] for i in range(nsurf_cells): conn.append([i, i+1, nsurf_cells + i + 2, nsurf_cells + i + 1]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) @classmethod def from_Transect_Guide(cls, x, z, guide): """Creates a 2D surface strip mesh from transect data""" assert type(guide) == np.ndarray assert guide.shape[1] == 3 # coordinates Xc = x Yc = np.zeros_like(x) Zc = z nsteps = guide.shape[0] xnew = Xc ynew = Yc znew = Zc for i in range(nsteps): xnew = xnew + guide[i][0] ynew = ynew + guide[i][1] znew = znew + guide[i][2] Xc = np.concatenate([Xc, xnew]) Yc = np.concatenate([Yc, ynew]) Zc = np.concatenate([Zc, znew]) # y = np.array([0,1,2]) # Xc, Yc = np.meshgrid(x, y) # Xc = Xc.flatten() # Yc = Yc.flatten() # Zc = np.concatenate([z,z,z]) # connectivity ns = len(x) conn = [] for j in range(nsteps): for i in range(ns-1): conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) @classmethod def from_Transect_GuideX(cls, x, z, guide, nsteps): """Creates a 2D surface strip mesh from transect data""" assert type(guide) == np.ndarray assert guide.shape[1] == 3 # coordinates Xc = x Yc = np.zeros_like(x) Zc = z nsteps = guide.shape[0] xnew = np.zeros_like(x) ynew = np.zeros(len(x)) znew = np.zeros_like(x) xnew[:] = Xc[:] ynew[:] = Yc[:] znew[:] = Zc[:] for i in range(nsteps): print(Yc) for j in range(len(x)): xnew[j] = xnew[j] + guide[j][0] ynew[j] = ynew[j] + guide[j][1] znew[j] = znew[j] + guide[j][2] Xc = np.concatenate([Xc, xnew]) Yc = np.concatenate([Yc, ynew]) Zc = np.concatenate([Zc, znew]) # y = np.array([0,1,2]) # Xc, Yc = np.meshgrid(x, y) # Xc = Xc.flatten() # Yc = Yc.flatten() # Zc = np.concatenate([z,z,z]) # connectivity ns = len(x) conn = [] for j in range(nsteps): for i in range(ns-1): conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) class Mesh3D(object): def __init__(self, coords, face_to_node_conn, elem_to_face_conn, side_sets=None, labeled_sets=None, material_ids=None): """ Creates a 3D mesh from coordinates and connectivity lists. 
coords : numpy array of shape (NCOORDS, 3) face_to_node_conn : list of lists of integer indices into coords specifying an (clockwise OR counterclockwise) ordering of the nodes around the face elem_to_face_conn : list of lists of integer indices into face_to_node_conn specifying a list of faces that make up the elem """ assert type(coords) == np.ndarray assert len(coords.shape) == 2 assert coords.shape[1] == 3 self.dim = coords.shape[1] self.coords = coords self.face_to_node_conn = face_to_node_conn self.elem_to_face_conn = elem_to_face_conn if labeled_sets is not None: self.labeled_sets = labeled_sets else: self.labeled_sets = [] if side_sets is not None: self.side_sets = side_sets else: self.side_sets = [] if material_ids is not None: self.material_id_list = collections.Counter(material_ids).keys() self.material_ids = material_ids else: self.material_id_list = [10000,] self.material_ids = [10000,]*len(self.elem_to_face_conn) self.validate() def validate(self): assert self.coords.shape[1] == 3 assert type(self.face_to_node_conn) is list for f in self.face_to_node_conn: assert type(f) is list assert len(set(f)) == len(f) for i in f: assert i < self.coords.shape[0] assert type(self.elem_to_face_conn) is list for e in self.elem_to_face_conn: assert type(e) is list assert len(set(e)) == len(e) for i in e: assert i < len(self.face_to_node_conn) for ls in self.labeled_sets: if ls.entity == "NODE": size = self.num_nodes() if ls.entity == "FACE": size = self.num_faces() elif ls.entity == "CELL": size = self.num_cells() for i in ls.ent_ids: assert i < size for ss in self.side_sets: for j,i in zip(ss.elem_list, ss.side_list): assert j < self.num_cells() assert i < len(self.elem_to_face_conn[j]) def num_cells(self): return len(self.elem_to_face_conn) def num_faces(self): return len(self.face_to_node_conn) def num_nodes(self): return self.coords.shape[0] def write_exodus(self, filename, face_block_mode="one block"): """Write the 3D mesh to ExodusII using arbitrary polyhedra spec""" # put cells in with blocks, which renumbers the cells, so we have to track sidesets. # Therefore we keep a map of old cell to new cell ordering # # also, though not required by the spec, paraview and visit # seem to crash if num_face_blocks != num_elem_blocks. So # make face blocks here too, which requires renumbering the faces. 
# -- first pass, form all elem blocks and make the map from old-to-new new_to_old_elems = [] elem_blks = [] for i_m,m_id in enumerate(self.material_id_list): # split out elems of this material, save new_to_old map elems_tuple = [(i,c) for (i,c) in enumerate(self.elem_to_face_conn) if self.material_ids[i] == m_id] new_to_old_elems.extend([i for (i,c) in elems_tuple]) elems = [c for (i,c) in elems_tuple] elem_blks.append(elems) old_to_new_elems = sorted([(old,i) for (i,old) in enumerate(new_to_old_elems)], lambda a,b: int.__cmp__(a[0],b[0])) # -- deal with faces, form all face blocks and make the map from old-to-new face_blks = [] if face_block_mode == "one block": # no reordering of faces needed face_blks.append(self.face_to_node_conn) elif face_block_mode == "n blocks, not duplicated": used_faces = np.zeros((len(self.face_to_node_conn),),'bool') new_to_old_faces = [] for i_m,m_id in enumerate(self.material_id_list): # split out faces of this material, save new_to_old map def used(f): result = used_faces[f] used_faces[f] = True return result elem_blk = elem_blks[i_m] faces_tuple = [(f,self.face_to_node_conn[f]) for c in elem_blk for (j,f) in enumerate(c) if not used(f)] new_to_old_faces.extend([j for (j,f) in faces_tuple]) faces = [f for (j,f) in faces_tuple] face_blks.append(faces) # get the renumbering in the elems old_to_new_faces = sorted([(old,j) for (j,old) in enumerate(new_to_old_faces)], lambda a,b: int.__cmp__(a[0],b[0])) elem_blks = [[[old_to_new_faces[f][1] for f in c] for c in elem_blk] for elem_blk in elem_blks] elif face_block_mode == "n blocks, duplicated": elem_blks_new = [] offset = 0 for i_m, m_id in enumerate(self.material_id_list): used_faces = np.zeros((len(self.face_to_node_conn),),'bool') def used(f): result = used_faces[f] used_faces[f] = True return result elem_blk = elem_blks[i_m] tuple_old_f = [(f,self.face_to_node_conn[f]) for c in elem_blk for f in c if not used(f)] tuple_new_old_f = [(new,old,f) for (new,(old,f)) in enumerate(tuple_old_f)] old_to_new_blk = np.zeros((len(self.face_to_node_conn),),'i')-1 for new,old,f in tuple_new_old_f: old_to_new_blk[old] = new + offset elem_blk_new = [[old_to_new_blk[f] for f in c] for c in elem_blk] #offset = offset + len(ftuple_new) elem_blks_new.append(elem_blk_new) face_blks.append([f for i,j,f in tuple_new_old_f]) elem_blks = elem_blks_new elif face_block_mode == "one block, repeated": # no reordering of faces needed, just repeat for eblock in elem_blks: face_blks.append(self.face_to_node_conn) else: raise RuntimeError("Invalid face_block_mode: '%s', valid='one block', 'n blocks, duplicated', 'n blocks, not duplicated'"%face_block_mode) # open the mesh file num_elems = sum(len(elem_blk) for elem_blk in elem_blks) num_faces = sum(len(face_blk) for face_blk in face_blks) ep = exodus.ex_init_params(title=filename, num_dim=3, num_nodes=self.num_nodes(), num_face=num_faces, num_face_blk=len(face_blks), num_elem=num_elems, num_elem_blk=len(elem_blks), num_side_sets=len(self.side_sets)) e = exodus.exodus(filename, mode='w', array_type='numpy', init_params=ep) # put the coordinates e.put_coord_names(['coordX', 'coordY', 'coordZ']) e.put_coords(self.coords[:,0], self.coords[:,1], self.coords[:,2]) # put the face blocks for i_blk, face_blk in enumerate(face_blks): face_raveled = [n for f in face_blk for n in f] e.put_polyhedra_face_blk(i_blk+1, len(face_blk), len(face_raveled), 0) e.put_node_count_per_face(i_blk+1, np.array([len(f) for f in face_blk])) e.put_face_node_conn(i_blk+1, np.array(face_raveled)+1) # put the elem blocks 
assert len(elem_blks) == len(self.material_id_list) for i_blk, (m_id, elem_blk) in enumerate(zip(self.material_id_list, elem_blks)): elems_raveled = [f for c in elem_blk for f in c] e.put_polyhedra_elem_blk(m_id, len(elem_blk), len(elems_raveled), 0) e.put_elem_blk_name(m_id, "MATERIAL_ID_%d"%m_id) e.put_face_count_per_polyhedra(m_id, np.array([len(c) for c in elem_blk])) e.put_elem_face_conn(m_id, np.array(elems_raveled)+1) # add sidesets e.put_side_set_names([ss.name for ss in self.side_sets]) for ss in self.side_sets: for elem in ss.elem_list: assert old_to_new_elems[elem][0] == elem new_elem_list = [old_to_new_elems[elem][1] for elem in ss.elem_list] e.put_side_set_params(ss.setid, len(ss.elem_list), 0) e.put_side_set(ss.setid, np.array(new_elem_list)+1, np.array(ss.side_list)+1) # finish and close e.close() @classmethod def extruded_Mesh2D(cls, mesh2D, layer_types, layer_data, ncells_per_layer, mat_ids): """ Regularly extrude a 2D mesh to make a 3D mesh. mesh2D : a Mesh2D object layer_types : either a string (type) or list of strings (types) layer_data : array of data needed (specific to the type) ncells_per_layer : either a single integer (same number of cells in all : layers) or a list of number of cells in the layer mat_ids : either a single integer (one mat_id for all layers) : or a list of integers (mat_id for each layer) : or a 2D numpy array of integers (mat_id for each layer and each column: [layer_id, surface_cell_id]) types: - 'constant' : (data=float thickness) uniform thickness - 'function' : (data=function or functor) thickness as a function : of (x,y) - 'snapped' : (data=float z coordinate) snap the layer to : provided z coordinate, telescoping as needed - 'node' : thickness provided on each node of the surface domain - 'cell' : thickness provided on each cell of the surface domain, : interpolate to nodes NOTE: dz is uniform through the layer in all but the 'snapped' case NOTE: 2D mesh is always labeled 'surface', extrusion is always downwards """ # make the data all lists # --------------------------------- def is_list(data): if type(data) is str: return False try: len(data) except TypeError: return False else: return True if is_list(layer_types): if not is_list(layer_data): layer_data = [layer_data,]*len(layer_types) else: assert len(layer_data) == len(layer_types) if not is_list(ncells_per_layer): ncells_per_layer = [ncells_per_layer,]*len(layer_types) else: assert len(ncells_per_layer) == len(layer_types) elif is_list(layer_data): layer_types = [layer_types,]*len(layer_data) if not is_list(ncells_per_layer): ncells_per_layer = [ncells_per_layer,]*len(layer_data) else: assert len(ncells_per_layer) == len(layer_data) elif is_list(ncells_per_layer): layer_type = [layer_type,]*len(ncells_per_layer) layer_data = [layer_data,]*len(ncells_per_layer) else: layer_type = [layer_type,] layer_data = [layer_data,] ncells_per_layer = [ncells_per_layer,] # helper data and functions for mapping indices from 2D to 3D # ------------------------------------------------------------------ if min(ncells_per_layer) < 0: raise RuntimeError("Invalid number of cells, negative value provided.") ncells_tall = sum(ncells_per_layer) ncells_total = ncells_tall * mesh2D.num_cells() nfaces_total = (ncells_tall+1) * mesh2D.num_cells() + ncells_tall * mesh2D.num_edges() nnodes_total = (ncells_tall+1) * mesh2D.num_nodes() np_mat_ids = np.array(mat_ids, dtype=int) if np_mat_ids.size == np.size(np_mat_ids, 0): if np_mat_ids.size == 1: np_mat_ids = np.full((len(ncells_per_layer), mesh2D.num_cells()), 
mat_ids[0], dtype=int) else: np_mat_ids = np.empty((len(ncells_per_layer), mesh2D.num_cells()), dtype=int) for ilay in range(len(ncells_per_layer)): np_mat_ids[ilay, :] = np.full(mesh2D.num_cells(), mat_ids[ilay], dtype=int) def col_to_id(column, z_cell): """Maps 2D cell ID and index in the vertical to a 3D cell ID""" return z_cell + column * ncells_tall def node_to_id(node, z_node): """Maps 2D node ID and index in the vertical to a 3D node ID""" return z_node + node * (ncells_tall+1) def edge_to_id(edge, z_cell): """Maps 2D edge hash and index in the vertical to a 3D face ID of a vertical face""" return (ncells_tall + 1) * mesh2D.num_cells() + z_cell + edge * ncells_tall # create coordinates # --------------------------------- coords = np.zeros((mesh2D.coords.shape[0],ncells_tall+1, 3),'d') coords[:,:,0:2] = np.expand_dims(mesh2D.coords[:,0:2],1) if mesh2D.dim == 3: coords[:,0,2] = mesh2D.coords[:,2] # else the surface is at 0 depth cell_layer_start = 0 for layer_type, layer_datum, ncells in zip(layer_types, layer_data, ncells_per_layer): if layer_type.lower() == 'constant': dz = float(layer_datum) / ncells for i in range(1,ncells+1): coords[:,cell_layer_start+i,2] = coords[:,cell_layer_start,2] - i * dz else: # allocate an array of coordinates for the bottom of the layer layer_bottom = np.zeros((mesh2D.coords.shape[0],),'d') if layer_type.lower() == 'snapped': # layer bottom is uniform layer_bottom[:] = layer_datum elif layer_type.lower() == 'function': # layer thickness is given by a function evaluation of x,y for node_col in range(mesh2D.coords.shape[0]): layer_bottom[node_col] = coords[node_col,cell_layer_start,2] - layer_datum(coords[node_col,0,0], coords[node_col,0,1]) elif layer_type.lower() == 'node': # layer bottom specifically provided through thickness layer_bottom[:] = coords[:,cell_layer_start,2] - layer_datum elif layer_type.lower() == 'cell': # interpolate cell thicknesses to node thicknesses import scipy.interpolate centroids = mesh2D.cell_centroids() interp = scipy.interpolate.interp2d(centroids[:,0], centroids[:,1], layer_datum, kind='linear') layer_bottom[:] = coords[:,cell_layer_start,2] - interp(mesh2D.coords[:,0], mesh2D.coords[:,1]) else: raise RuntimeError("Unrecognized layer_type '%s'"%layer_type) # linspace from bottom of previous layer to bottom of this layer for node_col in range(mesh2D.coords.shape[0]): coords[node_col,cell_layer_start:cell_layer_start+ncells+1,2] = np.linspace(coords[node_col,cell_layer_start,2], layer_bottom[node_col], ncells+1) cell_layer_start = cell_layer_start + ncells # create faces, face sets, cells bottom = [] surface = [] faces = [] cells = [list() for c in range(ncells_total)] # -- loop over the columns, adding the horizontal faces for col in range(mesh2D.num_cells()): nodes_2 = mesh2D.conn[col] surface.append(col_to_id(col,0)) for z_face in range(ncells_tall + 1): i_f = len(faces) f = [node_to_id(n, z_face) for n in nodes_2] if z_face != ncells_tall: cells[col_to_id(col, z_face)].append(i_f) if z_face != 0: cells[col_to_id(col, z_face-1)].append(i_f) faces.append(f) bottom.append(col_to_id(col,ncells_tall-1)) # -- loop over the columns, adding the vertical faces added = dict() vertical_side_cells = [] vertical_side_indices = [] for col in range(mesh2D.num_cells()): nodes_2 = mesh2D.conn[col] for i in range(len(nodes_2)): edge = mesh2D.edge_hash(nodes_2[i], nodes_2[(i+1)%len(nodes_2)]) try: i_e = added[edge] except KeyError: # faces not yet added to facelist i_e = len(added.keys()) added[edge] = i_e for z_face in range(ncells_tall): 
i_f = len(faces) assert i_f == edge_to_id(i_e, z_face) f = [node_to_id(edge[0], z_face), node_to_id(edge[1], z_face), node_to_id(edge[1], z_face+1), node_to_id(edge[0], z_face+1)] faces.append(f) face_cell = col_to_id(col, z_face) cells[face_cell].append(i_f) # check if this is an external if mesh2D._edges[edge] == 1: vertical_side_cells.append(face_cell) vertical_side_indices.append(len(cells[face_cell])-1) else: # faces already added from previous column for z_face in range(ncells_tall): i_f = edge_to_id(i_e, z_face) cells[col_to_id(col, z_face)].append(i_f) # Do some idiot checking # -- check we got the expected number of faces assert len(faces) == nfaces_total # -- check every cell is at least a tet for c in cells: assert len(c) > 4 # -- check surface sideset has the right number of entries assert len(surface) == mesh2D.num_cells() # -- check bottom sideset has the right number of entries assert len(bottom) == mesh2D.num_cells() # -- len of vertical sides sideset is number of external edges * number of cells, no pinchouts here num_sides = ncells_tall * sum(1 for e,c in mesh2D.edge_counts().iteritems() if c == 1) assert num_sides == len(vertical_side_cells) assert num_sides == len(vertical_side_indices) # make the material ids material_ids = np.zeros((len(cells),),'i') for col in range(mesh2D.num_cells()): z_cell = 0 for ilay in range(len(ncells_per_layer)): ncells = ncells_per_layer[ilay] for i in range(z_cell, z_cell+ncells): material_ids[col_to_id(col, i)] = np_mat_ids[ilay, col] z_cell = z_cell + ncells # make the side sets side_sets = [] side_sets.append(SideSet("bottom", 1, bottom, [1,]*len(bottom))) side_sets.append(SideSet("surface", 2, surface, [0,]*len(surface))) side_sets.append(SideSet("external_sides", 3, vertical_side_cells, vertical_side_indices)) # reshape coords coords = coords.reshape(nnodes_total, 3) for e,s in zip(side_sets[0].elem_list, side_sets[0].side_list): face = cells[e][s] fz_coords = np.array([coords[n] for n in faces[face]]) #print "bottom centroid = ", np.mean(fz_coords, axis=0) for e,s in zip(side_sets[1].elem_list, side_sets[1].side_list): face = cells[e][s] fz_coords = np.array([coords[n] for n in faces[face]]) #print "surface centroid = ", np.mean(fz_coords, axis=0) # instantiate the mesh return cls(coords, faces, cells, side_sets=side_sets, material_ids=material_ids) def commandline_options(): parser = argparse.ArgumentParser(description='Extrude a 2D mesh to make a 3D mesh') parser.add_argument("-n", "--num-cells", default=10, type=int, help="number of cells to extrude") parser.add_argument("-d", "--depth", default=40.0, type=float, help="depth to extrude") parser.add_argument("-o", "--outfile", default=None, type=str, help="output filename") parser.add_argument("-p", "--plot", default=False, action="store_true", help="plot the 2D mesh") parser.add_argument("infile",metavar="INFILE", type=str, help="input filename of surface mesh") options = parser.parse_args() if options.outfile is None: options.outfile = ".".join(options.infile.split(".")[:-1])+".exo" if os.path.isfile(options.outfile): print('Output file "%s" exists, cowardly not overwriting.'%options.outfile) sys.exit(1) if not os.path.isfile(options.infile): print('No input file provided') parser.print_usage() sys.exit(1) return options if __name__ == "__main__": options = commandline_options() m2 = Mesh2D.read_VTK(options.infile) if options.plot: m2.plot() m3 = Mesh3D.extruded_Mesh2D(m2, [options.depth,], [options.num_cells,], [10000,]) m3.write_exodus(options.outfile)
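# A minimal end-to-end sketch of the classes above, not part of the original
# module: build a 2D strip mesh from transect data, extrude it into a single
# constant-thickness layer, and write ExodusII.  It only runs in this module's
# own environment (Python 2 era NumPy plus the SEACAS exodus bindings); the
# output file name and material id are placeholders.
def _demo_extrusion(outfile="transect_demo.exo"):
    x = np.linspace(0.0, 100.0, 11)        # 10 surface cells along the transect
    z = np.zeros_like(x)                   # flat surface at elevation 0
    m2 = Mesh2D.from_Transect(x, z, width=1.0)
    m3 = Mesh3D.extruded_Mesh2D(m2,
                                layer_types=['constant'],
                                layer_data=[10.0],        # one 10 m thick layer
                                ncells_per_layer=[5],     # 5 cells through it
                                mat_ids=[10000])
    m3.write_exodus(outfile)
    return outfile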
bsd-3-clause
6,506,556,784,610,115,000
36.724622
166
0.507972
false
3.681421
false
false
false
pecryptfs/pecryptfs
pecryptfs/cmd_genfile.py
1
2753
#!/usr/bin/env python3 # pecryptfs - Portable Userspace eCryptfs # Copyright (C) 2015 Ingo Ruhnke <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from typing import List import argparse import os from pecryptfs.ecryptfs import generate_encrypted_file from pecryptfs.auth_token import AuthToken def main(): parser = argparse.ArgumentParser(description="eCryptfs Encrypted File Generator") parser.add_argument('files', metavar='FILE', type=str, nargs='+', help='Filenames to decrypt') parser.add_argument('-p', '--password', type=str, default="Test", help='Password to use for decryption, prompt when none given') parser.add_argument('-s', '--salt', type=str, default="0011223344556677", help='Salt to use for decryption') parser.add_argument('-o', '--output', type=str, help='Output directory') parser.add_argument('-c', '--cipher', type=str, help='Cipher to use', default="aes") parser.add_argument('-k', '--key-bytes', type=int, help='Key bytes to use', default=24) parser.add_argument('-v', '--verbose', action='store_true', help='Be verbose') args = parser.parse_args() output_directory = args.output if not os.path.isdir(output_directory): os.makedirs(output_directory) cipher = args.cipher key_bytes = args.key_bytes auth_token = AuthToken(args.password, args.salt) for input_filename in args.files: filenames: List[str] = [] data = generate_encrypted_file(auth_token, cipher, key_bytes) output_filename = "{}-{}.raw".format(cipher, key_bytes) with open(os.path.join(output_directory, output_filename), "wb") as fout: fout.write(data) if args.verbose: print("Password: {}".format(args.password)) print("Salt: {}".format(args.salt)) print("Filename: {}".format(input_filename)) print() for cipher, key_bytes, f in filenames: print("{:8} {:2} {}".format(cipher, key_bytes, f)) else: for cipher, key_bytes, f in filenames: print(f) # EOF #
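# A direct-library sketch of the generation step main() performs above, not
# part of the original tool.  Note that main() is defined but never invoked in
# this file, so it is presumably wired up as a console-script entry point
# elsewhere in the package.  Password, salt, cipher and key size mirror the
# CLI defaults; the output path is a placeholder.
def _demo_generate(path="aes-24.raw"):
    token = AuthToken("Test", "0011223344556677")       # password, salt
    data = generate_encrypted_file(token, "aes", 24)    # cipher, key bytes
    with open(path, "wb") as fout:
        fout.write(data)
    return path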
gpl-3.0
4,288,613,616,355,062,000
36.712329
98
0.656375
false
3.792011
false
false
false
ptphp/PyLib
src/fangte/fetch/fetch58_bak.py
1
29601
# -*- coding: utf-8 -*- import time import datetime import random import cookielib import urllib import urllib2 from urlparse import urlparse from config import * from common import * from BeautifulSoup import BeautifulSoup class BaseCrawl(object): #房源类型 1 出售 2 出租 3 求购 4 求租 flag = None isStoped = False response = None header = None #房源信息模板 infoT = {} #传入参数 param = {} #全局队列 queue = [] pageNo = 0 isFetched = False #超过时间的条数 overTimeNum = 0 def __init__(self,param,que): self.queue = que self.param = param self.header = header cj = cookielib.MozillaCookieJar() self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler()) self.endtime=str(datetime.date.today() -datetime.timedelta(days=7)) self._initRe() def getContent(self): if self.__cInit__(self.infoT['url']) : self.response = re.sub(" |\n|\r|\t| |&nbsp;|联系我时,请说是在58同城上看到的,谢谢!","",self.response) self.response = re.sub("rlist\d\">.*?</ul>","",self.response) try: if self.param['flag'] == 1: self.sell(); if self.param['flag'] == 2: self.rent(); if self.param['flag'] == 3: self.buy(); if self.param['flag'] == 4: self.req(); except Exception,what: print what if (time.time() - int(self.infoT['posttime']))>self.param['args']["timelimit"]: self.overTimeNum +=1 if self.overTimeNum > 5: self.pageNo = 0 self.isStoped = True self.overTimeNum = 0 def getPhoneself(self): if self.__cInit__(self.infoT['url']) : sHtml = self.response self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def __getLinks(self,url): if not self.__cInit__(url): return self.response = re.sub("\n|\r|\t| |&nbsp;","",self.response) page_main = regx_data(self.page_main_regex,self.response,"",0) self.page_main_trs_regex = "<tr logr=\".*?\">(.*?)</tr>" page_main_trs = regx_lists(self.page_main_trs_regex,page_main,"",0) if page_main_trs and len(page_main_trs)>0: for tr in page_main_trs: if self.isStoped: self.pageNo = 0 break self._initTemple(self.param['flag'],self.param['city']) try: if self.param['flag'] == 1: self.__parseSellTrs(tr) if self.param['flag'] == 2: self.__parseRentTrs(tr) if self.param['flag'] == 3: self.__parseBuyTrs(tr) if self.param['flag'] == 4: self.__parseReqTrs(tr) except Exception,what: print what else: if not self.isFetched: self.queue.append(self.infoT) self.isFetched = False time.sleep(0.1) self.infoT = {} self.pageNo +=1 else: self.pageNo = 0 def __parseBuyTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return #图片 img = soup.find('td',{'class':'img'}) if img: if img.img['src'].find("noimg") == -1: self.infoT['thumb'] = img.img['src'] #信息 t = soup.find('td',{'class':'t'}) self.infoT['belong'] = regx_data(self.house_belong_dict_regex,str(t),"",False) self.infoT['houseType'] = regx_data(self.house_type_regex,str(t),"",False) self.infoT['posttime'] = self.postTime(regx_data("更新时间:(.*?)<",str(t),"",False)) #self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False) #if self.infoT['room']: #self.infoT['room'] = re.sub("一|二|三|四|五|六|七|八|九|十","1|2|3|4|5|6|7|8|9|10",self.infoT['room']) self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False) self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False) agencyname = regx_data("(个人)",str(t),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 num = soup('td',{'class':'tc'}) if num and len(num) > 1: 
if str(num[0]).find("面议") == -1: price = num[0].b.string if price.find('-') == -1: self.infoT['price'] = price else: self.infoT['price'] = price.split("-")[0] self.infoT['price_max'] = price.split("-")[1] del price area = num[1].b.string if area.find('-') == -1: self.infoT['area'] = area else: self.infoT['area'] = area.split("-")[0] self.infoT['area_max'] = area.split("-")[1] del area self.infoT['search']= re.sub("<.*?>","",str(soup)) del soup del t del img del at del num del agencyname self.getContent() def __parseReqTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return agencyname = regx_data("(个人)",str(soup),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 if soup.find('b',{'class':'pri'}): self.infoT['price'] = soup.find('b',{'class':'pri'}).string if self.infoT['price']: if self.infoT['price'].find('-') != -1: self.infoT['price_max'] = self.infoT['price'].split("-")[1] self.infoT['price'] = self.infoT['price'].split("-")[0] self.infoT['room'] = soup("td")[2].string #时间 tds = soup("td")[3] if tds: self.infoT['posttime']= self.postTime(tds.string) #rint tds.string self.infoT['search']= re.sub("<.*?>","",str(soup)) del soup del at del agencyname del tds self.getContent() def __parseSellTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return #图片 img = soup.find('td',{'class':'img'}) if img: if img.img['src'].find("noimg") == -1: self.infoT['thumb'] = img.img['src'] #信息 t = soup.find('td',{'class':'t'}) self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,str(t),"",False) self.infoT['floor'] = regx_data(self.house_floor_regex,str(t),"",False) self.infoT['belong'] = regx_data(self.house_belong_dict_regex,str(t),"",False) self.infoT['houseType'] = regx_data(self.house_type_regex,str(t),"",False) self.infoT['toward'] = regx_data(self.house_toward_regex,str(t),"",False) self.infoT['age'] = regx_data("(\d+)年",str(t),"",False) self.infoT['posttime'] = self.postTime(regx_data("更新时间:(.*?)<",str(t),"",False)) #self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False) #if self.infoT['room']: #self.infoT['room'] = re.sub("一|二|三|四|五|六|七|八|九|十","1|2|3|4|5|6|7|8|9|10",self.infoT['room']) self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False) self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False) agencyname = regx_data("(个人)",str(t),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 num = soup('td',{'class':'tc'}) if num and len(num) > 1: if str(num[0]).find("面议") == -1: self.infoT['price'] = num[0].b.string self.infoT['area'] = num[1].b.string self.infoT['search']= re.sub("<.*?>","",str(soup)) del soup del t del img del at del agencyname self.getContent() def __parseRentTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return #图片 img = soup.find('td',{'class':'img'}) if img: if img.img['src'].find("noimg") == -1: self.infoT['thumb'] = img.img['src'] #信息 t = soup.find('td',{'class':'t'}) self.infoT['topfloor'] = 
regx_data(self.house_topfloor_regex,str(t),"",False) self.infoT['floor'] = regx_data(self.house_floor_regex,str(t),"",False) self.infoT['area'] = regx_data(self.house_totalarea_regex,str(t),"",False) self.infoT['fitment'] = regx_data(self.house_fitment_regex,str(t),"",False) self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False) self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False) self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False) self.infoT['equ'] = regx_data("配置:(.*?)<",str(soup),"",False) agencyname = regx_data("(个人)",str(t),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 if soup.find('b',{'class':'pri'}): self.infoT['price'] = soup.find('b',{'class':'pri'}).string #时间 tds = soup("td")[4] if tds: self.infoT['posttime']= self.postTime(tds.string) #rint tds.string self.infoT['search']= re.sub("<.*?>","",str(soup)) del soup del t del img del at del agencyname del tds self.getContent() def __cInit__(self,url): try: request = urllib2.Request(url, None, self.header) self.response = urllib2.urlopen(request).read() except Exception,what: return False else: return True def req(self): sHtml = self.response self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"个人房源",False) #if not agencyname: #agencyname = '个人房源' #联系人 self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False) #价格 if not self.infoT['price']: self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False) #500以下 if not self.infoT['price'] : self.infoT['price'] = regx_data(self.house_price1_regex,sHtml,0,False) #以上 if not self.infoT['price'] : self.infoT['price'] = regx_data(self.house_price2_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False)) #house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #区 self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False) #地段 #print self.house_section_regex self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def rent(self): sHtml = self.response self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"",False) #联系人 self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False) #楼层 if not self.infoT['floor']: self.infoT['floor'] = regx_data(self.house_floor_regex,sHtml,"",False) #顶层 if not self.infoT['topfloor']: self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,sHtml,"",False) #面积 if not self.infoT['area']: self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False) #价格 if not self.infoT['price']: self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) ) #house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #厅 if not self.infoT['hall']: self.infoT['hall'] = 
regx_data(self.house_hall_regex,sHtml,"",False) #卫 if not self.infoT['toilet']: self.infoT['toilet'] = regx_data(self.house_toilet_regex,sHtml,"",False) #押金 if not self.infoT['deposit']: self.infoT['deposit'] = regx_data(self.house_deposit_regex,sHtml,"",False) #小区 self.infoT['borough'] = regx_data(self.borough_name_regex,sHtml,"",False) #地址 self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False) #区 self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False) #地段 self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #图片 self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big") _t = regx_data(self.house_toward_t_regex,sHtml,"",False) #装修 if not self.infoT['fitment']: self.infoT['fitment'] = regx_data(self.house_fitment_regex,_t,"",False) #朝向 if not self.infoT['toward']: self.infoT['toward'] = regx_data(self.house_toward_regex,_t,"",False) #类型 if not self.infoT['houseType']: self.infoT['houseType'] = regx_data(self.house_type_regex,_t,"",False) #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def buy(self): sHtml = self.response self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"",False) #联系人 if not self.infoT['owner']: self.infoT['owner']= regx_data(self.username_regex,sHtml,"个人",False) #面积 if not self.infoT['area']: self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False) #价格 if not self.infoT['price']: self.infoT['price']= regx_data(self.house_price_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) ) #house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #地址 self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #图片 self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big") #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def sell(self): sHtml = self.response self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"",False) #联系人 self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False) #楼层 if not self.infoT['floor']: self.infoT['floor'] = regx_data(self.house_floor_regex,sHtml,"",False) #顶层 if not self.infoT['topfloor']: self.infoT['topfloor']= regx_data(self.house_topfloor_regex,sHtml,"",False) #面积 if not self.infoT['area']: self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False) #价格 if not self.infoT['price']: self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) ) #house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #厅 if not self.infoT['hall']: self.infoT['hall'] = regx_data(self.house_hall_regex,sHtml,"",False) #卫 if not self.infoT['toilet']: self.infoT['toilet'] = 
regx_data(self.house_toilet_regex,sHtml,"",False) #产权 if not self.infoT['belong']: self.infoT['belong'] = regx_data(self.house_belong_regex,sHtml,"",False) #房龄 99年 self.infoT['age'] = regx_data(self.house_age_regex,sHtml,"",False) #小区 self.infoT['borough'] = regx_data(self.borough_name_regex,sHtml,"",False) #地址 self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False) #区 self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False) #地段 self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #图片 self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big") _t = regx_data(self.house_toward_t_regex,sHtml,"",False) #装修 if not self.infoT['fitment']: self.infoT['fitment'] = regx_data(self.house_fitment_regex,_t,"",False) #朝向 if not self.infoT['toward']: self.infoT['toward'] = regx_data(self.house_toward_regex,_t,"",False) #类型 if not self.infoT['houseType']: self.infoT['houseType'] = regx_data(self.house_type_regex,_t,"",False) #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def _initRe(self): self.page_main_regex = "<div id=\"main\">(.*?)<div id=\"links\"> " self.agencyname_regex="agencyname:'(.*?)'," self.username_regex="username:'(.*?)'," self.house_title_regex="<h1>(.*)</h1>" self.house_floor_regex="第(\d+)层" self.house_topfloor_regex="共(\d+)层" self.house_room_regex="(\d+|一|二|三|四|五|六|七|八|九|十)室" self.house_hall_regex="(\d+)厅" self.house_toilet_regex="(\d+)卫" self.house_posttime_regex="发布时间:(.*?)浏览" self.house_age_regex="(\d+)年" self.house_region_regex = "locallist.*?listname.*?name:'(.*?)'" self.house_section_regex = "<li><i>区域:</i><a.*?<a.*?>(.*?)</a></li>" self.house_desc_regex = "class=\"maincon\">(.*?)</div>" self.house_phone_regex = "(http://image.58.com/showphone.aspx.*?)'" self.house_pics_regex = "(http://\d+.pic.58control.cn/p\d+/tiny/n_\d+.jpg)" self.house_toward_regex = "(东|南|西|北|南北|东西|东南|东北|西北)" self.house_fitment_regex = "(毛坯|简单装修|中等装修|精装修|豪华装修)" self.house_belong_dict_regex = "(商品房|经济适用房|公房|用权)" self.house_type_regex = "(平房|普通住宅|商住两用|公寓|别墅)" self.borough_name_regex = "<li><i>小区:</i><.*?>(.*?)<.*?></li>" self.borough_name1_regex = "<li><i>小区:</i>(.*?)</li>" if self.param['flag'] ==1: self.house_addr_regex = "address\">(.*?)<" self.house_totalarea_regex="(\d+)㎡" self.house_belong_regex="<li><i>产权:</i>(.*?)</li>" self.house_price_regex="(\d+)万元" self.house_toward_t_regex = "房龄:</i>(.*?)<" elif self.param['flag'] ==2: self.house_totalarea_regex="(\d+)㎡" self.house_price_regex="(\d+)元/月" self.house_equ_regex="vartmp='(.*?)';" self.house_deposit_regex="(押一付三|押一付一|押二付一|半年付|年付)" self.house_toward_t_regex = "基本情况:</i>(.*?)<" self.house_addr_regex = "address\">(.*?)<" elif self.param['flag'] ==3: self.house_belong_regex="<li><i>产权:</i>(.*?)</li>" self.house_totalarea_regex="(\d+-\d+)㎡" self.house_addr_regex="<li><i>地段:</i>(.*?)</li>" self.house_price_regex="(\d+-\d+)万元" elif self.param['flag'] ==4: self.house_price_regex="(\d+-\d+)元" self.house_price1_regex="(\d+)元以下" self.house_price2_regex="(\d+)元以上" self.house_room_regex="(一|两|三|四)居室" def _initTemple(self,flag,city): self.infoT = { 'flag':flag,#房源类型 1 出售 2 出租 3 求购 4 求租 'title':'', 'posttime':'', 'price':0, 'price_max':0, 'deposit':'', 'belong':'', 'room':0, 'hall':0, 'toilet':0, 'yt':0, 'area':0, 'area_max':0, 'houseType':'', 'fitment':'', 'floor':0, 'topfloor':0, 'toward':'', 'age':1, 'equ':'', 'city':city, 'region':'', 'borough':'', 
'section':'', 'addr':'', 'phone':'', 'owner':'', 'desc':'', 'search':'', 'url':'', 'thumb':'', 'webFlag':1, 'isPerson':1, } def postTime(self,posttime): if posttime and posttime.find('now') != -1: posttime = int(time.time()) if not posttime: return posttime = str(posttime).replace('前','') #print posttime if posttime.find("<") != -1 or posttime.find(">") != -1: posttime = re.sub('<.*?>','' ,pottime) if posttime.find('-') !=-1: if len(posttime.split("-"))==3: s = datetime.datetime(int(posttime.split('-')[0]),int(posttime.split('-')[1],),int(posttime.split('-')[2])) else: s = datetime.datetime(2011,int(posttime.split('-')[0],),int(posttime.split('-')[1])) posttime = int(time.mktime(s.timetuple())) elif posttime.find('分钟') !=-1: n = int(posttime.replace('分钟',''))*60 posttime = int(time.time() - n) elif posttime.find('小时') !=-1: n = int(posttime.replace('小时',''))*60*60 posttime = int(time.time() - n) else: posttime = int(time.time()) return posttime if (time.time() - self.fd['posttime']) > 3600*24*7: return print "++++++++++++++++" print time.strftime('%Y %m %d', time.localtime(self.fd['posttime'])) def run(self): self.pageNo = 1 while 1: if self.isStoped == True: break if self.pageNo: url = self.baseUrl(self.param['args'],self.pageNo) self.__getLinks(url) def baseUrl(self,args,pn): if args['region'] != '': args['region'] = args['region']+"/" else: args['region'] = '' if args['option']!= '': args['option'] = args['option']+"/" else: args['option'] = '' if self.param['flag'] == 1: baseUrl = 'http://%s.58.com/%sershoufang/0/%spn%d/?final=1&searchtype=3&sourcetype=5&key=%s' % (args['city'],args['region'],args['option'],pn,args['q']) if self.param['flag'] == 2: baseUrl = 'http://%s.58.com/%szufang/0/%spn%d/?final=1&key=%s' % (args['city'],args['region'],args['option'],pn,args['q']); if self.param['flag'] == 3: args['option'] = args['option'][:-1] baseUrl = 'http://%s.58.com/%sershoufang/0/%sh2/pn%d/?final=1&key=%s&searchtype=3&sourcetype=5' % (args['city'],args['region'],args['option'],pn,args['q']) if self.param['flag'] == 4: baseUrl = 'http://%s.58.com/%sqiuzu/0/%spn%d/?final=1&key=%s' % (args['city'],args['region'],args['option'],pn,args['q']) return baseUrl q = [] if __name__=="__main__": url1 = 'http://sh.58.com/ershoufang/7489033818376x.shtml' url2 = 'http://sh.58.com/zufang/7468246420482x.shtml' url3 = 'http://sh.58.com/ershoufang/7544211350792x.shtml' url4 = 'http://sh.58.com/qiuzu/7543125341446x.shtml' link2 = 'http://sh.58.com/zufang/0/?selpic=2' link1 = 'http://sh.58.com/ershoufang/' link3 = 'http://sh.58.com/ershoufang/h2/' link4 = 'http://sh.58.com/qiuzu/0/' data = {} data['flag'] = 1 data['city'] = 1 data['getPhone'] = 1 cc = BaseCrawl(data,q) cc.run()
apache-2.0
-6180525446337366000
38.575967
172
0.475727
false
3.201453
false
false
false
AntaresConsulting/odoo-marble
product_marble/models/stock.py
1
25218
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, api, _ from openerp.osv import osv, fields # from openerp.tools.translate import _ from operator import itemgetter import inspect import _common as comm import logging _logger = logging.getLogger(__name__) class stock_picking(osv.osv): _name = "stock.picking" _inherit = "stock.picking" _description = "Picking List" _tipo_de_move = [ ('raw', 'Raw'), ('insu', 'Input'), ('bac', 'Bacha'), ] def _get_tipo_de_move(self, cr, uid, context=None): return sorted(self._tipo_de_move, key=itemgetter(1)) def _get_types(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids): if len(pick.move_lines) > 0: res.update({pick.id : pick.move_lines[0].prod_type}) return res @api.cr_uid_ids_context def do_enter_transfer_details_marble(self, cr, uid, picking, context=None): resp = super(stock_picking, self).do_enter_transfer_details(cr, uid, picking, context=context) return resp['res_id'] _columns = { 'move_prod_type': fields.selection(_get_tipo_de_move, string='Product Type picking', select=True), 'prod_type': fields.function(_get_types, type='char', string='Product Type', store=False), } stock_picking() class stock_pack_operation(osv.osv): _name = "stock.pack.operation" _inherit = "stock.pack.operation" _description = "Packing Operation" #dimension_id = openerp.fields.Many2one('product.marble.dimension', string='Dimension', ondelete='set null') #dimension_unit = openerp.fields.Integer(string='Units') #prod_type = openerp.fields.Char(related='product_id.prod_type', string='Product Type') _columns = { 'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', domain=[('state','=','done')]), 'dimension_unit': fields.integer('Units', size=3), # units 'prod_type' : fields.related('product_id', 'prod_type', type='char', relation='product.template', string='Product Type'), } _defaults = { 'dimension_id': False, 'dimension_unit': 0, } def _before_save(self, cr, uid, vals, context): obj_pick = self.pool.get('stock.picking') pick_id = vals.get('picking_id',False) prod_id = vals.get('product_id',False) # localizo el 'stock_move' x picking + product, luego obteng su units a registrar en stock.pack.operation.- for mov in obj_pick.browse(cr, uid, pick_id, context=context).move_lines: if mov.product_id.id == prod_id and mov.product_id.prod_type == comm.RAW: vals.update(dimension_id = mov.dimension_id.id) vals.update(dimension_unit = mov.dimension_unit) break def create(self, cr, uid, vals, context=None): self._before_save(cr, uid, vals, context) #_logger.info('>> stock_pack_opetarion >> create >> 12- vals = %s', vals) 
return super(stock_pack_operation, self).create(cr, uid, vals, context=context) def write(self, cr, uid, ids, vals, context=None): #_logger.info('>> stock_pack_opetarion >> write >> 20- vals = %s', vals) self._before_save(cr, uid, vals, context) #_logger.info('>> stock_pack_opetarion >> write >> 21- vals = %s', vals) return super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context) stock_pack_operation() class stock_move(osv.osv): _inherit = "stock.move" # defino tipo de movimiento en Locacion de Stock: # return 0 = no afecta a Stock, # 1 = entra prod. en Stock (in: input), # -1 = sale prod. en Stock (out: output) def stock_move(self, cr, uid, mov=None, zeroVal=None): zeroValue = 0 if zeroVal == None else zeroVal if not mov: _logger.info(">> stock_move >> Stock.Move no definido.") return zeroValue loc_propio = [comm.get_location_stock(self, cr, uid), \ comm.get_location_recortes_stock(self, cr, uid)] loc_orig_parents = comm.get_loc_parents(self, mov.location_id, []) loc_dest_parents = comm.get_loc_parents(self, mov.location_dest_id, []) loc_orig_propio = (loc_propio[0] in loc_orig_parents) or (loc_propio[1] in loc_orig_parents) loc_dest_propio = (loc_propio[0] in loc_dest_parents) or (loc_propio[1] in loc_dest_parents) #_logger.info(">> stock_move >> 1- loc_propio = %s", loc_propio) #_logger.info(">> stock_move >> 2- loc_orig_parents = %s", loc_orig_parents) #_logger.info(">> stock_move >> 3- loc_orig_propio = %s", loc_orig_propio) #_logger.info(">> stock_move >> 4- loc_dest_parents = %s", loc_dest_parents) #_logger.info(">> stock_move >> 5- loc_dest_propio = %s", loc_dest_propio) if loc_orig_propio and loc_dest_propio: _logger.info(">> stock_move = 0 (NULO): movimiento interno en sectores propios.") return zeroValue if not loc_orig_propio and not loc_dest_propio: _logger.info(">> stock_move = 0 (NULO): movimiento interno en sectores no propios.") return zeroValue if not loc_orig_propio and loc_dest_propio: _logger.info(">> stock_move = 1 (IN): ingreso de mercaderia en almacen/sector.") return 1 if loc_orig_propio and not loc_dest_propio: _logger.info(">> stock_move = -1 (OUT): egreso de mercaderia en almacen/sector.") return -1 _logger.warning(">> ERROR >> stock_move = 0 >> ¿Entrada o Salida? operación no definida...") return zeroValue def _get_sign_qty(self, cr, uid, ids, field_name, arg, context=None): if not ids: return {} res = {} bal = 0.00 ids_by_date = self.search(cr, uid, [('id','in',ids)], order='date') for m in self.browse(cr, uid, ids_by_date): fields = {} # sign = self._get_sign(m) sign = self.stock_move(cr, uid, m, 1) fields['qty_dimension'] = sign * m.dimension_unit fields['qty_product'] = sign * m.product_qty bal += fields['qty_product'] fields['qty_balance'] = bal res[m.id] = fields # _logger.info(">> _get_field_with_sign >> 5 >> res = %s", res) return res def _get_types(self, cr, uid, ids, field_name, arg, context=None): #_logger.info(">> _get_types >> 1- ids = %s", ids) res = {} if not ids: return res if not isinstance(ids, (list,tuple)): ids = [ids] types = comm.get_prod_types(self, cr, uid, context) #_logger.info(">> _get_types >> 2- types = %s", types) for ms_id in self.browse(cr, uid, ids, context): cid = ms_id.product_id.categ_id.id #_logger.info(">> _get_types >> 3- cid = %s", cid) res.update({ms_id.id : types.get(cid,'*')}) #_logger.info(">> _get_types >> 4- res = %s", res) return res def _is_raw(self, cr, uid, ids, field_name, arg, context=None): #""" #Determina si [ids stock_move] tiene producto, del tipo is_raw si/no... 
#""" #res = {} #if not ids: # return res # para cada stock_move -> recupero su correspondiente prod_id #prod_ids = [sm.product_id.id for sm in self.browse(cr, uid, ids)] # recupero is_raw por cada producto: {prod_id: is_raw} #data = comm.is_raw_material_by_product_id(self, cr, uid, prod_ids) # convierto de {prod_id: is_raw} -> {stock_move_id: is_raw}: #res = {ids[k]: (data[prod_ids[k]] or False) for k in range(len(ids))} # _logger.info("10 >> _is_raw >> res = %s", res) #return res res = { sm.id : (sm.product_id.prod_type == comm.RAW) for sm in self.browse(cr, uid, ids) } #_logger.info("10 >> _is_raw >> res = %s", res) return res def _get_move_name(self, cr, uid, pro_id=False, dim_id=False): name = '' if not pro_id: return name obj_pro = self.pool.get('product.product') name = obj_pro.name_get(cr, uid, [pro_id], context=None)[0][1] if not dim_id or \ not comm.is_raw_material_by_product_id(self, cr, uid, [pro_id])[pro_id]: return name obj_dim = self.pool.get('product.marble.dimension') d = obj_dim.browse(cr, uid, [dim_id])[0] name = "%s >> %s" % (name, d.dimension) return name # ------------------------------------------------------------------------ def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False): res = super(stock_move, self).onchange_product_id(cr, uid, ids, prod_id, loc_id, loc_dest_id, partner_id) #_logger.info(">> onchange_product_id >> 1- res = %s", res) v = {} if (not res) or (not prod_id): return v no_prod_id = ('product_id' not in res['value']) if no_prod_id: res['value'].update({'product_id':prod_id}) v = self.calculate_dim(cr, uid, res['value']) if no_prod_id: del v['product_id'] res['value'].update(v) #_logger.info(">> onchange_product_id >> 2- res = %s", res) return res def onchange_calculate_dim(self, cr, uid, ids, pro_id, pro_uom, pro_qty, dim_id, dim_unit): v = { 'product_id' : pro_id, 'product_uom' : pro_uom, 'product_uom_qty' : pro_qty, 'dimension_id' : dim_id, 'dimension_unit' : dim_unit, 'is_raw' : False, 'prod_type' : comm.OTHER, } # _logger.info(">> onchange_calculate_dim >> 0- val = %s", val) val = self.calculate_dim(cr, uid, v) # _logger.info(">> onchange_calculate_dim >> 1- val = %s", val) return {'value': val} def calculate_dim(self, cr, uid, val): #_logger.info(" >> calculate_dim >> 100- val = %s", val) pro_id = val.get('product_id', False) pro_uom = val.get('product_uom', False) pro_uos = val.get('product_uos', False) pro_qty = val.get('product_uom_qty', 0.00) dim_id = val.get('dimension_id', False) dim_unit = val.get('dimension_unit', 0.00) is_raw = val.get('is_raw', False) prod_type = val.get('prod_type', comm.OTHER) if not pro_id: return val #pro = self.pool.get('product.product').browse(cr, uid, pro_id) #_logger.info(" >> calculate_dim >> 1- prod = %s", pro) #pro = self.pool.get('product.product').browse(cr, uid, pro_id).categ_id #_logger.info(" >> calculate_dim >> 2- prod = %s", pro) #cid = self.pool.get('product.product').browse(cr, uid, pro_id).categ_id.id #prod_type = comm.get_prod_types(self, cr, uid).get(cid, comm.OTHER) #val['prod_type'] = prod_type prod_type = self.pool.get('product.product').browse(cr, uid, pro_id).prod_type val['prod_type'] = prod_type m2 = 0.00 #is_raw = comm.is_raw_material_by_product_id(self, cr, uid, [pro_id])[pro_id] is_raw = (prod_type == comm.RAW) if prod_type not in ('raw', 'bacha'): val['description'] = self._get_move_name(cr, uid, pro_id, dim_id) return val elif prod_type == 'bacha': val['description'] = self._get_move_name(cr, uid, pro_id, dim_id) 
val['product_uom'] = comm.get_uom_units_id(self,cr,uid) return val m2 = 0.00 if dim_id: #obj = self.pool.get('product.marble.dimension') #data = obj.read(cr, uid, [dim_id], ['m2'], context=None) #m2 = data[0]['m2'] if (len(data) > 0 and len(data[0]) > 0) else 0.00 m2 = self.pool.get('product.marble.dimension').browse(cr, uid, dim_id).m2 pro_qty = dim_unit * m2 pro_uom = comm.get_uom_m2_id(self,cr,uid) v = {} v['product_id'] = pro_id v['product_uos'] = pro_uos v['product_uom'] = pro_uom v['product_uom_qty'] = pro_qty v['dimension_id'] = dim_id v['dimension_unit'] = dim_unit v['is_raw'] = is_raw v['prod_type'] = prod_type v['description'] = self._get_move_name(cr, uid, pro_id, dim_id) #_logger.info(" >> calculate_dim >> 101- v = %s", v) return v # ------------------------------------------------------------------------ def _check_data_before_save(self, cr, uid, sm_id, val): #_logger.info(">> _check_data_before_save >> 1- sm_id = %s", sm_id) #_logger.info(">> _check_data_before_save >> 2- val = %s", val) if 'product_id' not in val: return # defino campos a evaluar fields_list = ['product_id','product_uom','product_uom_qty','dimension_id','dimension_unit','is_raw','description'] # si (NO existe algun elemento de [fields_list] en [val]) >> me voy, no precesar... if not any(e in fields_list for e in val.keys()): return to_update = {} no_update = {} obj = (sm_id and self.pool.get('stock.move').browse(cr, uid, sm_id)) or False #_logger.info(">> _check_data_before_save >> 3- obj = %s", obj) # divido [info suministrada por actuatizar] e [info calculada, no para actualizar, requerida] for field in fields_list: if (field in val): to_update[field] = val[field] continue # >> si (field es 'read-only') >> la data no viaja... elif (field in ['product_uom', 'product_uom_qty', 'description']): to_update[field] = val.get(field,'') continue else: no_update[field] = (obj and (obj[0][field].id if ('class' in str(type(obj[0][field]))) else obj[0][field])) or False param = dict(to_update.items() + no_update.items()) v = self.calculate_dim(cr, uid, param) # actualizo valores de retorno for field in to_update: if (field not in val) and (not v[field]): # no copiarlo... pass else: val[field] = v[field] # ------------------------------------------------- # si 'is_raw' >> valido datos requeridos... 
valu = v mov = obj and obj[0] #_logger.info(">> _check_data_before_save >> 6- mov = %s", mov) is_raw = valu.get('is_raw',False) or (mov and mov.is_raw) dim_id = valu.get('dimension_id',0) or (mov and mov.dimension_id.id) dim_unit = valu.get('dimension_unit',0) or (mov and mov.dimension_unit) pro_qty = valu.get('product_uom_qty',0) or (mov and mov.product_uom_qty) msg = self._check_data_required(cr, uid, is_raw, dim_id, dim_unit, pro_qty) if msg: raise osv.except_osv(_('Error'), _(msg)) return def _check_data_required(self, cr, uid, is_raw, dim_id, dim_unit, prod_qty): if not is_raw: return '' if not dim_id: return 'You cannot save a Move-Stock without Dimension (id)' if not dim_unit: return 'You cannot save a Move-Stock without Quantity Dimension (qty)' if not prod_qty: return 'You cannot save a Move-Stock without Quantity Product (uom qty)' return '' # ------------------------------------------------------------------------ def create(self, cr, uid, data, context=None): #_logger.info('>> stock_move >> create >> 1- data = %s', data) self._check_data_before_save(cr, uid, [], data) #_logger.info('>> stock_move >> create >> 2- data = %s', data) return super(stock_move, self).create(cr, uid, data, context=context) def write(self, cr, uid, ids, vals, context=None): #for ms_id in ids: # self._check_data_before_save(cr, uid, ms_id, vals) #_logger.info('>> stock_move >> write >> 11- ids = %s', ids) #_logger.info('>> stock_move >> write >> 12- vals = %s', vals) #if len(ids) > 1: # raise osv.except_osv(_('Error'), 'TODO: A corregir. Mas de un registro a escribir....') sm_id = ids[0] if len(ids) >= 1 else False self._check_data_before_save(cr, uid, sm_id, vals) #_logger.info('>> stock_move >> write >> 13- vals = %s', vals) return super(stock_move, self).write(cr, uid, ids, vals, context=context) # --- extend: registro en balance --- def action_done(self, cr, uid, ids, context=None): if not super(stock_move, self).action_done(cr, uid, ids, context=context): return False #_logger.info(">> _action_done >> 01 >> ids = %s", ids) obj_bal = self.pool.get('product.marble.dimension.balance') #obj_mov = [move for move in self.browse(cr, uid, ids, context=context) if move.state == 'done' and move.product_id.is_raw] obj_mov = [move for move in self.browse(cr, uid, ids, context=context) if move.state == 'done' and (move.product_id.prod_type == comm.RAW)] if not obj_mov: return True #_logger.info(">> _action_done >> 02 >> obj_mov = %s", obj_mov) # obj_mov is raw -> verifico: # >> si (move.location = stock_loc or move.location_dest = stock_loc) # >> registro en Balance. # stock_loc = comm.get_location_stock(self, cr, uid) # bal_list = [mov for mov in obj_mov if stock_loc in [mov.location_id.id, mov.location_dest_id.id]] # bal_list = [mov for mov in obj_mov if self.stock_move(cr, uid, mov) != 0] bal_list = [mov for mov in obj_mov] #_logger.info(">> _action_done >> 02 >> stock_loc = %s", stock_loc) #_logger.info(">> _action_done >> 03 >> bal_list = %s", bal_list) for mov in bal_list: # valid data required #msg = self._check_data_required(cr, uid, mov.product_id.is_raw, mov.dimension_id, mov.dimension_unit, mov.product_uom_qty) is_raw = (mov.product_id.prod_type == comm.RAW) msg = self._check_data_required(cr, uid, is_raw, mov.dimension_id, mov.dimension_unit, mov.product_uom_qty) if msg: raise osv.except_osv(_('Error'), _(msg)) #_logger.info(">> _action_done >> 888- stock_move = %s", self.stock_move(cr, uid, mov)) # set data.. 
val = { 'prod_id': mov.product_id.id, 'dim_id': mov.dimension_id.id, 'dimension_unit': mov.dimension_unit, 'dimension_m2': mov.product_uom_qty, # 'typeMove': 'in' if stock_loc == mov.location_dest_id.id else 'out' 'typeMove': 'in' if self.stock_move(cr, uid, mov) > 0 else 'out' } #_logger.info(">> _action_done >> 04- val = %s", val) obj_bal.register_balance(cr, uid, val, context) #_logger.info(">> _action_done >> 05- OK >> val = %s", val) return True _columns = { 'description': fields.char('Description'), 'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', select=True, states={'done': [('readonly', True)]}, domain=[('state','=','done')]), 'dimension_unit': fields.integer('Units', size=3, states={'done': [('readonly', True)]}), 'is_raw': fields.function(_is_raw, type='boolean', string='Is Marble'), 'prod_type' : fields.related('product_id', 'prod_type', type='char', relation='product.template', string='Product Type'), 'employee_id': fields.many2one('hr.employee', 'Empleado', select=True, states={'done': [('readonly', True)]}, domain=[('active','=',True)]), 'employee_image': fields.related('employee_id', 'image_small', type='binary', relation='hr.employee', string='Part Number', store=True, readonly=True), 'partner_picking_id': fields.related('picking_id', 'partner_id', type='many2one', relation='res.partner', string='Patern', store=False), 'qty_dimension': fields.function(_get_sign_qty, string='Unidades', multi="sign"), 'qty_product': fields.function(_get_sign_qty, string='Area (m2)', multi="sign"), 'qty_balance': fields.function(_get_sign_qty, string='Balance (m2)', multi="sign"), 'use_client_location': fields.boolean('Does the customer provides the products?', readonly=True), } _defaults = { 'dimension_id': False, 'dimension_unit': 0, } stock_move() class stock_inventory_line(osv.osv): _inherit = "stock.inventory.line" _name = "stock.inventory.line" _description = "Inventory Line" _columns = { 'is_raw': fields.boolean('Is Raw', readonly=True), 'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', domain=[('state','=','done')]), 'dimension_unit': fields.integer('Real Dim. [Units]', size=3), # units 'dimension_m2': fields.float('Real Dim. [M2]', digits=(5,3)), # m2 'dimension_unit_theoretical': fields.integer('Theoretical Dim. [Units]', size=3, readonly=True), # units 'dimension_m2_theoretical': fields.float('Theoretical Dim. 
[M2]', digits=(5,3), readonly=True), # m2 } defaults = { 'is_raw': False, 'dimension_id': False, 'dimension_unit': 0, 'dimension_m2': 0, 'dimension_unit_theoretical': 0, 'dimension_m2_theoretical': 0, } # overwrite: stock > stock_inventory_line - odoo v8.0 - line: 2727 - 27555 # sobre escribo metodo para incorporar 'dimensiones' en caso de ser materia prima def _resolve_inventory_line(self, cr, uid, inventory_line, context=None): stock_move_obj = self.pool.get('stock.move') if inventory_line.is_raw: diff_unit = inventory_line.dimension_unit_theoretical - inventory_line.dimension_unit diff = inventory_line.dimension_m2_theoretical - inventory_line.dimension_m2 else: diff = inventory_line.theoretical_qty - inventory_line.product_qty if not diff: return # each theorical_lines where difference between theoretical and checked quantities is not 0 is a line for which we need to create a stock move vals = { 'name': _('INV:') + (inventory_line.inventory_id.name or ''), 'product_id': inventory_line.product_id.id, 'product_uom': inventory_line.product_uom_id.id, 'date': inventory_line.inventory_id.date, 'company_id': inventory_line.inventory_id.company_id.id, 'inventory_id': inventory_line.inventory_id.id, 'state': 'confirmed', 'restrict_lot_id': inventory_line.prod_lot_id.id, 'restrict_partner_id': inventory_line.partner_id.id, 'dimension_id': inventory_line.dimension_id.id # dimension } inventory_location_id = inventory_line.product_id.property_stock_inventory.id if diff < 0: # found more than expected vals['location_id'] = inventory_location_id vals['location_dest_id'] = inventory_line.location_id.id vals['product_uom_qty'] = -diff # dim >> m2 [faltante] vals['dimension_unit'] = (inventory_line.is_raw and -diff_unit) or 0 # dim >> unidades [faltante] else: # found less than expected vals['location_id'] = inventory_line.location_id.id vals['location_dest_id'] = inventory_location_id vals['product_uom_qty'] = diff # dim >> m2 [excedente] vals['dimension_unit'] = (inventory_line.is_raw and diff_unit) or 0 # dim >> unidades [excedente] #_logger.info(">> _inv >> 01- vals = %s", vals) #_logger.info(">> _inv >> 02- uom_qty = %s", vals['product_uom_qty']) #_logger.info(">> _inv >> 03- dim_uni = %s", vals['dimension_unit']) return stock_move_obj.create(cr, uid, vals, context=context) stock_inventory_line() #
gpl-2.0
-4009658643104912400
41.884354
164
0.560398
false
3.363927
false
false
false
CiscoSystems/dashboard-quantum-beta
django-openstack/django_openstack/api.py
1
19242
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2011 Fourth Paradigm Development, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Methods and interface objects used to interact with external apis. API method calls return objects that are in many cases objects with attributes that are direct maps to the data returned from the API http call. Unfortunately, these objects are also often constructed dynamically, making it difficult to know what data is available from the API object. Because of this, all API calls should wrap their returned object in one defined here, using only explicitly defined atributes and/or methods. In other words, django_openstack developers not working on django_openstack.api shouldn't need to understand the finer details of APIs for Nova/Glance/Swift et al. """ from django.conf import settings import cloudfiles import glance.client import httplib import json import logging import openstack.compute import openstackx.admin import openstackx.api.exceptions as api_exceptions import openstackx.extras import openstackx.auth from urlparse import urlparse LOG = logging.getLogger('django_openstack.api') class APIResourceWrapper(object): """ Simple wrapper for api objects Define _attrs on the child class and pass in the api object as the only argument to the constructor """ _attrs = [] def __init__(self, apiresource): self._apiresource = apiresource def __getattr__(self, attr): if attr in self._attrs: # __getattr__ won't find properties return self._apiresource.__getattribute__(attr) else: LOG.debug('Attempted to access unknown attribute "%s" on' ' APIResource object of type "%s" wrapping resource of' ' type "%s"' % (attr, self.__class__, self._apiresource.__class__)) raise AttributeError(attr) class APIDictWrapper(object): """ Simple wrapper for api dictionaries Some api calls return dictionaries. This class provides identical behavior as APIResourceWrapper, except that it will also behave as a dictionary, in addition to attribute accesses. 
Attribute access is the preferred method of access, to be consistent with api resource objects from openstackx """ def __init__(self, apidict): self._apidict = apidict def __getattr__(self, attr): if attr in self._attrs: try: return self._apidict[attr] except KeyError, e: raise AttributeError(e) else: LOG.debug('Attempted to access unknown item "%s" on' 'APIResource object of type "%s"' % (attr, self.__class__)) raise AttributeError(attr) def __getitem__(self, item): try: return self.__getattr__(item) except AttributeError, e: # caller is expecting a KeyError raise KeyError(e) def get(self, item, default=None): try: return self.__getattr__(item) except AttributeError: return default class Container(APIResourceWrapper): """Simple wrapper around cloudfiles.container.Container""" _attrs = ['name'] class Console(APIResourceWrapper): """Simple wrapper around openstackx.extras.consoles.Console""" _attrs = ['id', 'output', 'type'] class Flavor(APIResourceWrapper): """Simple wrapper around openstackx.admin.flavors.Flavor""" _attrs = ['disk', 'id', 'links', 'name', 'ram', 'vcpus'] class Image(APIDictWrapper): """Simple wrapper around glance image dictionary""" _attrs = ['checksum', 'container_format', 'created_at', 'deleted', 'deleted_at', 'disk_format', 'id', 'is_public', 'location', 'name', 'properties', 'size', 'status', 'updated_at'] def __getattr__(self, attrname): if attrname == "properties": return ImageProperties(super(Image, self).__getattr__(attrname)) else: return super(Image, self).__getattr__(attrname) class ImageProperties(APIDictWrapper): """Simple wrapper around glance image properties dictionary""" _attrs = ['architecture', 'image_location', 'image_state', 'kernel_id', 'project_id', 'ramdisk_id'] class KeyPair(APIResourceWrapper): """Simple wrapper around openstackx.extras.keypairs.Keypair""" _attrs = ['fingerprint', 'key_name', 'private_key'] class Server(APIResourceWrapper): """Simple wrapper around openstackx.extras.server.Server Preserves the request info so image name can later be retrieved """ _attrs = ['addresses', 'attrs', 'hostId', 'id', 'imageRef', 'links', 'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid', 'image_name', 'virtual_interfaces'] def __init__(self, apiresource, request): super(Server, self).__init__(apiresource) self.request = request def __getattr__(self, attr): if attr == "attrs": return ServerAttributes(super(Server, self).__getattr__(attr)) else: return super(Server, self).__getattr__(attr) @property def image_name(self): image = image_get(self.request, self.imageRef) return image.name class ServerAttributes(APIDictWrapper): """Simple wrapper around openstackx.extras.server.Server attributes Preserves the request info so image name can later be retrieved """ _attrs = ['description', 'disk_gb', 'host', 'image_ref', 'kernel_id', 'key_name', 'launched_at', 'mac_address', 'memory_mb', 'name', 'os_type', 'project_id', 'ramdisk_id', 'scheduled_at', 'terminated_at', 'user_data', 'user_id', 'vcpus', 'hostname'] class Services(APIResourceWrapper): _attrs = ['disabled', 'host', 'id', 'last_update', 'stats', 'type', 'up', 'zone'] class SwiftObject(APIResourceWrapper): _attrs = ['name'] class Tenant(APIResourceWrapper): """Simple wrapper around openstackx.auth.tokens.Tenant""" _attrs = ['id', 'description', 'enabled'] class Token(APIResourceWrapper): """Simple wrapper around openstackx.auth.tokens.Token""" _attrs = ['id', 'serviceCatalog', 'tenant_id', 'username'] class Usage(APIResourceWrapper): """Simple wrapper around openstackx.extras.usage.Usage""" 
_attrs = ['begin', 'instances', 'stop', 'tenant_id', 'total_active_disk_size', 'total_active_instances', 'total_active_ram_size', 'total_active_vcpus', 'total_cpu_usage', 'total_disk_usage', 'total_hours', 'total_ram_usage'] class User(APIResourceWrapper): """Simple wrapper around openstackx.extras.users.User""" _attrs = ['email', 'enabled', 'id', 'tenantId'] def url_for(request, service_name, admin=False): catalog = request.session['serviceCatalog'] if admin: rv = catalog[service_name][0]['adminURL'] else: rv = catalog[service_name][0]['internalURL'] return rv def check_openstackx(f): """Decorator that adds extra info to api exceptions The dashboard currently depends on openstackx extensions being present in nova. Error messages depending for views depending on these extensions do not lead to the conclusion that nova is missing extensions. This decorator should be dropped and removed after keystone and dashboard more gracefully handle extensions and openstackx extensions aren't required by the dashboard in nova. """ def inner(*args, **kwargs): try: return f(*args, **kwargs) except api_exceptions.NotFound, e: e.message = e.details or '' e.message += ' This error may be caused by missing openstackx' \ ' extensions in nova. See the dashboard README.' raise return inner def compute_api(request): compute = openstack.compute.Compute( auth_token=request.session['token'], management_url=url_for(request, 'nova')) # this below hack is necessary to make the jacobian compute client work # TODO(mgius): It looks like this is unused now? compute.client.auth_token = request.session['token'] compute.client.management_url = url_for(request, 'nova') LOG.debug('compute_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'nova'))) return compute def account_api(request): LOG.debug('account_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'identity', True))) return openstackx.extras.Account( auth_token=request.session['token'], management_url=url_for(request, 'identity', True)) def glance_api(request): o = urlparse(url_for(request, 'glance')) LOG.debug('glance_api connection created for host "%s:%d"' % (o.hostname, o.port)) return glance.client.Client(o.hostname, o.port) def admin_api(request): LOG.debug('admin_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'nova', True))) return openstackx.admin.Admin(auth_token=request.session['token'], management_url=url_for(request, 'nova', True)) def extras_api(request): LOG.debug('extras_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'nova'))) return openstackx.extras.Extras(auth_token=request.session['token'], management_url=url_for(request, 'nova')) def auth_api(): LOG.debug('auth_api connection created using url "%s"' % settings.OPENSTACK_KEYSTONE_URL) return openstackx.auth.Auth( management_url=settings.OPENSTACK_KEYSTONE_URL) def swift_api(): return cloudfiles.get_connection( settings.SWIFT_ACCOUNT + ":" + settings.SWIFT_USER, settings.SWIFT_PASS, authurl=settings.SWIFT_AUTHURL) def console_create(request, instance_id, kind=None): return Console(extras_api(request).consoles.create(instance_id, kind)) def flavor_create(request, name, memory, vcpu, disk, flavor_id): return Flavor(admin_api(request).flavors.create( name, int(memory), int(vcpu), int(disk), flavor_id)) def flavor_delete(request, flavor_id, purge=False): 
admin_api(request).flavors.delete(flavor_id, purge) def flavor_get(request, flavor_id): return Flavor(compute_api(request).flavors.get(flavor_id)) @check_openstackx def flavor_list(request): return [Flavor(f) for f in extras_api(request).flavors.list()] def image_create(request, image_meta, image_file): return Image(glance_api(request).add_image(image_meta, image_file)) def image_delete(request, image_id): return glance_api(request).delete_image(image_id) def image_get(request, image_id): return Image(glance_api(request).get_image(image_id)[0]) def image_list_detailed(request): return [Image(i) for i in glance_api(request).get_images_detailed()] def image_update(request, image_id, image_meta=None): image_meta = image_meta and image_meta or {} return Image(glance_api(request).update_image(image_id, image_meta=image_meta)) def keypair_create(request, name): return KeyPair(extras_api(request).keypairs.create(name)) def keypair_delete(request, keypair_id): extras_api(request).keypairs.delete(keypair_id) @check_openstackx def keypair_list(request): return [KeyPair(key) for key in extras_api(request).keypairs.list()] def server_create(request, name, image, flavor, user_data, key_name): return Server(extras_api(request).servers.create( name, image, flavor, user_data=user_data, key_name=key_name), request) def server_delete(request, instance): compute_api(request).servers.delete(instance) def server_get(request, instance_id): response = compute_api(request).servers.get(instance_id), request LOG.info(response) return Server(compute_api(request).servers.get(instance_id), request) @check_openstackx def server_list(request): return [Server(s, request) for s in extras_api(request).servers.list()] def server_reboot(request, instance_id, hardness=openstack.compute.servers.REBOOT_HARD): server = server_get(request, instance_id) server.reboot(hardness) def service_get(request, name): return Services(admin_api(request).services.get(name)) @check_openstackx def service_list(request): return [Services(s) for s in admin_api(request).services.list()] def service_update(request, name, enabled): return Services(admin_api(request).services.update(name, enabled)) def token_get_tenant(request, tenant_id): tenants = auth_api().tenants.for_token(request.session['token']) for t in tenants: if str(t.id) == str(tenant_id): return Tenant(t) LOG.warning('Unknown tenant id "%s" requested' % tenant_id) def token_list_tenants(request, token): return [Tenant(t) for t in auth_api().tenants.for_token(token)] def tenant_create(request, tenant_id, description, enabled): return Tenant(account_api(request).tenants.create(tenant_id, description, enabled)) def tenant_get(request, tenant_id): return Tenant(account_api(request).tenants.get(tenant_id)) @check_openstackx def tenant_list(request): return [Tenant(t) for t in account_api(request).tenants.list()] def tenant_update(request, tenant_id, description, enabled): return Tenant(account_api(request).tenants.update(tenant_id, description, enabled)) def token_create(request, tenant, username, password): return Token(auth_api().tokens.create(tenant, username, password)) def token_info(request, token): # TODO(mgius): This function doesn't make a whole lot of sense to me. The # information being gathered here really aught to be attached to Token() as # part of token_create. 
May require modification of openstackx so that the # token_create call returns this information as well hdrs = {"Content-type": "application/json", "X_AUTH_TOKEN": settings.OPENSTACK_ADMIN_TOKEN, "Accept": "text/json"} o = urlparse(token.serviceCatalog['identity'][0]['adminURL']) conn = httplib.HTTPConnection(o.hostname, o.port) conn.request("GET", "/v2.0/tokens/%s" % token.id, headers=hdrs) response = conn.getresponse() data = json.loads(response.read()) admin = False LOG.info(data) for role in data['auth']['user']['roleRefs']: if role['roleId'] == 'Admin': admin = True return {'tenant': data['auth']['user']['tenantId'], 'user': data['auth']['user']['username'], 'admin': admin} @check_openstackx def usage_get(request, tenant_id, start, end): return Usage(extras_api(request).usage.get(tenant_id, start, end)) @check_openstackx def usage_list(request, start, end): return [Usage(u) for u in extras_api(request).usage.list(start, end)] def user_create(request, user_id, email, password, tenant_id): return User(account_api(request).users.create( user_id, email, password, tenant_id)) def user_delete(request, user_id): account_api(request).users.delete(user_id) def user_get(request, user_id): return User(account_api(request).users.get(user_id)) @check_openstackx def user_list(request): return [User(u) for u in account_api(request).users.list()] def user_update_email(request, user_id, email): return User(account_api(request).users.update_email(user_id, email)) def user_update_password(request, user_id, password): return User(account_api(request).users.update_password(user_id, password)) def user_update_tenant(request, user_id, tenant_id): return User(account_api(request).users.update_tenant(user_id, tenant_id)) def swift_container_exists(container_name): try: swift_api().get_container(container_name) return True except cloudfiles.errors.NoSuchContainer: return False def swift_object_exists(container_name, object_name): container = swift_api().get_container(container_name) try: container.get_object(object_name) return True except cloudfiles.errors.NoSuchObject: return False def swift_get_containers(): return [Container(c) for c in swift_api().get_all_containers()] def swift_create_container(name): if swift_container_exists(name): raise Exception('Container with name %s already exists.' 
% (name)) return Container(swift_api().create_container(name)) def swift_delete_container(name): swift_api().delete_container(name) def swift_get_objects(container_name, prefix=None): container = swift_api().get_container(container_name) return [SwiftObject(o) for o in container.get_objects(prefix=prefix)] def swift_copy_object(orig_container_name, orig_object_name, new_container_name, new_object_name): container = swift_api().get_container(orig_container_name) if swift_object_exists(new_container_name, new_object_name) == True: raise Exception('Object with name %s already exists in container %s' % (new_object_name, new_container_name)) orig_obj = container.get_object(orig_object_name) return orig_obj.copy_to(new_container_name, new_object_name) def swift_upload_object(container_name, object_name, object_data): container = swift_api().get_container(container_name) obj = container.create_object(object_name) obj.write(object_data) def swift_delete_object(container_name, object_name): container = swift_api().get_container(container_name) container.delete_object(object_name) def swift_get_object_data(container_name, object_name): container = swift_api().get_container(container_name) return container.get_object(object_name).stream()
apache-2.0
248219022108928350
31.94863
79
0.653934
false
3.922936
false
false
false
leotrs/decu
test/notsosimple_project/src/script.py
1
1196
"""
testscript.py
-------------

This is a test script for decu.

"""

from decu import Script, experiment, figure, run_parallel
import numpy as np
import matplotlib.pyplot as plt


class TestScript(Script):
    @experiment(data_param='data')
    def exp(self, data, param, param2):
        """Compute x**param for each data point."""
        self.log.info('Working hard for {}..'.format(TestScript.exp.run))
        return np.power(data, param) + param2

    @figure()
    def plot_result(self, data, result):
        """Plot results of experiment."""
        plt.plot(data, result)

    @figure()
    def plot_many_results(self, data, results):
        """Plot results of experiment."""
        plt.figure()
        for res in results:
            plt.plot(data, res)

    def main(self):
        """Run some experiments and make some figures."""
        data = np.arange(5)
        result1 = self.exp(data, param=4, param2=10)
        self.plot_result(data, result1)

        param_list = [(data, x, y)
                      for x, y in zip(np.arange(5), np.arange(5, 10))]
        result2 = run_parallel(self.exp, param_list)
        self.plot_many_results(data, result2, suffix='parallel')
mit
5614853097660750000
26.813953
73
0.594482
false
3.624242
false
false
false
BinMatrix/camshift_ros
scripts/camshift_node.py
1
9812
#!/usr/bin/env python ''' Camshift node ================ This is a ros node that shows mean-shift based tracking You select a color objects such as your face and it tracks it. This subscrib from "/image" topic for reading image, and publish the information of target to "/TargetPositionSize" or "/roi" topic. The position and size have been normalized in "/TargetPositionSize". http://www.robinhewitt.com/research/track/camshift.html Usage: ------ To initialize tracking, select the object with mouse Keys: ----- ESC/q - exit b - toggle back-projected probability visualization s - save roi to file l - load roi from file to calculate hist ''' # Python 2/3 compatibility from __future__ import print_function import sys PY3 = sys.version_info[0] == 3 if PY3: xrange = range import numpy as np import cv2 import time import os # debug with pudb # import pudb; pu.db import rospy from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError # from mav_msgs.msg import TargetPositionSize from sensor_msgs.msg import Image, RegionOfInterest, CameraInfo class App: def __init__(self): self.roi_file = os.path.expanduser("~/roi.jpg") cv2.namedWindow('camshift', 1) cv2.setMouseCallback('camshift', self.onmouse) self.frame = None self.vis = None self.vis_roi = None self.selection = None self.drag_start = None self.show_backproj = False self.track_window = None self.track_box = None #rotated rect self.expand_ratio = 0.2 self.hist = None self.last_track = None self.fps = 0 self.fps_values = list() self.fps_n_values = 10 self.time_star = time.time() self.bridge = CvBridge() self.image_sub = rospy.Subscriber( "/image", Image, self.callback) # self.target_pub = rospy.Publisher( # "/TargetPositionSize", TargetPositionSize) self.roi_pub = rospy.Publisher("roi", RegionOfInterest) def onmouse(self, event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: self.drag_start = (x, y) self.track_window = None if event == cv2.EVENT_LBUTTONUP: self.drag_start = None self.track_window = self.selection if self.drag_start: xmin = min(x, self.drag_start[0]) ymin = min(y, self.drag_start[1]) xmax = max(x, self.drag_start[0]) ymax = max(y, self.drag_start[1]) self.selection = (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1) def show_hist(self): bin_count = self.hist.shape[0] bin_w = 24 img = np.zeros((256, bin_count * bin_w, 3), np.uint8) for i in xrange(bin_count): h = int(self.hist[i]) cv2.rectangle(img, (i * bin_w + 2, 255), ((i + 1) * bin_w - 2, 255 - h), (int(180.0 * i / bin_count), 255, 255), -1) img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR) cv2.imshow('hist', img) def show_hist_new(self): bin_count = self.hist.shape[0] bin_w = 1 img = np.zeros((256, bin_count * bin_w, 3), np.uint8) for i in xrange(bin_count): h = int(self.hist[i]) cv2.rectangle(img, (i * bin_w, 255), ((i + 1) * bin_w, 255 - h), (int(180.0 * i / bin_count), 255, 255), -1) img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR) cv2.imshow('hist', img) def expand_window(self, last_track): x, y, w, h = last_track row, col = self.frame.shape[:2] n_x0 = np.maximum(0, x - int(w * self.expand_ratio) - 1) n_y0 = np.maximum(0, y - int(h * self.expand_ratio) - 1) n_w = np.minimum(col, w + int(w * self.expand_ratio * 2) + 1) n_h = np.minimum(row, h + int(h * self.expand_ratio * 2) + 1) return (n_x0, n_y0, n_w, n_h) def cvBox2D_to_cvRect(self, roi): try: if len(roi) == 3: (center, size, angle) = roi pt1 = ( int(center[0] - size[0] / 2), int(center[1] - size[1] / 2)) pt2 = ( int(center[0] + size[0] / 2), int(center[1] + size[1] / 2)) rect = [pt1[0], pt1[1], pt2[0] - 
pt1[0], pt2[1] - pt1[1]] else: rect = list(roi) except: return [0, 0, 0, 0] return rect def publish_target(self): target = TargetPositionSize() height, width = self.frame.shape[:2] x, y, w, h = self.track_window target.center_x = (x + w / 2.0) / width * 2 - 1 target.center_y = 1 - (y + h / 2.0) / height * 2 target.size_x = float(w) / width target.size_y = float(h) / height self.target_pub.publish(target) def publish_roi(self): roi_box = self.track_window # roi_box = self.track_box try: roi_box = self.cvBox2D_to_cvRect(roi_box) except: return # Watch out for negative offsets roi_box[0] = max(0, roi_box[0]) roi_box[1] = max(0, roi_box[1]) try: roi = RegionOfInterest() roi.x_offset = int(roi_box[0]) roi.y_offset = int(roi_box[1]) roi.width = int(roi_box[2]) roi.height = int(roi_box[3]) self.roi_pub.publish(roi) except: rospy.loginfo("Publishing ROI failed") def display_fps(self): time_end = time.time() img_fps = int(1 / (time_end - self.time_star)) self.time_star = time_end self.fps_values.append(img_fps) if len(self.fps_values) > self.fps_n_values: self.fps_values.pop(0) self.fps = int(sum(self.fps_values) / len(self.fps_values)) cv2.putText(self.vis, "FPS: " + str(self.fps), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0)) def callback(self, data): try: cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") except CvBridgeError as e: print(e) self.frame = np.array(cv_image, dtype=np.uint8) self.vis = self.frame.copy() hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) mask = cv2.inRange( hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) if self.selection: x0, y0, w, h = self.selection hsv_roi = hsv[y0:y0 + h, x0:x0 + w] mask_roi = mask[y0:y0 + h, x0:x0 + w] self.hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180]) # self.hist = cv2.calcHist([hsv_roi], [0], mask_roi, [360], [0, 180]) cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX) self.hist = self.hist.reshape(-1) self.show_hist() # self.show_self.hist_new(self.hist) self.vis_roi = self.vis[y0:y0 + h, x0:x0 + w] cv2.bitwise_not(self.vis_roi, self.vis_roi) # highlight befitting object when selecting # self.vis[mask == 0] = 0 if self.track_window: # lost the target, expand last valid track window if self.track_window == (0, 0, 0, 0): self.track_window = self.expand_window(self.last_track) # print("Re-search at : ", self.track_window) self.last_track = self.track_window self.selection = None prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1) prob &= mask term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1) self.track_box, self.track_window = cv2.CamShift( prob, self.track_window, term_crit) # publish position and size of target, has been normalized. 
# self.publish_target() self.publish_roi() if self.show_backproj: self.vis[:] = prob[..., np.newaxis] try: cv2.ellipse(self.vis, self.track_box, (0, 0, 255), 2) except: print(self.track_box) # Compute the FPS and display in image self.display_fps() cv2.imshow('camshift', self.vis) ch = 0xFF & cv2.waitKey(1) if ch == 27 or ch == ord('q'): os._exit(0) if ch == ord('b'): self.show_backproj = not self.show_backproj if ch == ord('s'): if self.track_window == None: print("There has no tracked object!") return x, y, w, h = self.track_window cv2.imwrite(self.roi_file, self.frame[y:y+h, x:x+w]) print("Saved to ", self.roi_file) if ch == ord('l'): if not os.path.isfile(self.roi_file): print(self.roi_file, " is not exist!") return roi = cv2.imread(self.roi_file) print("Loaded from ", self.roi_file) roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) roi_mask = cv2.inRange( roi_hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) self.hist = cv2.calcHist([roi_hsv], [0], roi_mask, [16], [0, 180]) cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX) self.hist = self.hist.reshape(-1) self.show_hist() row, col = self.frame.shape[:2] self.track_window = (0, 0, col, row) if __name__ == '__main__': rospy.init_node('camshift', anonymous=True) cs = App() try: rospy.spin() except KeyboardInterrupt: print("Shutting down") cv2.destroyAllWindows()
gpl-3.0
-7288317239279681000
35.073529
110
0.528027
false
3.249007
false
false
false
rafallo/p2c
torrent/torrent.py
1
10334
# -*- coding: utf-8 -*- import hashlib import libtorrent as lt import logging from threading import Timer, Event import os import time from p2c.exceptions import SessionNotBindedException, TorrentHasNotMetadataYet import settings from torrent.movie import Movie SOURCE_TYPES = ("MAGNET", "TORRENT") logger = logging.getLogger(__name__) class Torrent(object): def __init__(self, source_type, source, name): """ :type source: str magnet or torrent file path :type name: str :type source_type: str """ if not source_type in SOURCE_TYPES: raise Exception( "source_type must be one of {0}".format(SOURCE_TYPES)) self.name = name self.source_type = source_type self.source = source self.torrent_handler = None self._torrent_info = None # dict where key is path and value is Movie instance # this is files which are downloading or downloaded self.files = None # piece_length in this torrent self.piece_length = None # amount of pieces which made up DOWNLOAD_PIECE_SIZE self._jump = None # if first prioritizing task was run once self._prioritized = False self.priority_interval = settings.PRIORITY_INTERVAL self._priority_thread_stop = Event() self._priority_timer = None # currently downloading Movie self._downloading = None def __del__(self): self._stop_torrent_threads() def __str__(self): return self.name def set_source(self, source, session): self.source = source if self.source: self.bind_session(session) def bind_session(self, session): """ Creates torrent handler based on source_type """ add_data = {} if self.source_type == "TORRENT": add_data['ti'] = lt.torrent_info(self.source) elif self.source_type == "MAGNET": add_data['url'] = self.source add_data['save_path'] = self._get_download_dir() add_data['storage_mode'] = lt.storage_mode_t(1) self.torrent_handler = session.add_torrent(add_data) self._prioritize_to_none() def get_filelist(self): info = self.get_torrent_info(wait=True) return [file.path for file in info.files()] def get_movies_filelist(self): if self.files is None: self._create_movies() return list(self.files.keys()) def get_movies(self): if self.files is None: self._create_movies() return list(self.files.values()) def download_file(self, filename:str): if not filename in self.get_movies_filelist(): raise Exception("filename not found in torrent") self._prioritize_to_none() self._downloading = self.files[filename] self._run_torrent_threads() def pause_download(self): self._stop_torrent_threads() self.torrent_handler.pause() self._downloading = None def has_torrent_info(self): """ Checks if torrent has downloaded metadata """ try: self.get_torrent_info() return True except (TorrentHasNotMetadataYet, SessionNotBindedException): return False def get_torrent_info(self, wait=False): """ Gets torrent's metadata """ if self._torrent_info != None: return self._torrent_info if self.torrent_handler is None: if wait: while not self.torrent_handler is None: time.sleep(0.1) else: raise SessionNotBindedException if not self.torrent_handler.has_metadata(): if wait: while not self.torrent_handler.has_metadata(): time.sleep(0.1) else: raise TorrentHasNotMetadataYet self._torrent_info = self.torrent_handler.get_torrent_info() return self._torrent_info def get_status(self): """ Gets torrent's status with field like download rate, peers number, state and progress level """ status = self.torrent_handler.status() state_str = ['queued', 'checking', 'downloading metadata', 'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume'] data = { 'download_rate': status.download_rate, 'download_payload_rate': 
status.download_payload_rate, 'num_peers': status.num_peers, 'state': state_str[status.state], 'progress': status.progress } return data def get_seconds_to_buffer(self): rate = self.get_status()['download_rate'] if(rate > 100 * 1024): # round to 100 kbs, 200 kbs, 300 kbs rate = int(rate / (100 * 1024)) * 100 * 1024 movie = self.get_downloading_movie() # minimum rate if movie and rate > 30 * 1024: return int(movie.pieces_to_play * movie.piece_length / rate) def get_downloading_movie(self): return self._downloading def _create_movies(self): info = self.get_torrent_info() files = info.files() self.piece_length = info.piece_length() self.priority_interval = settings.PRIORITY_INTERVAL * self.piece_length / ( 1024 ** 2) self._jump = int(settings.DOWNLOAD_PIECE_SIZE / self.piece_length) + 1 self.files = {} for file in files: ext = os.path.splitext(file.path)[1] if ext and ext[1:].lower() in settings.SUPPORTED_MOVIE_EXTENSIONS: first_piece = int(file.offset / self.piece_length) last_piece = int((file.size + file.offset) / self.piece_length) self.files[file.path] = Movie(path=file.path, size=file.size, first_piece=first_piece, last_piece=last_piece, piece_length=self.piece_length, download_dir=self._get_download_dir()) def _update_movies_progress(self): """ Updates movie progress based on number of downloaded pieces """ p_downloaded = self.torrent_handler.status().pieces movie = self.get_downloading_movie() first_piece, last_piece = movie.first_piece, movie.last_piece # logger.debug("first_piece: {}".format(first_piece)) # logger.debug("last_piece: {}".format(last_piece )) counter = 0 for item in p_downloaded[first_piece:last_piece]: if item == True: counter += 1 else: break # logger.debug("download_pieces inside thread is: {}".format(counter)) movie.downloaded_pieces = counter def _manage_pieces_priority(self): """ Sets priority blocks. First pieces should be downloaded first swo its have the highest priority. """ p_downloaded = self.torrent_handler.status().pieces movie = self.get_downloading_movie() if not movie: return first_piece, last_piece = movie.cur_first_piece, movie.cur_last_piece if not False in p_downloaded[first_piece:first_piece + self._jump + 1]: # all block downloaded first_piece += self._jump movie.cur_first_piece = first_piece # prioritezing # [7, 7, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...] 
if first_piece + self._jump + self._jump <= last_piece: for piece in range(first_piece + 4 * self._jump, last_piece + 1): # logger.debug("the lowest priority for: {}".format(piece)) self.torrent_handler.piece_priority(piece, 0) if first_piece + self._jump <= last_piece: for piece in range(first_piece + 2 * self._jump, min(last_piece + 1, first_piece + 4 * self._jump)): # logger.debug("low priority for: {}".format(piece)) self.torrent_handler.piece_priority(piece, 2) if first_piece <= last_piece: for piece in range(first_piece, min(last_piece + 1, first_piece + 2 * self._jump)): # logger.debug("the highest priority for: {}".format(piece)) self.torrent_handler.piece_priority(piece, 7) # for mp4 get 512KB end of file # TODO: bug below # for piece in range( # last_piece - int(self.piece_length / 512 * 1024) + 1, # last_piece): # logger.debug("the highest priority for (512KB end of file): {}".format(piece)) # self.torrent_handler.piece_priority(piece, 7) self._update_movies_progress() if not self._priority_thread_stop.is_set(): if self._priority_timer: self._priority_timer.cancel() self._priority_timer = None self._run_torrent_threads() def _run_torrent_threads(self): # logger.debug("run threads for {}".format(self.priority_interval)) if not self._priority_thread_stop.is_set(): if not self._priority_timer: self._priority_timer = Timer(self.priority_interval, self._manage_pieces_priority) self._priority_timer.start() def _stop_torrent_threads(self): self._priority_thread_stop.set() if self._priority_timer: self._priority_timer.cancel() def _prioritize_to_none(self): if not self._prioritized and self.has_torrent_info(): self._prioritized = True info = self.get_torrent_info() for piece in range(0, info.num_pieces()): self.torrent_handler.piece_priority(piece, 0) def _get_download_dir(self): path = os.path.join(settings.DOWNLOAD_DIR, hashlib.md5(self.name.encode()).hexdigest()) try: os.makedirs(path) except OSError: pass return path
mit
2,340,498,763,809,516,500
35.259649
111
0.561641
false
4.171982
false
false
false
dhaitz/CalibFW
plotting/modules/plot_sandbox.py
1
75936
# -*- coding: utf-8 -*- """ plotting sanbox module for merlin. This module is to be used for testing or development work. """ import plotbase import copy import plot1d import getroot import math import plotresponse import plotfractions import plot2d import plot_tagging import fit import os def recogen_alpha_ptbins(files, opt): """ recogen vs alpha as well as Z pT vs alpha in pT bins. """ zptbins = [ "1", "zpt>30 && zpt<50", "zpt>50 && zpt<70", "zpt>70 && zpt<120", "zpt>120" ] texts = [ "$\mathrm{inclusive}$", "$30 < \mathrm{Z} p_\mathrm{T} < 50\ \mathrm{GeV}$", "$50 < \mathrm{Z} p_\mathrm{T} < 70\ \mathrm{GeV}$", "$70 < \mathrm{Z} p_\mathrm{T} < 120\ \mathrm{GeV}$", "$\mathrm{Z}\ p_\mathrm{T} > 120\ \mathrm{GeV}$", ] fig, axes = plotbase.newPlot(subplots = len(zptbins * 2), subplots_X = len(zptbins)) settings = plotbase.getSettings(opt, quantity='recogen_alpha') for ax1, ax2, selection, text in zip(axes[:(len(axes)/2)], axes[(len(axes)/2):], zptbins, texts): plot1d.datamcplot("recogen_alpha", files, opt, fig_axes = [fig, ax1], changes={ 'allalpha': True, 'y': [0.99, 1.1], 'subplot': True, 'nbins': 6, 'fit': 'slope', 'x': [0, 0.3], 'text': text, 'selection': [selection], } ) plot1d.datamcplot("zpt_alpha", files, opt, fig_axes = [fig, ax2], changes={ 'allalpha': True, 'y': [0, 300], 'subplot': True, 'nbins': 6, 'x': [0, 0.3], 'text': text, 'selection': [selection], } ) plotbase.Save(fig, settings) def corrs(files, opt): fig, ax = plotbase.newPlot() settings = plotbase.getSettings(opt, quantity='recogen_genpt') for quantity, marker, color, label in zip( ['raw/recogen_genpt', 'l1/recogen_genpt', 'l1l2l3/recogen_genpt'], ['o', 'D', '-'], ['black', '#7293cb', '#e1974c'], ['raw', 'L1', 'L1L2L3'] ): plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={ 'algorithm': "", 'markers':[marker], 'colors':[color], 'labels':[label, ""], 'correction':"", 'subplot':True, 'grid': True, 'y': [0.9, 1.5], 'legloc': 'upper right', 'x': [20, 100], 'yname': 'recogen', 'xname':'genpt' }) settings['filename'] = plotbase.getDefaultFilename('recogen', opt, settings) plotbase.Save(fig, settings) def corrbins(files, opt): fig, ax = plotbase.newPlot() settings = plotbase.getSettings(opt, quantity='recogen') for quantity, marker, color, label, n in zip( ['l1l2l3/recogen3040', 'l1l2l3/recogen5080', 'l1l2l3/recogen100'], ['o', 'f', '-'], ['black', '#7293cb', '#e1974c'], ['pT 20-40', 'pT 50-80', 'pT >100'], range(10) ): plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={ 'algorithm': "", 'markers':[marker], 'colors':[color], 'labels':[label, ""], 'correction':"", 'subplot':True, 'grid': True, 'fitlabel_offset':-0.07*n, 'legloc': 'upper right', 'x': [0, 2], 'xname':'recogen' }) settings['filename'] = plotbase.getDefaultFilename('recogen-bins', opt, settings) plotbase.Save(fig, settings) def zmassFitted(files, opt, changes=None, settings=None): """ Plots the FITTED Z mass peak position depending on pT, NPV, y.""" quantity = "zmass" # iterate over raw vs corr electrons for mode in ['raw', 'corr']: filenames = ['work/data_ee_%s.root' % mode, 'work/mc_ee_powheg_%s.root' % mode] files, opt = plotbase.openRootFiles(filenames, opt) # iterate over quantities for xq, xbins in zip( ['npv', 'zpt', 'zy'], [ [a - 0.5 for a, b in opt.npv] + [opt.npv[-1][1] - 0.5], opt.zbins, [(i/2.)-2. 
for i in range(0, 9)], ] ): # iterate over Z pt (inclusive/low,medium,high) for ptregion, ptselection, ptstring in zip(["_inclusivept", "_lowpt", "_mediumpt", "_highpt"], [ "1", "zpt<60", "zpt>60 && zpt < 120", "zpt>120", ], [ "", "Z $p_\mathrm{T}$ < 60 GeV", "60 < Z $p_\mathrm{T}$ < 120 GeV", "Z $p_\mathrm{T}$ > 120 GeV", ]): # iterate over electron eta regions for etaregion, etaselection, etastring in zip( ["_all", "_EBEB", "_EBEE", "_EEEE"], [ "1", "abs(eminuseta) < 1.5 && abs(epluseta) < 1.5", "((abs(eminuseta) < 1.5 && abs(epluseta) > 1.6) || (abs(epluseta) < 1.5 && abs(eminuseta) > 1.6))", "abs(eminuseta) > 1.6 && abs(epluseta) > 1.6", ], [ "", "EB-EB", "EB-EE & EE-EB", "EE-EE", ]): # we dont need pt-binned Z pT plots: if xq == 'zpt' and ptselection is not "1": continue rootobjects, rootobjects2 = [], [] fig = plotbase.plt.figure(figsize=[7, 10]) ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax.number = 1 ax2 = plotbase.plt.subplot2grid((3, 1), (2, 0)) ax2.number = 2 fig.add_axes(ax) fig.add_axes(ax2) # print the Z pt and electron eta region on the plot ax.text(0.98, 0.98, ptstring, va='top', ha='right', transform=ax.transAxes) ax.text(0.98, 0.9, etastring, va='top', ha='right', transform=ax.transAxes) changes = { 'y': [90.8, 94.8], 'yname': r'$m^{\mathrm{Z}}$ (peak position from Breit-Wigner fit) / GeV', 'legloc': 'upper left', 'title': mode + " electrons", 'labels': ['Data', 'Powheg'], } settings = plotbase.getSettings(opt, changes=changes, quantity=quantity + "_" + xq) # iterate over files markers = ['o', 'D'] ys, yerrs, xs = [], [], [] for i, f in enumerate(files): bins = xbins y, yerr, x = [], [], [] # iterate over bins for lower, upper in zip(bins[:-1], bins[1:]): changes = { 'selection': ['(%s > %s && %s < %s) && (%s) && (%s)' % (xq, lower, xq, upper, ptselection, etaselection)], 'nbins': 40, 'folder': 'zcuts', 'x': [71, 101], } local_settings = plotbase.getSettings(opt, changes, None, quantity) # get the zmass, fit, get the xq distribution; append to lists rootobjects += [getroot.histofromfile(quantity, f, local_settings, index=i)] p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], breitwigner=True, limits=local_settings['x']) y += [p1] yerr += [p1err] changes['x'] = [lower, upper] local_settings = plotbase.getSettings(opt, changes, None, quantity) rootobjects2 += [getroot.histofromfile(xq, f, local_settings, index=i)] x += [rootobjects2[-1].GetMean()] # fine line to indicate bin borders ax.add_line(plotbase.matplotlib.lines.Line2D((lower, upper), (y[-1],y[-1]), color='black', alpha=0.05)) ys.append(y) yerrs.append(yerr) xs.append(x) #plot ax.errorbar(x, y, yerr, drawstyle='steps-mid', color=settings['colors'][i], fmt=markers[i], capsize=0, label=settings['labels'][i]) # format and save if xq == 'zpt': settings['xlog'] = True settings['x'] = [30, 1000] settings['xticks'] = [30, 50, 70, 100, 200, 400, 1000] plot1d.formatting(ax, settings, opt, [], []) # calculate ratio values ratio_y = [d/m for d, m in zip(ys[0], ys[1])] ratio_yerrs = [math.sqrt((derr/d)**2 + (merr/m)**2)for d, derr, m, merr in zip(ys[0], yerrs[0], ys[1], yerrs[1])] ratio_x = [0.5 * (d + m) for d, m in zip(xs[0], xs[1])] #format ratio plot ax2.errorbar(ratio_x, ratio_y, ratio_yerrs, drawstyle='steps-mid', color='black', fmt='o', capsize=0, label='ratio') ax.axhline(1.0) fig.subplots_adjust(hspace=0.1) ax.set_xticklabels([]) ax.set_xlabel("") settings['ratio'] = True settings['legloc'] = None settings['xynames'][1] = 'ratio' plot1d.formatting(ax2, settings, 
opt, [], []) ax2.set_ylim(0.99, 1.01) settings['filename'] = plotbase.getDefaultFilename(quantity + "_" + xq + "_" + mode + ptregion + etaregion, opt, settings) plotbase.Save(fig, settings) def zmassEBEE(files, opt): """ Plot the Z mass depending on where the electrons are reconstructed. 3 bins: EB-EB, EB-EE, EE-EE """ selections = [ 'abs(eminuseta)<1.5 && abs(epluseta)<1.5', '(abs(eminuseta)>1.5 && abs(epluseta)<1.5) || abs(eminuseta)<1.5 && abs(epluseta)>1.5', 'abs(eminuseta)>1.5 && abs(epluseta)>1.5', ] filenames = ['zmass_ebeb', 'zmass_ebee', 'zmass_eeee'] titles = ['Barrel electrons only', 'One electron barrel, one endcap', 'Endcap electrons only'] for selection, filename, title in zip(selections, filenames, titles): plot1d.plot1dratiosubplot("zmass", files, opt, changes = { 'x': [81, 101], 'selection': [selection, "hlt * (%s)" % selection], 'fit': 'bw', 'nbins': 40, 'filename': filename, 'title': title, 'folder': 'zcuts', }) def eid(files, opt): quantity = 'mvaid' """changes = { 'x': [0, 1.0001], #'log': True, 'folder': 'electron_all', 'nbins':50, 'subplot':True, 'markers': ['f'], } settings = plotbase.getSettings(opt, quantity=quantity) fig, ax = plotbase.newPlot() for c, l, s in zip(['#236BB2', '#E5AD3D'], ['fake', 'true'], ['1', 'deltar < 0.3 && deltar>0']): changes.update({ 'labels': [l], 'colors': [c], 'selection': s, }) plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes=changes) settings['filename'] = plotbase.getDefaultFilename(quantity, opt, settings) plotbase.Save(fig, settings)""" ## id vs deltar for quantity in ["mvaid", "mvatrigid", "looseid", "mediumid", "tightid"]: plot1d.datamcplot("%s_deltar" % quantity, files, opt, changes = { 'folder': 'electron_all', 'nbins': 50, 'xynames': ['$\Delta$R(reco, gen)', quantity], 'x': [0, 0.5], 'legloc': None, }) def plots_2014_07_03(files, opt): """ Plots for JEC presentation 03.07. 
""" #### 2D histograms for obj, x, nbins in zip(['muon', 'jet', 'electron'], [[-2.5, 2.5], [-5.3, 5.3]]*2, [400, 1000, 300]): changes = { 'out': 'out/2014_07_03', 'y': [-3.2, 3.2], } changes.update({ 'folder': obj + "_all", 'nbins': nbins, 'x':x, 'filename': obj + '_phi_eta', 'xynames': ['%s eta' % obj, '%s phi' % obj, obj + 's'], }) if obj is 'electron': filenames = ["data_ee_noc", "mc_ee_corr_test"] else: filenames = ["data_noc", "mc_rundep_noc"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] plot2d.twoD("phi_eta", files, opt, changes = changes) if obj is not 'electron': changes.update({ 'year': 2011, 'filename': obj + '_phi_eta_2011', 'lumi': 5.1, 'energy': 7, }) filenames = ["data11_noc"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] plot2d.twoD("phi_eta", files, opt, changes = changes) ##### PU Jet ID filenames = ["dataPUJETID", "data"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'normalize': False, 'ratiosubplot': 'True', 'ratiosubploty': [0.8, 1.2], 'out': 'out/2014_07_03', 'x': [30, 250], 'title': 'Data', 'labels': ['PUJetID applied', 'default'], } plot1d.datamcplot('zpt', files, opt, changes=changes) for typ in ['mpf', 'ptbalance']: plotresponse.responseratio(files, opt, over='zpt', types=[typ], changes={ 'labels': ['PUJetID applied', 'default'], 'out': 'out/2014_07_03', 'x': [30, 1000], 'xlog': True, }) ##### timedep filenames = ["data", "mc_rundep"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'out': 'out/2014_07_03', 'filename': "timedep", } timedep(files, opt, changes=changes) ###### MPF fix filenames = [ "/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-18_10-41/out.root", "/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root" ] files = [getroot.openfile(f) for f in filenames] plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={ 'labels': ['MCRD-fixed', 'MCRD'], 'xlog': True, 'filename': "mpf_zpt-fixed", 'out': 'out/2014_07_03', 'x': [30, 1000], 'xticks': [30, 50, 70, 100, 200, 400, 1000], }) # mpf slopes filenames = ["data", "mc_rundep"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'filename': "mpfslopes-fixed", 'labels': ['data', 'MCRD'], 'out': 'out/2014_07_03', 'allalpha': True, 'selection': 'alpha<0.3', } mpfslopes(files, opt, changes) changes.update({ 'filename': "mpfslopes", 'labels': ['data', 'MCRD'], }) filenames = [ '/storage/a/dhaitz/excalibur/artus/data_2014-04-10_21-21/out.root', '/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root' ] files = [getroot.openfile(f) for f in filenames] mpfslopes(files, opt, changes) # SYNC os.system("rsync ${EXCALIBUR_BASE}/out/2014_07_03 ekplx26:plots -r") def timedep(files, opt, changes = None): """ Plots for the time dependence, requested by Mikko 2014-06-25.""" settings = plotbase.getSettings(opt, quantity="response_run", changes=changes) fig, ax = plotbase.newPlot() factor = 2e4 methods = ['mpf', 'ptbalance'] labels = ['MPF', '$p_T$ balance'] for q, c, l, m, in zip(methods, settings['colors'], labels, settings['markers']): slopes, serrs, x = [], [], [] for eta1, eta2 in zip(opt.eta[:-1], opt.eta[1:]): changes = { 'alleta': True, 'allalpha': True, 
'selection': 'alpha<0.3 && abs(jet1eta) > %s && abs(jet1eta) < %s' % (eta1, eta2), 'fit': 'slope', } rootobject = getroot.histofromfile("%s_run" % q, files[0], settings, changes=changes) # get fit parameters slope, serr = fit.fitline2(rootobject)[2:4] slopes += [slope*factor] serrs += [serr*factor] changes['x'] = [0, 6] x += [getroot.histofromfile("abs(jet1eta)", files[0], settings, changes=changes).GetMean()] ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c, fmt='o', capsize=0, label=l) #formatting stuff settings['x'] = [0, 5] plotbase.setAxisLimits(ax, settings) plotbase.labels(ax, opt, settings) plotbase.axislabels(ax, 'Leading jet $\eta$', 'Response vs run: linear fit slope (muliplied with 20 000)', settings=settings) ax.set_ylim(-0.1, 0.05) ax.set_xlim(0, 5.25) ax.grid(True) ax.set_xticks([float("%1.2f" % eta) for eta in opt.eta]) for label in ax.get_xticklabels(): label.set_rotation(45) ax.axhline(0.0, color='black', linestyle='--') settings['filename'] = quantity="response_run" plotbase.Save(fig, settings) def npuplot(files, opt): """ Plots for the JEC paper that Mikko requested 24.4.: npv and rho in bins of npu.""" settings = plotbase.getSettings(opt, quantity='npv') settings['x'] = [-0.5, 99.5] settings['nbins'] = 100 tgraphs = [] for f in files: if files.index(f) == 0: # flag bad runs in data runs = "run!=191411 && run!=198049 && run!=198050 && run!=198063 && run!=201727 && run!=203830 && run!=203832 && run!=203833 && run!=203834 && run!=203835 && run!=203987 && run!=203992 && run!=203994 && run!=204100 && run!=204101 && run!=208509" else: runs = 1 npuhisto = getroot.histofromfile('nputruth', f, settings) for i in range(100): if npuhisto.GetBinContent(i) > 0: npu = i tgraph = ROOT.TGraphErrors() for n in range(npu): changes = {'selection': 'nputruth>%s && nputruth<%s && %s' % (n-0.5, n+0.5, runs)} npv = getroot.histofromfile('npv', f, settings, changes=changes).GetMean() npverr = getroot.histofromfile('npv', f, settings, changes=changes).GetMeanError() rho = getroot.histofromfile('rho', f, settings, changes=changes).GetMean() rhoerr = getroot.histofromfile('rho', f, settings, changes=changes).GetMeanError() tgraph.SetPoint(n, npv, rho) tgraph.SetPointError(n, npverr, rhoerr) tgraphs.append(tgraph) settings['root'] = settings['root'] or settings['filename'] getroot.saveasroot(tgraphs, opt, settings) def electronupdate(files, opt): """Plots for the Zee update 26.06.2014.""" # Reco/gen electron pt vs eta filenames = ['mc_ee_raw', 'mc_ee_corr'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes={ 'x': [0, 2.5], 'y': [0.9, 1.1], 'nbins': 25, 'labels': ['raw', 'corrected'], 'markers': ['o', '-'], 'colors': ['maroon', 'blue'], 'folder':'zcuts', 'y': [0.94, 1.06], 'title': 'Madgraph', 'xynames': [ r"$|\eta_{e^{-}} \| $", r'$\mathrm{e}^{-} p_\mathrm{T}$ Reco/Gen' ] } plot1d.datamcplot('eminuspt/geneminuspt_abs(eminuseta)', files, opt, changes=changes) changes={ 'ratiosubplot': True, 'title': 'Madgraph', 'x': [0, 1000], 'log': True, 'labels': ['raw', 'corrected'], 'folder': 'all', 'ratiosubplotfit': 'chi2', } plot1d.datamcplot('zpt', files, opt, changes=changes) #LHE information fig, ax = plotbase.newPlot() fig2, ax2 = plotbase.newPlot() changes ={ 'folder':'all', 'x': [-4, 4], 'y': [0, 200000], 'subplot': True, 'nbins':50, 'normalize': False, 'xynames': ['Z rapidity', 'Events'], 'log':True, } for q, c, m, l in zip( ['zy', 'genzy', 'lhezy'], ['black', 'lightskyblue', 'FireBrick'], ['o', 'f', '-'], 
['RecoZ', 'GenZ', 'LHE-Z'], ): changes['labels'] = [l] changes['markers'] = [m] changes['colors'] = [c] plot1d.datamcplot(q, files[1:], opt, changes=changes, fig_axes=[fig, ax]) settings = plotbase.getSettings(opt, None, None, 'rapidity') settings['filename'] = 'rapidity' plotbase.Save(fig, settings) # Data-MC comparisons ###################################################### # basic quantities filenames = ['data_ee_corr', 'mc_ee_corr'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'x': [-3, 3], 'y': [-3.2, 3.2], 'folder': 'all', 'nbins': 200, } plot2d.twoD('eminusphi_eminuseta', files, opt, changes=changes) for q, c in zip(['eminuspt', 'eminuseta', 'zy', 'zpt', 'zmass'], [ {}, {'x': [-2.5, 2.5]}, {}, {'x': [0, 500], 'log':True}, {'x': [80, 102], 'ratiosubploty':[0.9, 1.1]}, ]): changes = { 'labels': ['Data', 'Madgraph'], 'ratiosubplot': True, 'folder':'zcuts', 'nbins': 50, } changes.update(c) plot1d.datamcplot(q, files, opt, changes=changes) # scale factors changes = { 'x': [0, 100], 'y': [0, 3], 'z': [0.8, 1.2], 'folder': 'all', 'nbins': 100, 'selection': 'sfminus>0', 'colormap': 'bwr', } plot2d.twoD('sfminus_abs(eminuseta)_eminuspt', files[1:], opt, changes=changes) # zpt in rapidities for ybin in [[i/2., (i+1)/2.] for i in range(5)]: changes = { 'x': [0, 600], 'nbins': 30, 'folder':'zcuts', 'title': "%s < $y_Z$ < %s" % tuple(ybin), 'log': 'True', 'ratiosubplot': True, 'selection': 'abs(zy)>%s && abs(zy)<%s' % (ybin[0], ybin[1]), 'filename': ('zpt_rap-%s-%s' % (ybin[0], ybin[1])).replace('.', '_'), } plot1d.datamcplot('zpt', files, opt, changes=changes) # scale factor changes = { 'labels': ['Madgraph'], 'ratiosubplot': True, 'xynames':['eminuspt', r"$|\eta_{e^{-}} \| $"], 'folder':'all', 'x': [0, 60], 'y': [0, 3], 'colormap': 'bwr', 'z': [0.5, 1], } q = 'sfminus_abs(eminuseta)_eminuspt' plot2d.twoD(q, files[1:], opt, changes=changes) ############## # Plot for ID acceptance fig, ax = plotbase.newPlot() changes ={ 'folder':'all', 'x': [0, 150], 'y': [0, 1], 'subplot': True, 'normalize': False, 'legloc': 'lower right', 'xynames': ['eminuspt', 'Acceptance'] } filenames = ['mc_ee_corr_noid'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] for q, c, m, l in zip( ['eminusidtight', 'eminusidmedium', 'eminusidloose', 'eminusidveto', 'eminusid'], ['lightskyblue', 'FireBrick', 'green', 'black', 'blue'], ['f', '_', '-', "o", "*"], ['Tight ID', 'Medium ID', 'Loose ID', "Veto ID", "MVA ID"], ): changes['labels'] = [l] changes['markers'] = [m] changes['colors'] = [c] plot1d.datamcplot("%s_eminuspt" % q, files, opt, changes=changes, fig_axes=[fig, ax]) settings = plotbase.getSettings(opt, None, None, 'id') settings['filename'] = 'id' settings['title'] = 'MC' plotbase.Save(fig, settings) def mpfslopes(files, opt, changes=None): """ Plot the slope of a linear fit on MPF vs NPV, in Z pT bins.""" quantity="mpf_npv" settings = plotbase.getSettings(opt, quantity=quantity, changes=changes) settings['special_binning'] = True print opt.zbins fig, ax = plotbase.newPlot() for f, c, l, m, in zip(files, settings['colors'], settings['labels'], settings['markers']): slopes, serrs, x = [], [], [] # iterate over Z pT bins for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]): changes = {'selection':'zpt>%s && zpt<%s' % (ptlow, pthigh)} rootobject = getroot.histofromfile(quantity, f, settings, changes=changes) # get fit parameters and mean Z pT; append to 
lists slope, serr = fit.fitline2(rootobject)[2:4] slopes += [slope] serrs += [serr] x += [getroot.histofromfile("zpt", f, settings, changes=changes).GetMean()] ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c, fmt='o', capsize=0, label=l) #formatting stuff settings['x'] = [30, 100] plotbase.setAxisLimits(ax, settings) plotbase.labels(ax, opt, settings) ax.set_xscale('log') settings['xticks'] = opt.zbins plotbase.axislabels(ax, 'zpt', 'slope from fit on MPF vs NPV', settings=settings) ax.set_ylim(-0.002, 0.002) ax.grid(True) ax.axhline(0.0, color='black', linestyle='--') plotbase.Save(fig, settings) def pileup(files, opt): for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]): plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={ 'allalpha':True, 'selection':'alpha<0.3 && zpt>%s && zpt<%s' % (ptlow, pthigh), 'filename': "mpf_npv_%s-%s" % (ptlow, pthigh) } ) def emucomparison(files, opt): values = [] valueerrs = [] for filenames in [['data', 'mc'], ['data_ee', 'mc_ee']]: files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] for quantity in ['mpf', 'ptbalance']: settings = plotbase.getSettings(opt, None, None, quantity) settings['nbins'] = 40 settings['correction'] = 'L1L2L3' if 'ee' in filenames[0]: if settings['selection']: settings['selection'] = 'abs(epluseta<1.0) && abs(eminuseta)<1.0 && %s' % settings['selection'] else: settings['selection'] = 'abs(epluseta<1.0) && abs(eminuseta)<1.0' datamc = [] rootobjects = [] fitvalues = [] for f in files: rootobjects += [getroot.histofromfile(quantity, f, settings)] p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], gauss=True, limits=[0, 2]) fitvalues += [p1, p1err] ratio = fitvalues[0] / fitvalues[2] ratioerr = math.sqrt(fitvalues[1] ** 2 + fitvalues[3] ** 2) values.append(ratio) valueerrs.append(ratioerr) fig, ax = plotbase.newPlot() ax.errorbar(range(4), values, valueerrs, drawstyle='steps-mid', color='black', fmt='o', capsize=0,) ax.set_xticks([0, 1, 2, 3]) ax.set_xticklabels(['Zmm\nMPF', 'Zmm\npT balance', 'Zee\nMPF', 'Zee\npT balance']) ax.set_xlim(-0.5, 3.5) ax.set_ylim(0.96, 1.001) ax.axhline(1.0, color='black', linestyle=':') ax.set_ylabel('Jet response Data/MC ratio', ha="right", x=1) plotbase.Save(fig, settings) def electrons(files, opt): """ Standard set of plots for the dielectron analysis. 
""" filenames = ['data_ee', 'mc_ee'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] base_changes = { 'out': 'out/ee2014', 'folder': 'zcuts', # no additional restrictions on jets 'normalize': False, # no normalizing to check if the lumi reweighting works 'factor': 1., # on the fly lumi reweighting 'efficiency': 1., # no trigger reweighting for electrons 'ratiosubplot': True, } # zmass with fit changes = { 'legloc': 'center right', 'nbins': 50, 'fit': 'gauss' } changes.update(base_changes) plot1d.datamcplot('zmass', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) changes['legloc'] = 'center right' changes['filename'] = 'zmass_barrel' changes['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0' changes['title'] = '|eta(e)| < 1.0' changes['fit'] = 'gauss' plot1d.datamcplot('zmass', files, opt, changes=changes) changes['filename'] = 'zmass_endcap' changes['selection'] = 'abs(epluseta)>1.0 && abs(eminuseta)>1.0' changes['title'] = '|eta(e)| > 1.0' changes['fit'] = 'gauss' plot1d.datamcplot('zmass', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) # Z pT in rapidity bins rapbins = ['abs(zy)<1', 'abs(zy)>1 && abs(zy)<2', 'abs(zy)>2 && abs(zy)<3'] raplabels = ['|Y(Z)|<1', '1<|Y(Z)|<2', '2<|Y(Z)|<3'] rapname = ['0zy1', '1zy2', '2zy3'] for rbin, rlabel, rname in zip(rapbins, raplabels, rapname): changes = { 'selection': rbin, 'filename': 'zpt-%s' % rname, 'x': [30, 750], 'log': True, 'title': rlabel, 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('zpt', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) # npv changes = { 'folder': 'all', } changes.update(base_changes) changes['folder'] = 'all' plot1d.datamcplot('npv', files, opt, changes=changes) changes['noweighting'] = True changes['factor'] = 3503.71 / 30459503 * 1000 changes['filename'] = 'npv_noweights' plot1d.datamcplot('npv', files, opt, changes=changes) changes['noweighting'] = True changes['factor'] = 3503.71 / 30459503 * 1000 changes['filename'] = 'npv_noweights' plot1d.datamcplot('npv', files, opt, changes=changes) # z pt and rapidity changes = { 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('zy', files, opt, changes=changes) plot1d.datamcplot('zeta', files, opt, changes=changes) changes['x'] = [30, 750] changes['log'] = True plot1d.datamcplot('zpt', files, opt, changes=changes) #powheg comparison filenames = ['data_ee', 'mc_ee', 'mc_ee_powheg'] files = [getroot.openfile("%s/work/%s.root" % 
(plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'log': True, 'x': [30, 750], 'nbins': 40, 'filename': 'zpt_mad-pow', 'labels': ['Data', 'Madgraph', 'Powheg'], } changes.update(base_changes) plot1d.datamcplot('zpt', files, opt, changes=changes) changes = { 'nbins': 40, 'filename': 'zmass_mad-pow', 'labels': ['Data', 'Madgraph', 'Powheg'], } changes.update(base_changes) plot1d.datamcplot('zmass', files, opt, changes=changes) files = files[::2] filenames = filenames[::2] changes = { 'log':True, 'x': [30, 750], 'nbins': 40, 'filename': 'zpt_pow', 'labels':['Data', 'Powheg'], } changes.update(base_changes) plot1d.Datamcplot('zpt', files, opt, changes=changes) #backgrounds filenames = ['Data_ee', 'mc_ee', 'background_ee'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'log': True, 'x': [30, 750], 'filename': 'zpt_backgrounds', 'labels': ['Data', 'MC', 'Backgrounds'], 'markers': ['o', 'f', 'f'], 'stacked': True, 'ratiosubplot': False, } changes.update(base_changes) changes['ratiosubplot'] = False plot1d.datamcplot('zpt', files, opt, changes=changes) changes.pop('x', None) changes['filename'] = 'zmass_backgrounds' changes['log'] = False changes['ratiosubplot'] = False plot1d.datamcplot('zmass', files, opt, changes=changes) # sync the plots import subprocess subprocess.call(['rsync out/ee2014 dhaitz@ekplx26:plots/ -u -r --progress'], shell=True) """ merlin 2D_zmass_zpt --files $DATAEE $ARGS -x 0 50 --nbins 100 -y 80 100 -o $OUT merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 120 -C lightskyblue -m f --folder all merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 15 --filename eemass_low -C lightskyblue -m f --folder all merlin 2D_zpt_zy -o $OUT --files $DATAEE $ARGS -y 0 100 --nbins 100 """ def an(files, opt): """ Plots for the 2014 Z->mumu JEC AN.""" """ #MET for quantity in ['METpt', 'METphi']: plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'}) plot1d.datamcplot("npv", files, opt, changes = {'folder': 'all', 'title': 'CMS preliminary'}) for n in ['1', '2']: for quantity in ['pt', 'eta', 'phi']: plot1d.datamcplot('mu%s%s' % (n, quantity), files, opt, changes = {'title': 'CMS preliminary'}) if n is '2' and quantity is 'eta': plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'nbins': 10, 'correction': 'L1L2L3', 'title': 'CMS preliminary'}) else: plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'correction': 'L1L2L3', 'title': 'CMS preliminary'}) for quantity in ['zpt', 'zeta', 'zy', 'zphi', 'zmass']: plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'}) #response stuff plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={'y': [0.98, 1.03, 0.96, 1.03], 'x': [0, 400, 0, 400]}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'], changes={'y': [0.95, 1.1, 0.93, 1.1]}) plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={'y': [0.95, 1.05, 0.92, 1.03], 'x': [0, 35, 0, 35]}) plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'], changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400]}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.93, 1.1]}) plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 35, 0, 35]}) """ for q in ['mpf', 'ptbalance']: 
plot1d.datamcplot(q, files, opt, changes={'correction': 'L1L2L3', 'legloc': 'center right', 'nbins': 100, 'fit': 'gauss'}) plotresponse.extrapol(files, opt, changes={'save_individually': True, 'correction': 'L1L2L3'}) """ plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400], 'title': 'CMS preliminary'}) plotfractions.fractions(files, opt, over='jet1abseta', changes = {'title': 'CMS preliminary'}) plotfractions.fractions(files, opt, over='npv', changes = {'title': 'CMS preliminary'}) for changes in [{'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'}, {'alleta':True, 'rebin':10, 'selection':'jet1abseta>2.5 && jet1abseta<2.964', 'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]: if 'alleta' in changes: opt.out += '/ECOT' opt.user_options['out'] += '/ECOT' plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6) plotresponse.response_run(files, opt, changes=changes) opt.out = opt.out[:-5] opt.user_options['out'] = opt.user_options['out'][:-5] else: plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes) plotresponse.response_run(files, opt, changes=changes) changes['y'] = [0.84, 1.2] plot2d.twoD("qgtag_btag", files, opt, changes = {'title': 'CMS Preliminary', 'nbins':50} ) plot_tagging.tagging_response(files, opt) plot_tagging.tagging_response_corrected(files, opt) """ ## MCONLY if len(files) > 1: files = files[1:] """ # PF composition as function of mc flavour flavour_comp(files, opt, changes={'title': 'CMS Simulation','mconly':True}) # response vs flavour for var in [True, False]: plotresponse.response_physflavour(files, opt, changes={'title': 'CMS Simulation','mconly':True}, add_neutrinopt=var, restrict_neutrals=var, extrapolation=var) plotfractions.flavour_composition(files, opt, changes={'title': 'CMS Simulation','mconly':True}) plotfractions.flavour_composition_eta(files, opt, changes={'title': 'CMS Simulation','mconly':True, 'selection': 'zpt>95 && zpt<110'}) changes = {'cutlabel' : 'ptetaalpha', 'labels' : ['Pythia 6 Tune Z2*', 'Herwig++ Tune EE3C'], 'y' : [0.98, 1.05], 'markers' : ['o', 'd'], 'colors' : ['red', 'blue'], 'title' : 'CMS Simulation', 'mconly' : True, 'legloc' : 'lower left', 'filename': 'recogen_physflavour_pythia-herwig'} files += [getroot.openfile("/storage/a/dhaitz/excalibur/work/mc_herwig/out/closure.root")] plot1d.datamcplot("recogen_physflavour", files, opt, changes=changes) """ def eleven(files, opt): """ Summary of the plots for the response studies with 2011 rereco. 
""" runrange = [160000, 183000] plot1d.datamcplot('npv', files, opt, changes={'rebin': 1}) plot1d.datamcplot('zmass', files, opt, changes={'fit': 'vertical', 'legloc': 'center right'}) plotresponse.extrapol(files, opt) plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={'y': [0.98, 1.03, 0.96, 1.03], 'uncertaintyband': True, 'x': [0, 400, 0, 400]}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'], changes={'y': [0.95, 1.1, 0.93, 1.1], 'uncertaintyband': True}) plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={'y': [0.95, 1.05, 0.92, 1.03], 'uncertaintyband': True, 'x': [0, 18, 0, 18]}) plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'], changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400], 'uncertaintyband': True}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.93, 1.1], 'uncertaintyband': True}) plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 18, 0, 18], 'uncertaintyband': True}) plot1d.datamcplot('npv_run', files, opt, changes={'x': runrange, 'y': [0, 15], 'run': True, 'fit': True}) plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400]}) plotfractions.fractions(files, opt, over='jet1abseta') plotfractions.fractions(files, opt, over='npv', changes={'x': [-0.5, 24.5]}) for changes in [{'x': runrange, 'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'}, {'x': runrange, 'alleta':True, 'rebin':10, 'selection':'jet1abseta>2.5 && jet1abseta<2.964', 'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]: if 'alleta' in changes: opt.out += '/ECOT' opt.user_options['out'] += '/ECOT' plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6) else: plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes) changes['y'] = [0.84, 1.2] plotresponse.response_run(files, opt, changes=changes) def rootfile(files, opt): """Function for the rootfile sent to the JEC group in early August 2013.""" list_of_quantities = ['ptbalance_alpha', 'mpf_alpha', 'ptbalance', 'mpf', 'zpt', 'npv', 'zmass', 'zpt_alpha', 'npv_alpha', 'ptbalance_zpt', 'mpf_zpt', 'ptbalance_npv', 'mpf_npv', ] for muon in [["zmumu", "1"], ["zmumu_muoncuts", "(mupluspt>25 && muminuspt>25 && abs(mupluseta)<1.0 && abs(muminuseta)<1.0)"]]: for alpha in [[0, "alpha<0.2", "alpha0_2"], [1, "alpha<0.3", "alpha0_3"], [1, "alpha<0.4", "alpha0_4"]]: for quantity in list_of_quantities: changes = {'rebin': 1, 'out': 'out/root/', 'allalpha': True, 'root': "__".join([quantity, alpha[2]]), 'filename': muon[0], 'selection': "&&".join([alpha[1], muon[1]]), } if ("_zpt" in quantity) or ("_npv" in quantity): changes['special_binning'] = True if "alpha" in quantity: changes['rebin'] = 10 plot1d.datamcplot(quantity, files, opt, changes=changes) changes['ratio'] = True changes['labels'] = ['ratio'] plot1d.datamcplot(quantity, files, opt, changes=changes) def ineff(files, opt): settings = plotbase.getSettings(opt, changes=None, settings=None, quantity="flavour_zpt") fig, ax = plotbase.newPlot() labels = ["no matching partons", "two 
matching partons"] colors = ['red', 'blue'] markers = ['o', 'd'] changes = {'subplot': True, 'lumi': 0, 'xynames': ['zpt', 'physflavourfrac'], 'legloc': 'upper left', } for n, l, c, m in zip([0, 2], labels, colors, markers): quantity = "(nmatchingpartons3==%s)_zpt" % n changes['labels'] = [l] changes['colors'] = c changes['markers'] = m plot1d.datamcplot(quantity, files, opt, fig_axes=(fig, ax), changes=changes, settings=settings) settings['filename'] = plotbase.getDefaultFilename("physflavourfrac_zpt", opt, settings) plotbase.Save(fig, settings['filename'], opt) def flav(files, opt): etabins = [0, 1.3, 2.5, 3, 3.2, 5.2] etastrings = ['0-1_3', '1_3-2_5', '2_5-3', '3-3_2', '3_2-5_2'] flavourdefs = ["algoflavour", "physflavour"] flavourdefinitions = ["algorithmic", "physics"] flist = ["(flavour>0&&flavour<4)", "(flavour==1)", "(flavour==2)", "(flavour==3)", "(flavour==4)", "(flavour==5)", "(flavour==21)", "(flavour==0)"] q_names = ['uds', 'u', 'd', 's', 'c', 'b', 'gluon', 'unmatched'] changes = {} ############### FLAVOUR NOT 0!!!!! # barrel: """changes['rebin'] = 1 changes['filename']="flavour" changes['filename']="flavour" for f_id, quantity in zip(['uds','c','b','gluon'], flist): changes['root']=f_id plot1d.datamcplot("%s_zpt" % quantity, files, opt, changes=changes) """ for flavourdef, flavourdefinition in zip(flavourdefs, flavourdefinitions): # iterate over eta bins: for filename, selection in zip(etastrings, getroot.etacuts(etabins)): changes['filename'] = "_".join([filename, flavourdefinition]) changes['alleta'] = True changes['selection'] = "%s && %s" % (selection, "alpha<0.2") changes['rebin'] = 1 for f_id, quantity in zip(q_names, flist): changes['root'] = f_id plot1d.datamcplot("%s_zpt" % quantity.replace("flavour", flavourdef), files, opt, changes=changes) def gif(files, opt): local_opt = copy.deepcopy(opt) runlist = listofruns.runlist[::10] for run, number in zip(runlist, range(len(runlist))): local_opt.lumi = (run - 190456) * 19500 / (209465 - 190456) print plotbase.plot1d.datamcplot('balresp', files, local_opt, changes={'var': 'var_RunRange_0to%s' % run}, filename="%03d" % number) def closure(files, opt): def divide((a, a_err), (b, b_err)): if (b != 0.0): R = a / b else: R = 0 Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2) return R, Rerr def multiply((a, a_err), (b, b_err)): R = a * b Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2) return R, Rerr changes = {} changes = plotbase.getchanges(opt, changes) #get extrapol factors with alpha 035 #changes['var']='var_CutSecondLeadingToZPt_0_4' #changes['correction']='L1L2L3' balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError()) mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError()) genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError()) intercept, ierr, slope, serr, chi2, ndf, conf_intervals = getroot.fitline2(getroot.getobjectfromnick('ptbalance_alpha', files[0], changes, rebin=1)) balresp_extrapol = (intercept, conf_intervals[0]) extrapol_reco_factor = divide(balresp_extrapol, balresp) intercept2, ierr2, slope2, serr2, chi22, ndf2, conf_intervals2 = getroot.fitline2(getroot.getobjectfromnick('genbalance_genalpha', files[0], changes, rebin=1)) genbal_extrapol = (intercept2, 
conf_intervals2[0]) extrapol_gen_factor = divide(genbal_extrapol, genbal) intercept3, ierr3, slope3, serr3, chi23, ndf3, conf_intervals3 = getroot.fitline2(getroot.getobjectfromnick('mpf_alpha', files[0], changes, rebin=1)) mpf_extrapol = (intercept3, conf_intervals3[0]) extrapol_mpf_factor = divide(mpf_extrapol, mpfresp) #del changes['var'] #del changes['correction'] #other quantities with alpha 02 recogen = (getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMeanError()) zresp = (getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMeanError()) balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError()) mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError()) mpfresp_raw = (getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMeanError()) genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError()) balparton = (getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMeanError()) partoncorr = divide(balparton, genbal) format = "%1.4f" print changes print "" print (r"balresp reco %s +- %s" % (format, format)) % balresp print (r"mpf %s +- %s" % (format, format)) % mpfresp print (r"balparton %s +- %s" % (format, format)) % balparton print (r"zresp %s +- %s" % (format, format)) % zresp print (r"recogen %s +- %s" % (format, format)) % recogen print (r"extrapolReco_factor %s +- %s" % (format, format)) % extrapol_reco_factor print (r"extrapolGen_factor %s +- %s" % (format, format)) % extrapol_gen_factor print (r"extrapolMPF_factor %s +- %s" % (format, format)) % extrapol_mpf_factor print (r"parton/genjet %s +- %s" % (format, format)) % divide(balparton, genbal) print "" print (r"pTgenjet / pTgenZ %s +- %s" % (format, format)) % genbal genbal = multiply(genbal, extrapol_gen_factor) print (r"* gen Level extrapolation %s +- %s" % (format, format)) % genbal #genbal = multiply(genbal, partoncorr) #print (r"* pTparton/pTgenjet correction %s +- %s" % (format, format) ) % genbal #genbal = divide(genbal, balparton) #print (r"* pTparton/pTZ correction %s +- %s" % (format, format) ) % genbal reco_bal = divide(multiply(genbal, recogen), zresp) print (r"* GenToReco for Jet and Z %s +- %s" % (format, format)) % reco_bal print "" print (r"pTrecojet / pTrecoZ %s +- %s" % (format, format)) % balresp balresp = multiply(balresp, extrapol_reco_factor) print (r"* reco Level extrapolation %s +- %s" % (format, format)) % balresp print "" print (r"MPF (typeI) %s +- %s" % (format, format)) % mpfresp #mpfresp = divide(mpfresp, zresp) #print (r"MPF (GenZ) %s +- %s" % (format, format) ) % mpfresp mpfresp = multiply(mpfresp, extrapol_mpf_factor) print (r"MPF (extrapol) %s +- %s" % (format, format)) % mpfresp print (r"MPF (Raw) %s +- %s" % (format, format)) % mpfresp_raw def extrapola(files, opt): fig, ax = plotbase.newPlot() changes = {} changes['var'] = "_var_CutSecondLeadingToZPt_0_3" local_opt = copy.deepcopy(opt) rebin = 5 if opt.rebin is 
not None: rebin = opt.rebin plot1d.datamcplot('ptbalance_alpha', files, local_opt, legloc='upper center', changes=changes, rebin=rebin, subplot=True, subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False) local_opt.colors = ['red', 'maroon'] plot1d.datamcplot('mpf_alpha', files, local_opt, legloc='upper center', changes=changes, rebin=rebin, subplot=True, xy_names=['alpha', 'response'], subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False, fit_offset=-0.1) file_name = plotbase.getDefaultFilename("extrapolation_", opt, changes) plotbase.Save(fig, file_name, opt) # function for comparing old and new corrections def comparison(datamc, opt): """file_names = [ '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root' ]""" colors = ['red', 'blue', 'blue', 'red'] markers = ['*', 'o', 'o', '*'] #labels = [['MC_52xFast', 'data_52xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xFast', 'data_53xFast'], ['MC_53xOff', 'data_53xOff']] rebin = 1 import copy file_names = [ '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', ] labels = [['MC_52xFast', 'data_52xFast'], ['MC_53xFast', 'data_53xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xOff', 'data_53xOff']] files = [] for f in file_names: files += [getroot.openfile(f, opt.verbose)] local_opt = copy.deepcopy(opt) local_opt.style = markers local_opt.colors = colors quantity = 'L1abs_npv' # ALL fig, axes = plotbase.newPlot(subplots=4) for a, f1, f2, l in zip(axes, files[::2], files[1::2], labels): local_opt.labels = l datamcplot(quantity, (f1, f2), local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, a), rebin=rebin, subplot=True, subtext="") filename = "L1_all__" + opt.algorithm plotbase.Save(fig, filename, opt) """ #Fastjet vs Offset fig = plotbase.plt.figure(figsize=(14,7)) axes = [fig.add_subplot(1,2,n) for n in [1,2]] local_opt.labels = labels[0] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[1] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") #53 local_opt.labels = labels[2] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), 
rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[3] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") filename = "L1_Fastjet_vs_Offset__"+opt.algorithm plotbase.Save(fig, filename, opt) #52X vs 53X fig = plotbase.plt.figure(figsize=(14,7)) axes = [fig.add_subplot(1,2,n) for n in [1,2]] local_opt.labels = labels[0] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[2] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[1] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") # local_opt.labels = labels[3] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") filename = "L1_52X_vs_53X__"+opt.algorithm plotbase.Save(fig, filename, opt) import plotresponse file_names = [ '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', ] labels = [['data_52xFast', 'MC_52xFast'], [ 'data_53xFast', 'MC_53xFast'], [ 'data_52xOff', 'MC_52xOff'], ['data_53xOff', 'MC_53xOff']] files=[] for f in file_names: files += [getroot.openfile(f, opt.verbose)] for over, fit in zip(['zpt', 'jet1eta', 'npv'], [True, False, True]): fig, axes= plotbase.newPlot(subplots=4) fig2, axes2= plotbase.newPlot(subplots=4) for a1, a2, f1, f2, l in zip(axes, axes2, files[::2], files[1::2], labels): local_opt.labels = l changes ={}# {'correction':'L1L2L3'} plotresponse.responseplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig,a1), subplot=True, subtext="") plotresponse.ratioplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig2 ,a2), fit=fit, subplot=True, subtext="") filename = "Response_"+over+"_all__"+opt.algorithm plotbase.Save(fig, filename, opt) filename = "Ratio_"+over+"_all__"+opt.algorithm plotbase.Save(fig2, filename, opt)""" # function for 2d grid plots """def twoD_all_grid(quantity, datamc, opt): pt_thresholds = [12, 16, 20, 24, 28, 32, 36] var_list = ['var_JetPt_%1.fto%1.f' % (s1, s2) for (s1, s2) in zip(pt_thresholds, [1000, 1000, 1000, 1000, 1000, 1000, 1000])] var_list_2 = getroot.npvstrings(opt.npv) fig = plt.figure(figsize=(10.*len(var_list), 7.*len(var_list_2))) grid = AxesGrid(fig, 111, nrows_ncols = (len(var_list), len(var_list_2)), axes_pad = 0.4, share_all=True, label_mode = "L", #aspect = True, #cbar_pad = 0, #cbar_location = "right", 
#cbar_mode='single', ) for n1, var1 in enumerate(var_list): for n2, var2 in enumerate(var_list_2): change = {'var':var1+"_"+var2} index = len(var_list_2)*n1 + n2 change['incut']='allevents' twoD(quantity, datamc, opt, changes=change, fig_axes = [fig, grid[index]], subplot = True, axtitle = change['var'].replace('var_', '')) for grid_element, var_strings in zip(grid, opt.npv): text = r"$%s\leq\mathrm{NPV}\leq%s$" % var_strings grid_element.text(0.5, 5.5, text, ha='center', va='center', size ='40') for grid_element, pt_threshold in zip(grid[::len(var_list_2)], pt_thresholds): text = r"$p_\mathrm{T}^\mathrm{Jet1}$"+"\n"+r"$\geq%s\mathrm{GeV}$" % pt_threshold grid_element.text(-8.7, 0, text, ha='left', va='center', size ='30') #fig.suptitle("%s leading jet $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], opt.algorithm, opt.correction), size='50') fig.suptitle("%s %s $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], quantity[7:-16], opt.algorithm, opt.correction), size='30') file_name = "grid_"+opt.labels[0]+"_"+quantity +"_"+opt.algorithm + opt.correction fig.set_figwidth(fig.get_figwidth() * 1.2) plotbase.Save(fig, file_name, opt, crop=False, pad=1.5)""" def Fall12(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'] ] labellist = [['data_Summer12', 'MC_Summer12'], ['data_Fall12V1', 'MC_Fall12V1'], ['data_Fall12V4', 'MC_Fall12V4']] over = 'zpt' for over in ['zpt', 'npv', 'jet1eta']: fig = plotbase.plt.figure(figsize=[21, 14]) fig.suptitle(opt.title, size='xx-large') for typ, row in zip(['bal', 'mpf'], [0, 4]): for filenames, labels, col in zip(filelist, labellist, [0, 1, 2]): ax1 = plotbase.plt.subplot2grid((7, 3), (row, col), rowspan=2) ax2 = plotbase.plt.subplot2grid((7, 3), (row + 2, col)) fig.add_axes(ax1) fig.add_axes(ax2) if over == 'jet1eta' and typ == 'bal': legloc = 'upper right' else: legloc = 'lower left' local_opt.labels = labels files = [] for f in filenames: files += [getroot.openfile(f, opt.verbose)] plotresponse.responseplot(files, local_opt, [typ], over=over, figaxes=(fig, ax1), legloc=legloc, subplot=True) plotresponse.ratioplot(files, local_opt, [typ], binborders=True, fit=True, over=over, subplot=True, figaxes=(fig, ax2), ratiosubplot=True) fig.subplots_adjust(hspace=0.05) ax1.set_xticks([]) ax1.set_xlabel("") ax2.set_yticks([1.00, 0.95, 0.90]) if col > 0: ax1.set_ylabel("") ax2.set_ylabel("") title = "" # " Jet Response ($p_T$ balance / MPF) vs. 
Z $p_T$, $N_{vtx}$ , Jet $\eta$ (" +opt.algorithm+" "+opt.correction+")" fig.suptitle(title, size='x-large') file_name = "comparison_ALL_" + over + opt.algorithm + opt.correction plotbase.Save(fig, file_name, opt) def factors(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellist = [ ['Data FastJet V1', 'MC FastJet V1', 'Data Offset V1', 'MC Offset V1'], ['Data FastJet V4', 'MC FastJet V4', 'Data Offset V4', 'MC Offset V4']] """filelistt = [ ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellistt = ['Data FastJet V1', 'Data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['Data Offset V1', 'Data Offset V4'], ['MC Offset V1','MC Offset V4' ]] names = ['DataV1', 'MCV1', 'DataV4', 'MCV4' ]""" files = [] #for sublist in filelist: # rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist] # files.append( rootfiles) for sublist in filelist: files.append([getroot.openfile(f, opt.verbose) for f in sublist]) fit = None rebin = 1 # for files, labellist, name in zip(files, labellist, names) fig, axes = plotbase.newPlot(subplots=2) quantity = 'L1abs_npv' local_opt.style = ['o', '*', 'o', '*'] local_opt.labels = labellist[0] local_opt.colors = ['blue', 'blue', 'red', 'red'] plot1d.datamcplot(quantity, files[0], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit, rebin=rebin, subplot=True, subtext="") local_opt.labels = labellist[1] plot1d.datamcplot(quantity, files[1], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit, rebin=rebin, subplot=True, subtext="") file_name = "L1_comparison_" # +name plotbase.Save(fig, file_name, opt) def factors2(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4_L1Offset/out/closure.root'], 
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellistt = [['data FastJet V1', 'data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['data Offset V1', 'data Offset V4'], ['MC Offset V1', 'MC Offset V4'] ] names = ['dataV1', 'MCV1', 'dataV4', 'MCV4'] files = [] for sublist in filelist: rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist] files.append(rootfiles) #print files fit = 'chi2_linear' rebin = 1 fit_offset = -0.1 for files, labellist, name in zip(files, labellistt, names): print labellist fig, axes = plotbase.newPlot(subplots=2) quantity = 'L1abs_npv' local_opt.style = ['o', '*', 'o', '*'] local_opt.labels = [labellist[0]] local_opt.colors = ['blue', 'blue', 'red', 'red'] plot1d.datamcplot(quantity, [files[0]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit, rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="") local_opt.labels = [labellist[1]] plot1d.datamcplot(quantity, [files[1]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit, rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="") file_name = "L1_comparison_" + name plotbase.Save(fig, file_name, opt) import ROOT def allpu(files, opt, truth=True): print files settings = plotbase.getSettings(opt, quantity='npu') #print settings print settings['folder'] name = "_".join([settings['folder'], settings['algorithm'] + settings['correction']]) print name, files[1] name = name.replace("Res", "") t = files[1].Get(name) if not t: print "no tree", name, t.GetName() exit(1) # raw wei data weight if truth: histos = [getroot.getobject("pileup", files[2])] else: histos = [getroot.getobject("pileup;2", files[2])] histos[-1].Rebin(10) print histos[-1].GetNbinsX(), "pu2" histos[0].SetTitle("Data") histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)] if truth: histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)] t.Project("mcraw", "nputruth") else: histos += [ROOT.TH1D("mcraw", "MC", 80, 0, 80)] t.Project("mcraw", "npu") if truth: histos += [ROOT.TH1D("mcwei", "MC'", 1600, 0, 80)] t.Project("mcwei", "nputruth", "weight") else: histos += [ROOT.TH1D("mcwei", "MC'", 80, 0, 80)] t.Project("mcwei", "npu") binning = [[0, 1, 2, 3.5, 5], range(45, 80)] for h in histos: if h.GetNbinsX() > 1000: h.Rebin() if h.GetNbinsX() > 82: print h.GetNbinsX(), ">82! 
in", h.GetTitle() if not truth: break print "rebin:", binning b = binning if histos.index(h) == 1: b = binning + [range(5, 46)] print b for l in b: for a, b in zip(l[:-1], l[1:]): x1 = h.FindBin(a) x2 = h.FindBin(b) sumh = sum([h.GetBinContent(i) for i in range(x1, x2)]) / (x2 - x1) for i in range(x1, x2): h.SetBinContent(i, sumh) if truth: f = histos[1].Integral() / histos[1].Integral(histos[1].FindBin(8), histos[1].FindBin(40)) for i in range(3 + 0 * len(histos)): #histos[i].Rebin(4) print i ff = f / histos[i].Integral(histos[i].FindBin(8), histos[i].FindBin(40)) ff = 1.0 / histos[i].Integral() histos[i].Scale(ff) histos += [histos[0].Clone("dataraw")] histos[-1].SetTitle("Data/MC") histos[-1].Divide(histos[1]) if len(files) > 3: histos += [getroot.getobject("pileup", files[3])] histos[-1].SetTitle("weight") histos += [histos[2].Clone("rawmc")] histos[-1].Divide(histos[1]) histos[-1].SetTitle("MC'/MC") histos += [histos[0].Clone("datamc")] histos[-1].Divide(histos[2]) histos[-1].SetTitle("Data/MC'") plots = [getroot.root2histo(h) for h in histos] fig, ax, ratio = plotbase.newPlot(ratio=True) fig = plotbase.plt.figure(figsize=[7, 10]) ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax.number = 1 ratio = plotbase.plt.subplot2grid((3, 1), (2, 0)) ratio.number = 2 fig.add_axes(ax) fig.add_axes(ratio) fig.subplots_adjust(hspace=0.05) colors = ['black', 'navy', 'red', 'green'] for p, c in zip(plots[:3], colors): ax.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6) colors[1] = 'gray' for p, c in zip(plots[3:], colors): r = ratio.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6) plotbase.labels(ax, opt, settings, settings['subplot']) plotbase.axislabels(ax, r"$n_\mathrm{PU}", settings['xynames'][1], settings=settings) xaxistext = r"observed number of pile-up interactions $n_\mathrm{PU}$" if truth: xaxistext = xaxistext.replace("observed", "true") plotbase.axislabels(ratio, xaxistext, "ratio", settings=settings) print ratio.number, r plotbase.setAxisLimits(ax, settings) plotbase.labels(ratio, opt, settings, settings['subplot']) plotbase.setAxisLimits(ratio, settings) #handles, labels = ratio.get_legend_handles_labels() ratio.legend(bbox_to_anchor=[0.8, 1], loc='upper center') ax.set_xticklabels([]) ax.set_xlabel("") settings['filename'] = plotbase.getDefaultFilename("npus", opt, settings) plotbase.Save(fig, settings) def pu(files, opt): allpu(files, opt) def puobserved(files, opt): allpu(files, opt, False)
gpl-2.0
1,261,465,762,515,416,000
40.024311
257
0.540758
false
3.169679
false
false
false
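The pile-up histogram code in the record above smooths groups of bins by replacing every bin in a range with the range average. A minimal pure-Python sketch of that averaging step, using a plain list in place of a ROOT histogram (the values and bin edges below are invented for illustration):

def average_ranges(values, edges):
    """Replace values[a:b] with their mean for each consecutive pair in edges."""
    out = list(values)
    for a, b in zip(edges[:-1], edges[1:]):
        chunk = out[a:b]
        mean = sum(chunk) / float(len(chunk))
        for i in range(a, b):
            out[i] = mean
    return out

# Example: smooth the first five entries in two coarse ranges, leave the rest untouched.
print(average_ranges([4, 6, 5, 9, 11, 2], [0, 2, 5]))
# -> [5.0, 5.0, 8.33..., 8.33..., 8.33..., 2]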
baccenfutter/cpassdb
cpassdb/protocols/client.py
1
19784
"""cpassdb - Client Protocol Classes""" __author__ = "Brian Wiborg <[email protected]>" __license__ = "GNU/GPLv2" import os import sys import json import base64 import commands from twisted.internet import reactor from twisted.protocols.basic import LineReceiver class ClientProtocol(LineReceiver): """Abstract client protocol base-class This class serves as an abstract base-class for all other cpassdb client protocols. It provides common methods for all client protocols. """ # This variable holds the request structure. request = {} # In case the server encounters an error, that error will be saved into # this instance attribute. error = None def connectionMade(self): self.send_request(**self.request) def dataReceived(self, data): """Callback function for incoming data. This function should be called from ancestor functions as it respects failure responses and handles them accordingly. Additionally, it will load the received JSON string into a data structure and return that. So there is always a benefit of calling this method. :param data: str - The received line of data. """ if data.startswith('ERROR: '): self.error = data self.exit_code = 1 return self.terminate(data) try: return json.loads(data) except ValueError: print data self.terminate("ERROR: Can not decode JSON.") def connectionLost(self, reason=""): """Callback function for lost connections. For cpassdb clients, a lost connection means that there is no more data expected from the server, so the reactor should be stopped. :param reason: str - An optional reason. """ if reactor.running: reactor.stop() def terminate(self, error=None): """Helper function for terminating a connection. The provided error message is written to stderr. :param error: str - An optional error message. """ if error is not None: self.error = error self.exit_code = 1 if error: sys.stderr.write("{}\n".format(error)) self.transport.loseConnection() def gracefully_disconnect(self): """Helper function for gracefully terminating a connection. The gracefulness comes from sending a termination request to the server and having the server terminate the connection. """ self.transport.write("{}\n".format(self.sign_request({"type": "BYE"}))) def sign_request(self, request): """Help function for request signing. :param request: struct - Request structure. :return: str - GPG CLear-Text Armor """ request_string = json.dumps(request) status, armor = commands.getstatusoutput("echo '{}' | gpg --clearsign".format(request_string)) if status: return self.terminate("ERROR: Can not load private key.") return armor def send_request(self, *args, **kwargs): """This method must be overloaded by derived classes.""" raise NotImplementedError("This method must be overloaded by derived classes!") class MessyClientProtocol(ClientProtocol): """Abstract base-class for messy client protocols A messy client protocol is one that leaves behind dirty secrets after running. So this would be the case for operations such as adding a key to or removing it from a recipient group. All secrets that are encrypted for this particular recipient group does not match the current state any more; it is still decryptable by the old keys or not decryptable by the new ones. """ # This dict can be used by ancestors as a small state-machine. 
state_machine = { 'requested_dirty_secrets': False, } def build_request_get_dirty_secrets(self, dirty_secrets): print "{} secret(s) need to be cycled, requesting.".format(len(dirty_secrets)), return { 'type': 'GET', 'names': dirty_secrets, } def build_request_set_dirty_secrets(self, incoming_dirty_secrets): request = { 'type': 'SET', 'secrets': [], } for secret_name in incoming_dirty_secrets: secret = json.loads( commands.getoutput( "echo '{}' | gpg --decrypt 2>/dev/null".format( incoming_dirty_secrets[secret_name]['armor'] ) ) ) ttl = incoming_dirty_secrets[secret_name]['metadata']['ttl'] recipients = incoming_dirty_secrets[secret_name]['metadata']['recipients'] secret_object = { 'name': secret_name, 'secret': secret, 'metadata': { 'ttl': ttl, 'recipients': recipients, } } request['secrets'].append(secret_object) print '.', print return request def get_dirty_secrets(self, dirty_secrets): request = self.build_request_get_dirty_secrets(dirty_secrets) self.transport.write("{}\n".format(self.sign_request(request))) def set_dirty_secrets(self, dirty_secrets): request = self.build_request_set_dirty_secrets(dirty_secrets) self.transport.write("{}\n".format(self.sign_request(request))) def handle_dirty_secret_dialog(self, response): if self.state_machine['requested_dirty_secrets'] is False: self.get_dirty_secrets(response) self.state_machine['requested_dirty_secrets'] = True elif self.state_machine['requested_dirty_secrets'] is True: self.set_dirty_secrets(response) self.state_machine['requested_dirty_secrets'] = None else: print "Cycled {} secret(s).".format(len(response)) class SetSecret(ClientProtocol): """cpassdb client protocol class for setting a secret.""" def send_request(self, name, secret, ttl, recipients): """Send write request to server. :param name: str - Name of the secret (incl. categories). :param secret: struct - The secret data structure (usually a dict). :param ttl: int - Time to live (in days past today). :param recipients: list - List of all recipients. """ request = { "type": "SET", "secrets": [{ "name": name, "secret": secret, "metadata": { "ttl": ttl, "recipients": recipients, } }] } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. This protocol will return a list of written secrets upon successful execution or an error message upon failure. :param data: str - The incoming line of data. """ # Parse incoming data into response object. response = ClientProtocol.dataReceived(self, data) if response: print "Written:", ' '.join(response) self.gracefully_disconnect() class GetSecret(ClientProtocol): """cpassdb client protocol class for getting secrets.""" # Set this variable to true if you would like the data to be outputted as # a JSON string. The JSON output will always print the full secret object # including all metadata. as_json = False # Set this value to the integer value of line indentation you desire for # the JSON output. Defining this attribute only makes sense in combination # with the as_json class attribute. indent_json = None # Set this list to the fields you wish the output the be limited to. Using # this attribute only works on non-JSON output format. filter_fields = [] # Store a sorted list of all requested secrets, so they can be outputted in # the requested order. requested_secrets = [] def send_request(self, names): """Send read request to server. :param names: list - A list of secret-object names. 
""" request = { "type": "GET", "names": names, } self.transport.write("{}\n".format(self.sign_request(request))) def decrypt_secret_armor(self, gpg_armor): """Helper function for decrypting a GPG encrypted message armor. :param gpg_armor: str - The GPG armor. :return: struct - JSON-loaded secret data structure. """ return json.loads( commands.getoutput( "echo '{}' | gpg --decrypt 2>/dev/null".format(gpg_armor) ) ) def dataReceived(self, data): """Callback function for incoming data. This protocol will return a list of secret object data structures containing all metadata in the following format: [ { 'name': <secret-object-name>, 'secret': <secret-object-data-struct>, 'metadata': { 'ttl': <ttl>, 'recipients': [<recipient>, ...], } }, ... ] :param data: str - The line of incoming data. """ # Parse incoming data. response = ClientProtocol.dataReceived(self, data) if response: if self.as_json: print json.dumps([{ 'name': secret_name, 'secret': self.decrypt_secret_armor(response[secret_name]['armor']), 'metadata': response[secret_name]['metadata'], } for secret_name in response], indent=self.indent_json) else: for secret_name in response: secret = self.decrypt_secret_armor(response[secret_name]['armor']) if self.filter_fields: for field in self.filter_fields: if field in secret: print secret[field] else: print '###', secret_name, '###' for field in secret: print field + ':', secret[field] print self.gracefully_disconnect() class DelSecret(ClientProtocol): """cpassdb client protocol class for deleting a secret.""" def send_request(self, name): """Send delete request to server. :param name: str - Name of secret to delete (incl. its category) """ request = { "type": "DEL", "name": name, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. This protocol will reply with a success string upon successful execution. :param data: str - The actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: print response self.gracefully_disconnect() class ListSecrets(ClientProtocol): """cpassdb client protocol class for listing secrets in a given category.""" def send_request(self, category=None): """Send list request to server. :param category: str - Name of category (default: None) """ request = { "type": "LIST", "path": category, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. This protocol will reply with a list of secret-object names upon successful execution. :param data: str - Actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: if isinstance(response, dict): for d in response['dirs']: print d + os.path.sep for secret in response['secrets']: print secret elif isinstance(response, list): for secret in response: print secret else: raise NotImplementedError self.gracefully_disconnect() class InstallRootKey(ClientProtocol): """cpassdb client protocol class for installing the root-key. There is a special recipient group that - apart of being a regular recipient group - mark all cpassdb admins. Admins are allowed to perform key-management operations such as adding keys, deleting keys and adding/removing keys from recipient groups. The cpassdb admin group is always included as recipient to every secret-object, meaning that cpassdb admins can always decrypt every secret. 
The admin group is not stated inside the recipient field in the metadata if not explicitly defined for that recipient group. Due to this convention it is not possible to use the cpassdb server before at least one key-id has been imported and added to the admin group. That's what this protocol is for. It is really only used once per server, usually. """ def send_request(self, pubkey_armor): """Send root-key installation request to server. :param pubkey_armor: str - The GPG armor of the public key. """ request = { 'pubkey_armor': pubkey_armor, } request_string = json.dumps(request) self.transport.write("{}\n".format(request_string)) def dataReceived(self, data): """Callback function for incoming response data. This protocol usually replies with a success string upon successful execution. :param data: str - Actual line of incoming data. :return: """ response = ClientProtocol.dataReceived(self, data) print response self.gracefully_disconnect() class AddKey(MessyClientProtocol): """cpassdb client protocol class for adding a key.""" def send_request(self, pubkey_armor, groups): """Send add-key request to server. :param pubkey_armor: str - GPG armor of public key. :param groups: list - List of group to add this key-id to. """ request = { 'type': 'ADDKEY', 'pubkey_armor': base64.b64encode(pubkey_armor), 'groups': groups, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - Actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: self.handle_dirty_secret_dialog(response) self.gracefully_disconnect() class DelKey(MessyClientProtocol): """cpassdb client protocol class for deleting a key.""" def send_request(self, pubkey_id): """Send delete-key request to server. :param pubkey_id: str - Key-id of key to delete. """ request = { "type": "DELKEY", "pubkey_id": pubkey_id, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - Actual line of incoming data. :return: """ response = ClientProtocol.dataReceived(self, data) if response: self.handle_dirty_secret_dialog(response) self.gracefully_disconnect() class ListKeys(ClientProtocol): """cpassdb client protocol class for listing all keys in the keyring.""" def send_request(self, keyid_length=8): """Send list-keys request to server. :param keyid_length: int - Length of the key-ids (common are 8 or 16). """ request = { "type": "KEYS", "keyid_length": keyid_length, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - Actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: for key in response: print key[0], ' '.join(key[1]) self.gracefully_disconnect() class ListGroups(ClientProtocol): """cpassdb client protocol class for listing all recipient groups.""" def send_request(self, pubkey_id=None): """Send list-groups request to server. If the request is supplied with key-id, only the groups of that key-id will be returned. :param pubkey_id: str - Optional key-id. """ request = { "type": "GROUPS", "pubkey_id": pubkey_id, } self.transport.write('{}\n'.format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming reponse data. :param data: str - Actual line of incoming data. 
""" response = ClientProtocol.dataReceived(self, data) if response: for group in response: print group + ':', ' '.join(response[group]) self.gracefully_disconnect() class AddGroups(MessyClientProtocol): """cpassdb client protocol class for add a key-id to a list of groups. When the members of a recipient group change, all secrets of that recipient group must be re-encrypted. If the secrets where not re-encrypted they would not be readable by the keys they should be in the current state of the recipient group. This adds extra ping-pong complexity to this protocol that the other client protocols don't have. """ def send_request(self, pubkey_id, groups): """Send add-group request to server. :param pubkey_id: str - Key-id of concern. :param groups: - List of groups to add this key-id to. """ request = { 'type': 'ADDGRP', 'pubkey_id': pubkey_id, 'groups': groups, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - Actual line of incoming data. :return: """ response = ClientProtocol.dataReceived(self, data) if response: self.handle_dirty_secret_dialog(response) self.gracefully_disconnect() class DelGroups(AddGroups): """cpassdb client protocol class for deleting a key-id from a group.""" def send_request(self, pubkey_id, groups): """Send delete-group request to the server. :param pubkey_id: str - Key-id of concern. :param groups: - List of groups to free from the given key-id. """ request = { 'type': 'DELGRP', 'pubkey_id': pubkey_id, 'groups': groups, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - str - Actual line of incoming data. """ AddGroups.dataReceived(self, data)
gpl-2.0
-7,313,434,773,447,821,000
31.863787
102
0.591336
false
4.556426
false
false
false
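The sign_request method in the cpassdb client above shells out to gpg --clearsign through the Python-2-only commands module. A sketch of the same clear-signing step using subprocess instead; it assumes gpg is on PATH with a usable default secret key, and the request shape mirrors the GET protocol:

import json
import subprocess

def clearsign(request_dict):
    """Clear-sign a JSON request with the default GPG key (assumes gpg is installed)."""
    payload = json.dumps(request_dict)
    proc = subprocess.Popen(["gpg", "--clearsign"],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    armor, _ = proc.communicate(payload.encode("utf-8"))
    if proc.returncode != 0:
        raise RuntimeError("gpg --clearsign failed")
    return armor.decode("utf-8")

# Example request in the same shape the GET protocol sends:
# print(clearsign({"type": "GET", "names": ["category/secret-name"]}))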
nonapod/gzinflatekiller
gzinflatekiller.py
1
3114
#!/bin/env python #:############################################ #: GZINFLATEKILLER #: by Les Cordell #: #: Hunt through files containing a base64 #: GZInflate Command #: #: Written on 07/08/2013 #: last modified @ 07/08/2013 #:############################################ import sys, os, re #: Extensions constant, these are the files that our program will check EXTS = ['php'] #: Our Patterns constant contains all of our regular expressions that we want to check against and skip PATTERNS = [re.compile("<\?php eval\(gzinflate\(base64_decode\(\'.*\'\)\)\);\?>"), re.compile('^\r\n')] def gzInflateKill(): """ #: The main function that is run, it checks through the argv arguements first, #: it requires a directory enclosed in quotes. """ dirname = False #: Check provided directory name if (len(sys.argv) < 2): print "You must provide a directory name enclosed in quotes to run this script.\n" quit() elif (len(sys.argv) > 2): print "Too many arguements provided, you must provide a directory for this script to run" quit() elif (len(sys.argv) == 2): #: Store the directory name dirname = sys.argv[1] else: #: If there is an error return false print "There was an error running this script, please check that you have specified a directory enclosed in quotes." quit() #: Open the directory parseDir(dirname) quit() def parseDir(dirname): """ #: This is our directory parser, here we parse through every directory until we hit the last #: feeding the files off to the cleanFile function """ if os.path.exists(dirname): #: If our directory exists then we'll open it and return some files #: Walk through the directory for root, dirs, files in os.walk(dirname): if files: #: If we get any files for file in files: #: For each file in the list if file.split('.')[-1] in EXTS: #: Get the extension thisFile = os.path.join(root, file) if os.path.isfile(thisFile): print "cleaning: " + thisFile cleanFile(thisFile) if dirs: #: If we get any directories for dir in dirs: #: For each directory in the list parseDir(dir); #: Recursively run the function def cleanFile(filename): """ #: Here we will strip the injection from the php file """ newFile = [] #: First open the file for reading and get our new file with open(filename, 'r') as aFile: for line in aFile.readlines(): #: For each line check if it matches the injection or the new line if patternMatch(line): pass else: #: Append line to new file if no match newFile.append(line) aFile.close() #: close the file #: Now we open the file for reading if newFile: newFile = ''.join(newFile) # : join our new file with open(filename, 'w+b') as aFile: aFile.write(newFile) aFile.close() def patternMatch(line): """ #: We pass lines into this function, check them against our PATTERNS constant #: if we match any of them, we return a true, otherwise we return false """ for pattern in PATTERNS: if pattern.match(line): return True return False # BEGIN # if __name__ == '__main__': gzInflateKill();
mit
-5,594,238,935,680,430,000
28.942308
118
0.659923
false
3.395856
false
false
false
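A quick check of the injection pattern the script above strips from PHP files, with a made-up base64 payload standing in for a real one:

import re

# Same shape of pattern used to spot an injected eval(gzinflate(base64_decode(...))) line.
INJECTION = re.compile(r"<\?php eval\(gzinflate\(base64_decode\('.*'\)\)\);\?>")

sample = "<?php eval(gzinflate(base64_decode('SGVsbG8=')));?>"
clean = "<?php echo 'hello'; ?>"

print(bool(INJECTION.match(sample)))  # True  -> line would be stripped
print(bool(INJECTION.match(clean)))   # False -> line is kept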
L1NT/django-training-log
log/models.py
1
5458
from django.db import models # Create your models here. class Sport(models.Model): """ don't use models.choices because we want the list to be transactional data example list: [ 'bike', 'run', 'swim', 'measurements', 'yoga', 'weights', # for multi-sport `Event`s: 'multisport', #EventType.sport 'transition', #Entry.sport ] """ sport = models.CharField(max_length=20) class Meta: ordering = ['sport'] def __unicode__(self): return self.sport def __str__(self): return self.sport class Measurements(models.Model): id = models.AutoField(primary_key=True) #added by default weight = models.FloatField(blank=True, null=True) class Equipment(models.Model): """ this is for things such as bikes, shoes, wheelsets; i.e. things with a determinable depreciation cost or maintenance periods """ name = models.CharField(max_length=50) cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2) acquired_date = models.DateField() disposal_date = models.DateField(blank=True, null=True) disposal_method = models.CharField(blank=True, max_length=7, choices=[ ('sold', 'sold'), ('donated', 'donated'), ('retired', 'retired'),# i.e. 'broken' ]) disposal_proceeds = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2) expected_lifespan = models.DurationField(blank=True, null=True) maintenance_interval = models.DurationField(blank=True, null=True) def history(self): return EquipmentMaintenance.objects.filter(equipment=self.id) def __unicode__(self): return self.name def __str__(self): return self.name class EquipmentMaintenance(models.Model): date = models.DateField() description = models.CharField(max_length=250) equipment = models.ForeignKey(Equipment) cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2) vendor = models.CharField(max_length=50, default='DIY') class EventType(models.Model): """ examples: '5k', 'Olympic', 'Criterium' """ event_type = models.CharField(max_length=20) sport = models.ForeignKey(Sport) class Meta: ordering = ['sport', 'event_type'] def __unicode__(self): return str(self.sport) + ': ' + self.event_type def __str__(self): return str(self.sport) + ': ' + self.event_type class Event(models.Model): name = models.CharField(max_length=35) location = models.CharField(max_length=50) event_type = models.ForeignKey(EventType, blank=True, null=True) bib_number = models.IntegerField(blank=True, null=True) dnf = models.BooleanField() finish_overall = models.IntegerField(blank=True, null=True) finishers_overall = models.IntegerField(blank=True, null=True) #maybe just use "handicapped" as the age group description?? finish_handicapped = models.IntegerField(blank=True, null=True) finish_gender = models.IntegerField(blank=True, null=True) finishers_gender = models.IntegerField(blank=True, null=True) finish_age_group = models.IntegerField(blank=True, null=True) finishers_age_group = models.IntegerField(blank=True, null=True) # category/age_group seem to be mutually-exclusive? category = models.CharField(max_length=10, blank=True, null=True) age_group = models.CharField(max_length=10, blank=True) results_url = models.URLField(blank=True, null=True) official_time = models.TimeField(blank=True, null=True) #used for total event time (brevets & triathlons) ## TODO: maybe this should be handled by multiple `Entry`s? 
# swim_distance = models.FloatField(blank=True) # bike_distance = models.FloatField(blank=True) # run_distance = models.FloatField(blank=True) # swim_time = models.TimeField(blank=True) # bike_time = models.TimeField(blank=True) # run_time = models.TimeField(blank=True) # t1_time = models.TimeField(blank=True) # t2_time = models.TimeField(blank=True) def get_absolute_url(self): return "/events?event=%d" % self.id def __unicode__(self): return self.name + ' ['+self.date.strftime('%b %d, %Y')+']' def __str__(self): return self.name + ' ['+self.date.strftime('%b %d, %Y')+']' class Entry(models.Model): #entry_id: date = models.DateField() sport = models.ForeignKey(Sport) event = models.ForeignKey(Event, blank=True, null=True) route = models.CharField(max_length=50, blank=True) # routes Model? notes = models.CharField(max_length=256, blank=True) equipment = models.ForeignKey(Equipment, blank=True, null=True) distance = models.FloatField(blank=True, null=True) time = models.TimeField(blank=True, null=True) avg_speed = models.FloatField(blank=True, null=True) max_speed = models.FloatField(blank=True, null=True) elevation_gain = models.IntegerField(blank=True, null=True) calories = models.IntegerField(blank=True, null=True) #pace: models.TimeField(blank=True, default=calc_pace(self.time/self.distance)) #could be calculated... class Meta: ordering = ['date', 'id'] def __unicode__(self): return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']' def __str__(self): return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']'
gpl-2.0
1,526,296,373,929,901,000
35.878378
109
0.662147
false
3.47422
false
false
false
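The Entry model above leaves pace as a commented-out idea ("could be calculated"). A minimal sketch of how it could be derived from the existing time and distance fields, in plain Python with no ORM access; field semantics (time per unit distance) are an assumption:

from datetime import time, timedelta

def pace(entry_time, distance):
    """Average pace per unit distance from a TimeField value and a float distance."""
    total = timedelta(hours=entry_time.hour, minutes=entry_time.minute,
                      seconds=entry_time.second).total_seconds()
    return timedelta(seconds=total / distance)

# A 50-minute 10 km run -> 0:05:00 per km.
print(pace(time(0, 50, 0), 10.0))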
chaosk/trinitee
trinitee/forums/migrations/0001_initial.py
1
14851
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Category' db.create_table('forums_category', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Category'], null=True, blank=True)), ('ordering', self.gf('django.db.models.fields.IntegerField')(default=1)), )) db.send_create_signal('forums', ['Category']) # Adding model 'Topic' db.create_table('forums_topic', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_topics', to=orm['auth.User'])), ('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='modified_topics', null=True, to=orm['auth.User'])), ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Category'])), ('is_closed', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_sticky', self.gf('django.db.models.fields.BooleanField')(default=False)), ('first_post', self.gf('django.db.models.fields.related.OneToOneField')(related_name='topic_root', unique=True, to=orm['forums.Post'])), )) db.send_create_signal('forums', ['Topic']) # Adding model 'Post' db.create_table('forums_post', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Topic'])), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_posts', to=orm['auth.User'])), ('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='modified_posts', null=True, to=orm['auth.User'])), ('show_edits', self.gf('django.db.models.fields.BooleanField')(default=True)), ('content', self.gf('django.db.models.fields.TextField')()), ('content_html', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal('forums', ['Post']) # Adding model 'PostKarma' db.create_table('forums_postkarma', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('post', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Post'])), ('karma', self.gf('django.db.models.fields.IntegerField')(default=0)), )) db.send_create_signal('forums', ['PostKarma']) # Adding unique constraint on 'PostKarma', fields ['user', 'post'] db.create_unique('forums_postkarma', ['user_id', 'post_id']) # Adding model 'Poll' db.create_table('forums_poll', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('created_by', 
self.gf('django.db.models.fields.related.ForeignKey')(related_name='polls_started', to=orm['auth.User'])), ('expires_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('question', self.gf('django.db.models.fields.CharField')(max_length=255)), ('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Topic'])), ('max_votes', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)), )) db.send_create_signal('forums', ['Poll']) # Adding model 'Choice' db.create_table('forums_choice', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Poll'])), ('choice', self.gf('django.db.models.fields.CharField')(max_length=255)), )) db.send_create_signal('forums', ['Choice']) # Adding model 'Vote' db.create_table('forums_vote', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Poll'])), ('choice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Choice'])), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), )) db.send_create_signal('forums', ['Vote']) # Adding unique constraint on 'Vote', fields ['poll', 'choice', 'user'] db.create_unique('forums_vote', ['poll_id', 'choice_id', 'user_id']) def backwards(self, orm): # Removing unique constraint on 'Vote', fields ['poll', 'choice', 'user'] db.delete_unique('forums_vote', ['poll_id', 'choice_id', 'user_id']) # Removing unique constraint on 'PostKarma', fields ['user', 'post'] db.delete_unique('forums_postkarma', ['user_id', 'post_id']) # Deleting model 'Category' db.delete_table('forums_category') # Deleting model 'Topic' db.delete_table('forums_topic') # Deleting model 'Post' db.delete_table('forums_post') # Deleting model 'PostKarma' db.delete_table('forums_postkarma') # Deleting model 'Poll' db.delete_table('forums_poll') # Deleting model 'Choice' db.delete_table('forums_choice') # Deleting model 'Vote' db.delete_table('forums_vote') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'forums.category': { 'Meta': {'object_name': 'Category'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ordering': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Category']", 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'forums.choice': { 'Meta': {'object_name': 'Choice'}, 'choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Poll']"}) }, 'forums.poll': { 'Meta': {'object_name': 'Poll'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polls_started'", 'to': "orm['auth.User']"}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'question': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Topic']"}) }, 'forums.post': { 'Meta': {'object_name': 'Post'}, 'content': ('django.db.models.fields.TextField', [], {}), 'content_html': ('django.db.models.fields.TextField', [], {}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_posts'", 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_posts'", 'null': 'True', 'to': "orm['auth.User']"}), 'show_edits': 
('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Topic']"}) }, 'forums.postkarma': { 'Meta': {'unique_together': "(('user', 'post'),)", 'object_name': 'PostKarma'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Post']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'forums.topic': { 'Meta': {'object_name': 'Topic'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Category']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_topics'", 'to': "orm['auth.User']"}), 'first_post': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'topic_root'", 'unique': 'True', 'to': "orm['forums.Post']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_topics'", 'null': 'True', 'to': "orm['auth.User']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'forums.vote': { 'Meta': {'unique_together': "(('poll', 'choice', 'user'),)", 'object_name': 'Vote'}, 'choice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Choice']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Poll']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) } } complete_apps = ['forums']
bsd-3-clause
-7,617,905,021,076,581,000
64.422907
182
0.570265
false
3.634606
false
false
false
pgroudas/pants
src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile_strategy.py
1
8647
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os from abc import ABCMeta, abstractmethod from collections import OrderedDict, defaultdict from twitter.common.collections import OrderedSet from pants.base.build_environment import get_buildroot, get_scm from pants.base.exceptions import TaskError from pants.util.dirutil import safe_delete class JvmCompileStrategy(object): """An abstract base strategy for JVM compilation.""" __metaclass__ = ABCMeta class CompileContext(object): """A context for the compilation of a target. This can be used to differentiate between a partially completed compile in a temporary location and a finalized compile in its permanent location. """ def __init__(self, target, analysis_file, classes_dir, sources): self.target = target self.analysis_file = analysis_file self.classes_dir = classes_dir self.sources = sources @property def _id(self): return (self.target, self.analysis_file, self.classes_dir) def __eq__(self, other): return self._id == other._id def __hash__(self): return hash(self._id) # Common code. # ------------ @staticmethod def _analysis_for_target(analysis_dir, target): return os.path.join(analysis_dir, target.id + '.analysis') @staticmethod def _portable_analysis_for_target(analysis_dir, target): return JvmCompileStrategy._analysis_for_target(analysis_dir, target) + '.portable' @classmethod @abstractmethod def register_options(cls, register, language, supports_concurrent_execution): """Registration for strategy-specific options. The abstract base class does not register any options itself: those are left to JvmCompile. """ pass def __init__(self, context, options, workdir, analysis_tools, language, sources_predicate): self._language = language self.context = context self._analysis_tools = analysis_tools # Mapping of relevant (as selected by the predicate) sources by target. self._sources_by_target = None self._sources_predicate = sources_predicate # The ivy confs for which we're building. self._confs = options.confs self._clear_invalid_analysis = options.clear_invalid_analysis @abstractmethod def name(self): """A readable, unique name for this strategy.""" pass @abstractmethod def invalidation_hints(self, relevant_targets): """A tuple of partition_size_hint and locally_changed targets for the given inputs.""" pass @abstractmethod def compile_context(self, target): """Returns the default/stable compile context for the given target. Temporary compile contexts are private to the strategy. """ pass @abstractmethod def compute_classes_by_source(self, compile_contexts): """Compute a map of (context->(src->classes)) for the given compile_contexts. It's possible (although unfortunate) for multiple targets to own the same sources, hence the top level division. Srcs are relative to buildroot. Classes are absolute paths. 
""" pass @abstractmethod def compile_chunk(self, invalidation_check, all_targets, relevant_targets, invalid_targets, extra_compile_time_classpath_elements, compile_vts, register_vts, update_artifact_cache_vts_work): """Executes compilations for that invalid targets contained in a single language chunk.""" pass @abstractmethod def post_process_cached_vts(self, cached_vts): """Post processes VTS that have been fetched from the cache.""" pass @abstractmethod def compute_resource_mapping(self, compile_contexts): """Computes a merged ResourceMapping for the given compile contexts. Since classes should live in exactly one context, a merged mapping is unambiguous. """ pass def pre_compile(self): """Executed once before any compiles.""" pass def validate_analysis(self, path): """Throws a TaskError for invalid analysis files.""" try: self._analysis_parser.validate_analysis(path) except Exception as e: if self._clear_invalid_analysis: self.context.log.warn("Invalid analysis detected at path {} ... pants will remove these " "automatically, but\nyou may experience spurious warnings until " "clean-all is executed.\n{}".format(path, e)) safe_delete(path) else: raise TaskError("An internal build directory contains invalid/mismatched analysis: please " "run `clean-all` if your tools versions changed recently:\n{}".format(e)) def prepare_compile(self, cache_manager, all_targets, relevant_targets): """Prepares to compile the given set of targets. Has the side effects of pruning old analysis, and computing deleted sources. """ # Target -> sources (relative to buildroot). # TODO(benjy): Should sources_by_target be available in all Tasks? self._sources_by_target = self._compute_sources_by_target(relevant_targets) def class_name_for_class_file(self, compile_context, class_file_name): assert class_file_name.endswith(".class") assert class_file_name.startswith(compile_context.classes_dir) class_file_name = class_file_name[len(compile_context.classes_dir) + 1:-len(".class")] return class_file_name.replace("/", ".") def _compute_sources_by_target(self, targets): """Computes and returns a map target->sources (relative to buildroot).""" def resolve_target_sources(target_sources): resolved_sources = [] for target in target_sources: if target.has_sources(): resolved_sources.extend(target.sources_relative_to_buildroot()) return resolved_sources def calculate_sources(target): sources = [s for s in target.sources_relative_to_buildroot() if self._sources_predicate(s)] # TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets. if hasattr(target, 'java_sources') and target.java_sources: sources.extend(resolve_target_sources(target.java_sources)) return sources return {t: calculate_sources(t) for t in targets} def _sources_for_targets(self, targets): """Returns a cached map of target->sources for the specified targets.""" if self._sources_by_target is None: raise TaskError('self._sources_by_target not computed yet.') return {t: self._sources_by_target.get(t, []) for t in targets} def _sources_for_target(self, target): """Returns the cached sources for the given target.""" if self._sources_by_target is None: raise TaskError('self._sources_by_target not computed yet.') return self._sources_by_target.get(target, []) def _find_locally_changed_targets(self, sources_by_target): """Finds the targets whose sources have been modified locally. Returns a list of targets, or None if no SCM is available. """ # Compute the src->targets mapping. 
There should only be one target per source, # but that's not yet a hard requirement, so the value is a list of targets. # TODO(benjy): Might this inverse mapping be needed elsewhere too? targets_by_source = defaultdict(list) for tgt, srcs in sources_by_target.items(): for src in srcs: targets_by_source[src].append(tgt) ret = OrderedSet() scm = get_scm() if not scm: return None changed_files = scm.changed_files(include_untracked=True, relative_to=get_buildroot()) for f in changed_files: ret.update(targets_by_source.get(f, [])) return list(ret) @property def _analysis_parser(self): return self._analysis_tools.parser # Compute any extra compile-time-only classpath elements. # TODO(benjy): Model compile-time vs. runtime classpaths more explicitly. # TODO(benjy): Add a pre-execute goal for injecting deps into targets, so e.g., # we can inject a dep on the scala runtime library and still have it ivy-resolve. def _compute_extra_classpath(self, extra_compile_time_classpath_elements): def extra_compile_classpath_iter(): for conf in self._confs: for jar in extra_compile_time_classpath_elements: yield (conf, jar) return list(extra_compile_classpath_iter())
apache-2.0
-8,624,641,274,634,726,000
36.925439
99
0.686481
false
4.141284
false
false
false
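The _find_locally_changed_targets helper above inverts the target->sources map so changed files can be traced back to their owning targets. A standalone sketch of that inversion step with made-up target and source names:

from collections import defaultdict

# Invert a target -> [sources] mapping into source -> [targets].
sources_by_target = {
    "lib/a": ["src/a1.java", "src/shared.java"],
    "lib/b": ["src/b1.java", "src/shared.java"],
}

targets_by_source = defaultdict(list)
for tgt, srcs in sources_by_target.items():
    for src in srcs:
        targets_by_source[src].append(tgt)

print(sorted(targets_by_source["src/shared.java"]))  # -> ['lib/a', 'lib/b']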
ruhan/django-silk-mongoengine
silk/profiling/profiler.py
1
6695
import inspect import logging import time import traceback from django.conf import settings from django.utils import timezone import six from silk.collector import DataCollector from silk.config import SilkyConfig from silk.models import _time_taken Logger = logging.getLogger('silk') # noinspection PyPep8Naming class silk_meta_profiler(object): """Used in the profiling of Silk itself.""" def __init__(self): super(silk_meta_profiler, self).__init__() self.start_time = None @property def _should_meta_profile(self): return SilkyConfig().SILKY_META def __enter__(self): if self._should_meta_profile: self.start_time = timezone.now() def __exit__(self, exc_type, exc_val, exc_tb): if self._should_meta_profile: end_time = timezone.now() exception_raised = exc_type is not None if exception_raised: Logger.error('Exception when performing meta profiling, dumping trace below') traceback.print_exception(exc_type, exc_val, exc_tb) request = getattr(DataCollector().local, 'request', None) if request: curr = request.meta_time or 0 request.meta_time = curr + _time_taken(self.start_time, end_time) def __call__(self, target): if self._should_meta_profile: def wrapped_target(*args, **kwargs): request = DataCollector().request if request: start_time = timezone.now() result = target(*args, **kwargs) end_time = timezone.now() curr = request.meta_time or 0 request.meta_time = curr + _time_taken(start_time, end_time) else: result = target(*args, **kwargs) return result return wrapped_target return target # noinspection PyPep8Naming class silk_profile(object): def __init__(self, name=None, _dynamic=False): super(silk_profile, self).__init__() self.name = name self.profile = None self._queries_before = None self._queries_after = None self._dynamic = _dynamic def _query_identifiers_from_collector(self): return [x for x in DataCollector().queries] def _start_queries(self): """record queries that have been executed before profiling began""" self._queries_before = self._query_identifiers_from_collector() def _end_queries(self): """record queries that have been executed after profiling has finished""" self._queries_after = self._query_identifiers_from_collector() def __enter__(self): if self._silk_installed() and self._should_profile(): with silk_meta_profiler(): self._start_queries() if not self.name: raise ValueError('silk_profile used as a context manager must have a name') frame = inspect.currentframe() frames = inspect.getouterframes(frame) outer_frame = frames[1] path = outer_frame[1] line_num = outer_frame[2] request = DataCollector().request self.profile = { 'name': self.name, 'file_path': path, 'line_num': line_num, 'dynamic': self._dynamic, 'request': request, 'start_time': timezone.now(), } else: Logger.warn('Cannot execute silk_profile as silk is not installed correctly.') def _finalise_queries(self): collector = DataCollector() self._end_queries() assert self.profile, 'no profile was created' diff = set(self._queries_after).difference(set(self._queries_before)) self.profile['queries'] = diff collector.register_profile(self.profile) # noinspection PyUnusedLocal def __exit__(self, exc_type, exc_val, exc_tb): if self._silk_installed() and self._should_profile(): with silk_meta_profiler(): start_time = None exception_raised = exc_type is not None self.profile['exception_raised'] = exception_raised self.profile['end_time'] = timezone.now() self._finalise_queries() def _silk_installed(self): app_installed = 'silk' in settings.INSTALLED_APPS middleware_installed = 'silk.middleware.SilkyMiddleware' in 
settings.MIDDLEWARE_CLASSES return app_installed and middleware_installed def _should_profile(self): return DataCollector().request is not None def __call__(self, target): if self._silk_installed(): def wrapped_target(*args, **kwargs): with silk_meta_profiler(): try: func_code = six.get_function_code(target) except AttributeError: raise NotImplementedError('Profile not implemented to decorate type %s' % target.__class__.__name__) line_num = func_code.co_firstlineno file_path = func_code.co_filename func_name = target.__name__ if not self.name: self.name = func_name self.profile = { 'func_name': func_name, 'name': self.name, 'file_path': file_path, 'line_num': line_num, 'dynamic': self._dynamic, 'start_time': timezone.now(), 'request': DataCollector().request } self._start_queries() try: result = target(*args, **kwargs) except Exception: self.profile['exception_raised'] = True raise finally: with silk_meta_profiler(): self.profile['end_time'] = timezone.now() self._finalise_queries() return result return wrapped_target else: Logger.warn('Cannot execute silk_profile as silk is not installed correctly.') return target def distinct_queries(self): queries = [x for x in self._queries_after if not x in self._queries_before] return queries @silk_profile() def blah(): time.sleep(1) if __name__ == '__main__': blah()
mit
-1,680,985,588,628,297,200
35.391304
124
0.542644
false
4.579343
false
false
false
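silk_profile above doubles as a context manager and a decorator. A toy timer showing the same dual-role pattern in isolation (the class and timings here are illustrative, not part of silk):

import time
from functools import wraps

class timed(object):
    """Usable both as a context manager and as a decorator, like silk_profile."""
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print("%s took %.3fs" % (self.name, time.time() - self.start))

    def __call__(self, target):
        @wraps(target)
        def wrapped(*args, **kwargs):
            with self:
                return target(*args, **kwargs)
        return wrapped

with timed("block"):
    time.sleep(0.1)

@timed("function")
def work():
    time.sleep(0.1)

work()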
sandz-in/twilio_trello
twilio_sms_handler/views.py
1
1174
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.http import HttpResponse
from django.views.decorators.http import require_POST

from twilio.twiml.messaging_response import MessagingResponse

# Create your views here.
from twilio_sms_handler.TrelloQuery import TrelloQuery
from twilio_trello.twilio_util import validate_twilio_request
from django.views.decorators.csrf import csrf_exempt


@require_POST
@validate_twilio_request
@csrf_exempt
def sms_view(request):
    """Twilio Messaging URL - receives incoming messages from Twilio"""
    # Create a new TwiML response
    resp = MessagingResponse()

    # <Message> a text back to the person who texted us
    text = request.POST['Body']
    split_text = text.lower().split(" ")
    if len(split_text) < 2:
        body = '''1)get boards 2)get lists <board-no> 3)get cards <board-no:list-no> '''
    else:
        trello_query = TrelloQuery()
        action = '_'.join(split_text[:2])
        try:
            body = getattr(trello_query, action)(split_text[2:])
        except:
            body = "Incorrect input!!"
    resp.message(body)

    # Return the TwiML
    return HttpResponse(resp)
mit
3,368,238,826,495,782,000
29.102564
71
0.683986
false
3.463127
false
false
false
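The SMS view above turns the first two words of the message into a method name and dispatches with getattr. A self-contained sketch of that dispatch pattern; the Commands class is a made-up stand-in for TrelloQuery:

class Commands(object):
    """Stand-in for TrelloQuery: the first two words of the SMS pick a method."""
    def get_boards(self, args):
        return "boards: ..."
    def get_lists(self, args):
        return "lists for board %s" % args[0]

def dispatch(text):
    words = text.lower().split(" ")
    if len(words) < 2:
        return "1)get boards 2)get lists <board-no> 3)get cards <board-no:list-no>"
    try:
        return getattr(Commands(), "_".join(words[:2]))(words[2:])
    except Exception:
        return "Incorrect input!!"

print(dispatch("get boards"))         # -> boards: ...
print(dispatch("get lists 3"))        # -> lists for board 3
print(dispatch("delete everything"))  # -> Incorrect input!!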
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractKeztranslationsWordpressCom.py
1
1210
def extractKeztranslationsWordpressCom(item):
    ''' Parser for 'keztranslations.wordpress.com' '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    tagmap = [
        ('FOD', 'Quickly Wear the Face of the Devil', 'translated'),
        ('ABO', 'ABO Cadets', 'translated'),
        ('dfc', 'The First Dragon Convention', 'translated'),
        ('ogu', 'My Family’s Omega Has Just Grown Up', 'translated'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    titlemap = [
        ('ABO Vol', 'ABO Cadets', 'translated'),
        ('FOD Chapter', 'Quickly Wear the Face of the Devil', 'translated'),
        ('FOD Chap', 'Quickly Wear the Face of the Devil', 'translated'),
    ]

    for titlecomponent, name, tl_type in titlemap:
        if titlecomponent.lower() in item['title'].lower():
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
bsd-3-clause
-7,868,676,419,566,919,000
36.78125
104
0.620861
false
3.212766
false
false
false
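The feed parser above checks an item's tags against tagmap first and falls back to title substrings via titlemap. A tiny sketch of the tag lookup with a fabricated feed item, just to show what shape of input the parser expects:

tagmap = [
    ('FOD', 'Quickly Wear the Face of the Devil', 'translated'),
    ('ABO', 'ABO Cadets', 'translated'),
]

item = {'title': 'FOD Chapter 12', 'tags': ['FOD', 'release']}

for tagname, name, tl_type in tagmap:
    if tagname in item['tags']:
        print(name + " / " + tl_type)  # -> Quickly Wear the Face of the Devil / translated
        break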
dvida/UWO-PA-Python-Course
Lecture 3/L3_lecture.py
1
5075
from __future__ import print_function ### READING FILES file_name = 'data.txt' # Reading in and parsing file contents data_list = [] with open(file_name) as f: # SKip the header (the first line) next(f) for line in f: # Remove newline char line = line.replace('\n', '') # Split the line into a list by a comma line = line.split(',') # Parse the line num = line[0] name = line[1].strip() epoch = int(line[2]) elements = list(map(float, line[3:9])) ref = line[9] # Add the line to the data list data_list.append([num, name, epoch, elements, ref]) print(num, name, epoch, elements, ref) ################################### print(data_list) # Wile E. Coyote rewrites history... for line in data_list: line[1] = 'Coyote' print(data_list) # But before we write the data back to disk... ################################### ### STRING FORMATTING ### Note for the lecture: ### C/P and explain how formatting works # Converting floats to strings x = 3.14159 print('{:4.2f}'.format(x)) # Signed formatting print('{:+5.2f}'.format(x)) # Zero padding print('{:06.2f}'.format(x)) # More decimals print('{:7.5f}'.format(x)) # More decimal places than the number precision y = 2.71 print('{:7.5f}'.format(y)) # Less decimal precision, but same size -> left padding print('{:7.2f}'.format(y)) # Integers (same singed and zero padding rules) z = 42 print('{:7d}'.format(z)) # Strings print('{:10}'.format('wile e')) # Align to the right print('{:>10}'.format('wile e')) # Named agruments print("{a} {b} {c}".format(a=5, b=8, c=10)) ################################### ### WRITING FILES # Writing the data back to the list new_file_name = 'true_data.txt' # Open a file for writing (if a file with the same name exists, it will erase its content!) with open(new_file_name, 'w') as f: # Write the header f.write('Num,Name,Epoch,q,e,i,w,Node,Tp,Ref\n') for line in data_list: # Composing a string str_line = ['{:>3}'.format(line[0]), line[1], '{:5d}'.format(line[2])] # Convert all elemets using the same format for element in line[3]: str_line.append('{:.3f}'.format(element)) # Add the reference str_line.append(line[-1]) print(str_line) # Convert the list to a comma delimited string final_line = ','.join(str_line) # Write the line f.write(final_line+'\n') ################################### # Appending to a file with open(new_file_name, 'a') as f: f.write('Wile E. was here') ################################### ### PYTHON MODULES # Python standard library: https://docs.python.org/3/library/ import math # Sqrt print(math.sqrt(2)) # Sine print(math.sin(math.pi)) # Log10 print(math.log10(100)) # Random module import random # Random integer in the 1 to 100 range print(random.randint(1, 100)) # Random float in the 0 to 1 range print(random.random()) # Shuffle a list a = [1, 2, 3, 4, 5] random.shuffle(a) print(a) # Sample 10 elements from a list b = range(1, 100) print(random.sample(b, 10)) # Sampling a gaussian distribution for i in range(10): print(random.gauss(0, 2)) ################################### ### Ways of importing modules # Module alias import math as m print(m.sqrt(2)) # Importing individual functions - PREFERED! from math import sqrt print(sqrt(2)) # Importing all functions from a module - NOT RECOMMENDED! 
from math import * print(sqrt(2)) print(pi) ################################### # FILE HANDLING - os library import os # Listing the contents of the current directory print(os.listdir('.')) # Printing the current directory print(os.getcwd()) # Changing the current directory one up os.chdir('..') print(os.getcwd()) # Directory separator # DO NOT USE / or \ print(os.sep) ### Making a new directory # Construct a new path to the directory new_dir_path = os.path.join(os.getcwd(), 'test') print(new_dir_path) # Make new dir if the dir does not exist if not os.path.exists(new_dir_path): os.mkdir(new_dir_path) else: print('The directory already exists!') ### # Make an example file in the new directory file_name = 'top_secret.txt' file_path = os.path.join(new_dir_path, file_name) with open(file_path, 'w') as f: pass # Delete the file if os.path.isfile(file_path): os.remove(file_path) else: print('The file does not exist!') ################################### # FILE HANDLING - shutil library import shutil # Make an example file with open(file_path, 'w') as f: pass # Copying files copy_path = 'unclassified.txt' shutil.copy2(file_path, copy_path) # Moving/renaming files new_name = 'public_release.txt' shutil.move(copy_path, new_name)
mit
-3,814,201,120,210,156,500
17.909804
91
0.575369
false
3.199874
false
false
false
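The lecture file in the record above walks through Python's format-spec mini-language one print at a time. A compact recap of the same specifiers, runnable on its own (the values and the CSV-style row below are illustrative, not taken from the course data file):

# Recap of the format specifiers used in the lecture (illustrative values).
x = 3.14159
print('{:4.2f}'.format(x))     # '3.14'    -> minimum width 4, 2 decimals
print('{:+5.2f}'.format(x))    # '+3.14'   -> forced sign
print('{:06.2f}'.format(x))    # '003.14'  -> zero padding to width 6
print('{:7d}'.format(42))      # '     42' -> integer right-aligned in width 7
print('{:>10}'.format('wile e'))             # right-aligned string in width 10
print('{a} {b} {c}'.format(a=5, b=8, c=10))  # named arguments
# The same specifiers compose comma-delimited lines like the ones written in the lecture:
row = ['{:>3}'.format('1'), 'Coyote', '{:5d}'.format(1990)] + \
      ['{:.3f}'.format(v) for v in (0.587, 0.967, 162.3)]
print(','.join(row))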
terna/SLAPP3
6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/$$slapp$$/txtxFunctions.py
1
1888
import os def executeFormula(fIn, fOu, nrow, n, s): # v=0 #init. not required; it can interfere with the try/except structure pos = s.find("v") if pos == -1: print("missing 'v' in formula, row", nrow, "\nexecution stopped in error") fIn.close() fOu.close() os.sys.exit(1) pos = s.find("=") if pos == -1: print("missing '=' in formula, row", nrow, "\nexecution stopped in error") fIn.close() fOu.close() os.sys.exit(1) try: while s[0] == ' ': if s[0] == ' ': s = s[1:] pos = s.find('\n') # eliminating spaces after \n (formerly #) if any if pos != -1: while s[pos + 1] == ' ': s = s[:pos + 1] + s[pos + 2:] # print "[",n, s,"]", d = dict([('n', n), ('v', 0)]) exec(s, d) v = d['v'] return str(v) except BaseException: print("error in formula, row", nrow, "\nexecution stopped in error") fIn.close() fOu.close() os.sys.exit(1) def fill(s): s = list(s) if s == "": return s change = False s = list(s) for i in range(len(s)): if s[i] == '&': if not change: change = True else: change = False if s[i] == ' ' and change: s[i] = '&' return "".join(s) def splitUnfill(s): if s == "": return s # print s s = s.split() # print s for i in range(len(s)): s_tmp = list(s[i]) # print s_tmp, len(s_tmp) for j in range(len(s_tmp)): if s_tmp[j] == "&": s_tmp[j] = ' ' if s_tmp[j] == "#": s_tmp[j] = '\n' # inserting \n sign # print s_tmp s[i] = "".join(s_tmp) return s
cc0-1.0
2,214,278,890,246,831,600
21.211765
77
0.417373
false
3.238422
false
false
false
attdona/NAIS
pynais/msg.py
1
2142
import struct import pynais as ns class Profile: def __init__(self, uid=None, pwd=None): self.uid = uid self.pwd = pwd def __str__(self): return "Profile uid: [%s], pwd: [%s]" % (self.uid, self.pwd) def set_protobuf(self, obj): obj.uid = self.uid obj.pwd = self.pwd def build_from_protobuf(self, obj): self.uid = obj.uid self.pwd = obj.pwd return self class Config: """ board configuration items and connection parameters """ def __init__(self, network="local", board="", host='localhost', port=1883, alive_period=None, secure=False): self.network = network self.board = board self.host = host self.port = port self.alive_period = alive_period self.secure = secure def __str__(self): return "Config network: [%s], board: [%s], remote: [%s:%d]" % ( self.network, self.board, self.host, self.port) def set_protobuf(self, obj): obj.network = self.network obj.board = self.board obj.host = self.host obj.port = self.port if self.alive_period: obj.alive_period = self.alive_period obj.secure = self.secure def build_from_protobuf(self, obj): self.network = obj.network self.host = obj.host self.board = obj.board self.port = obj.port self.alive_period = obj.alive_period self.secure = obj.secure return self class Ack: """ a message acknowledgement Args: id (int): message request identifier (packet.id field value) """ def __init__(self, id=None, sts=None): self.id = id self.sts = sts def __str__(self): return "Ack ([%s] - sts:[%s])" % (ns.msg_type(self.id), self.sts) def set_protobuf(self, obj): obj.id = self.id if not self.sts==None: obj.status = self.sts def build_from_protobuf(self, obj): self.id = obj.id if (obj.HasField('status')): self.sts = obj.status return self
gpl-3.0
-5,864,915,119,413,480,000
25.775
78
0.548086
false
3.725217
false
false
false
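The Profile/Config/Ack wrappers above only copy fields to and from generated protobuf messages. A minimal sketch of that round trip using a hypothetical stand-in object in place of the generated class (the real message classes come from pynais' .proto definitions, which are not part of this record):

class FakeAckPb(object):
    """Hypothetical stand-in mimicking the generated protobuf Ack message."""
    def __init__(self):
        self.id = None
        self.status = None

    def HasField(self, name):
        # mirrors the protobuf API used by Ack.build_from_protobuf above
        return getattr(self, name) is not None

# Assuming msg.py is importable as shown above:
# ack = Ack(id=3, sts=True)
# pb = FakeAckPb()
# ack.set_protobuf(pb)                   # copies id/status onto the message
# ack2 = Ack().build_from_protobuf(pb)   # rebuilds the wrapper from the message
# assert ack2.id == 3 and ack2.sts is True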
liuslevis/handwrite_dataset_generator
3_gen_digit_data_label.py
1
2918
import os import shutil DEBUG = False img_suffix = ['.jpeg','.jpg','.png','.tiff'] def gen_img_unique_file_name(count,total): assert(count<=total) name = '' for i in range(len(str(total)) - len(str(count))): name+='0' offset = str(count) name+=offset return name def copy_img_file(src_path,save_data_path,filename): if DEBUG: print src_path,filename print src_path,filename if not os.path.isdir(save_data_path): os.mkdir(save_data_path) shutil.copyfile(src_path, os.path.join(save_data_path, filename)) def count_img_under_dir(path): count = 0 for label_paths in os.listdir(path): label_path = os.path.join(path, label_paths) if os.path.isdir(label_path) and label_path[-1] >= '0' and label_path[-1] <= '9' : label = int(label_path[-1]) assert( label >= 0 and label <=9) for digit_img in os.listdir(label_path): count+=1 return count def gen_label_file(dict,save_label_path): label_list = [] for label in dict.keys(): times = dict.get(label) print 'digit:',label,' has ',times,' imgs' label_list+=[label for i in range(times)] content = '' for label in label_list: content += str(label) + '\n' with open(save_label_path,'w') as f: f.write(content); f.close() print 'gen_label_file:',save_label_path def main(): save_label_path = './4_dataset/testLabel.txt' save_data_path = './4_dataset/' rootDir ='./3_cropped' dict = {} # store num of each digit labels total = count_img_under_dir(rootDir) uid = 0 suffix = "" print 'total_img:',total for label_paths in os.listdir(rootDir): label_path = os.path.join(rootDir, label_paths) if os.path.isdir(label_path) and label_path[-1] >= '0' and label_path[-1] <= '9' : label = int(label_path[-1]) if DEBUG: print '--------------label:%d--------------'%label assert( label >= 0 and label <=9) for img_path in os.listdir(label_path): if DEBUG: print img_path if suffix not in img_suffix: (filepath,filename)=os.path.split(img_path) suffix = os.path.splitext(filename)[-1] if suffix in img_suffix: count = dict.get(label) if None == count: dict.update({label:1}) else: count += 1 dict.update({label:count}) uid+=1 save_name = gen_img_unique_file_name(uid,total) copy_img_file(os.path.join(label_path,img_path),save_data_path, save_name+suffix ) print 'database' gen_label_file(dict,save_label_path) if __name__ == '__main__': main()
mit
667,820,483,972,718,700
29.726316
102
0.536326
false
3.567237
false
false
false
nrz/ylikuutio
external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py
2
2810
"""Randomize the minitaur_gym_alternating_leg_env when reset() is called. The randomization include swing_offset, extension_offset of all legs that mimics bent legs, desired_pitch from user input, battery voltage and motor damping. """ import os, inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(os.path.dirname(currentdir)) parentdir = os.path.dirname(os.path.dirname(parentdir)) os.sys.path.insert(0, parentdir) import numpy as np import tf.compat.v1 as tf from pybullet_envs.minitaur.envs import env_randomizer_base # Absolute range. NUM_LEGS = 4 BATTERY_VOLTAGE_RANGE = (14.8, 16.8) MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01) class MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase): """A randomizer that changes the minitaur_gym_alternating_leg_env.""" def __init__(self, perturb_swing_bound=0.1, perturb_extension_bound=0.1, perturb_desired_pitch_bound=0.01): super(MinitaurAlternatingLegsEnvRandomizer, self).__init__() self.perturb_swing_bound = perturb_swing_bound self.perturb_extension_bound = perturb_extension_bound self.perturb_desired_pitch_bound = perturb_desired_pitch_bound def randomize_env(self, env): perturb_magnitude = np.random.uniform(low=-self.perturb_swing_bound, high=self.perturb_swing_bound, size=NUM_LEGS) env.set_swing_offset(perturb_magnitude) tf.logging.info("swing_offset: {}".format(perturb_magnitude)) perturb_magnitude = np.random.uniform(low=-self.perturb_extension_bound, high=self.perturb_extension_bound, size=NUM_LEGS) env.set_extension_offset(perturb_magnitude) tf.logging.info("extension_offset: {}".format(perturb_magnitude)) perturb_magnitude = np.random.uniform(low=-self.perturb_desired_pitch_bound, high=self.perturb_desired_pitch_bound) env.set_desired_pitch(perturb_magnitude) tf.logging.info("desired_pitch: {}".format(perturb_magnitude)) randomized_battery_voltage = np.random.uniform(BATTERY_VOLTAGE_RANGE[0], BATTERY_VOLTAGE_RANGE[1]) env.minitaur.SetBatteryVoltage(randomized_battery_voltage) tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage)) randomized_motor_damping = np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0], MOTOR_VISCOUS_DAMPING_RANGE[1]) env.minitaur.SetMotorViscousDamping(randomized_motor_damping) tf.logging.info("motor_damping: {}".format(randomized_motor_damping))
agpl-3.0
-7,600,583,277,604,451,000
45.065574
86
0.666904
false
3.39372
false
false
false
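The randomizer above draws every perturbation from uniform ranges. A numpy-only sketch of the same sampling outside of pybullet, with the bounds copied from the class defaults (the env object itself is not needed to see the shapes involved):

import numpy as np

NUM_LEGS = 4
perturb_swing_bound = 0.1
perturb_extension_bound = 0.1
perturb_desired_pitch_bound = 0.01

swing_offset = np.random.uniform(-perturb_swing_bound, perturb_swing_bound, size=NUM_LEGS)
extension_offset = np.random.uniform(-perturb_extension_bound, perturb_extension_bound, size=NUM_LEGS)
desired_pitch = np.random.uniform(-perturb_desired_pitch_bound, perturb_desired_pitch_bound)
battery_voltage = np.random.uniform(14.8, 16.8)      # BATTERY_VOLTAGE_RANGE
motor_damping = np.random.uniform(0.0, 0.01)          # MOTOR_VISCOUS_DAMPING_RANGE

print(swing_offset, extension_offset, desired_pitch, battery_voltage, motor_damping)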
dreaming-dog/kaldi-long-audio-alignment
scripts/classes/entry_manager.py
1
2096
# Copyright 2017 Speech Lab, EE Dept., IITM (Author: Srinivas Venkattaramanujam)
from entry import Entry


class EntryManager:
    __statuses__ = ['PENDING', 'DONE']

    def __init__(self):
        self.entries = []

    def add_entry(self, entry):
        # Problem: add new entries to the existing list such that:
        # 1) the start and end time of an entry is not the same
        # 2) all the words in the range are covered
        # 3) if two consecutive entries have the same status, they are merged
        # Trivial case: if the list is empty, simply add to the list.
        # Edge case: while merging, if there is a status change, the previous entry has to be
        # checked as well, so do not merge in place; remove the last entry, make the changes
        # and re-insert the merged entry.
        if len(self.entries) == 0:
            self.entries.append(entry)
        else:
            # assert that (last word + 1) of the previous entry matches the first word of the current entry
            try:
                assert (self.entries[-1].word_end + 1) == entry.word_begin
            except AssertionError:
                print "Words are not continuous in ", self.entries[-1], " and ", entry
                exit(1)
            # check whether the entry has to be merged; if not, just insert it
            if (entry.begin_time != entry.end_time
                    and self.entries[-1].status != entry.status
                    and (entry.end_time - entry.begin_time) >= 0.1):
                self.entries.append(entry)
            else:
                # merge case
                prev_entry = self.entries[-1]
                self.entries = self.entries[:-1]
                entry = self.__merge__(prev_entry, entry)
                return self.add_entry(entry)

    def __min_status__(self, status1, status2):
        _list = [EntryManager.__statuses__.index(status1),
                 EntryManager.__statuses__.index(status2)]
        return EntryManager.__statuses__[min(_list)]

    def __merge__(self, prev_entry, entry):
        return Entry(prev_entry.begin_time, entry.end_time,
                     self.__min_status__(prev_entry.status, entry.status),
                     prev_entry.word_begin, entry.word_end)

    def print_entries(self):
        # print the entries
        for e in self.entries:
            print e.begin_time, e.end_time, e.status, e.word_begin, e.word_end
apache-2.0
-5,431,410,990,582,398,000
42.666667
169
0.694179
false
3.114413
false
false
false
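add_entry above merges zero-length or same-status segments while insisting that word indices stay contiguous. A hedged driver (Python 2, matching the module); the Entry constructor signature (begin_time, end_time, status, word_begin, word_end) is inferred from __merge__ and print_entries, since entry.py itself is not part of this record:

# Hypothetical usage, assuming entry.py provides Entry(begin_time, end_time, status, word_begin, word_end):
# from entry import Entry
# from entry_manager import EntryManager
#
# manager = EntryManager()
# manager.add_entry(Entry(0.0, 1.2, 'DONE', 0, 3))
# manager.add_entry(Entry(1.2, 1.2, 'DONE', 4, 4))     # zero-length span -> merged into the previous entry
# manager.add_entry(Entry(1.2, 2.5, 'PENDING', 5, 9))  # status change with a real duration -> kept separate
# manager.print_entries()
# # 0.0 1.2 DONE 0 4
# # 1.2 2.5 PENDING 5 9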
hirochachacha/apython
bpython/completion/completers/import_completer.py
1
9006
# The MIT License # # Copyright (c) 2009-2011 Andreas Stuehrk # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import with_statement import imp import itertools import os import sys import warnings try: from warnings import catch_warnings except ImportError: import contextlib @contextlib.contextmanager def catch_warnings(): """Stripped-down version of `warnings.catch_warnings()` (available in Py >= 2.6).""" filters = warnings.filters warnings.filters = list(filters) try: yield finally: warnings.filters = filters from bpython._py3compat import PY3 from six import next # The cached list of all known modules modules = dict() sorted_modules = [] fully_loaded = False def get_object(cw, line): if not cw: cw = "" tokens = line.split() completing_from = False if len(tokens) == 1: return if tokens[0] == 'from': if len(tokens) > 3: if '.' 
in cw: # This will result in a SyntaxError, so do not return # any matches return None completing_from = True cw = '%s.%s' % (tokens[1], cw) elif len(tokens) == 3: if 'import '.startswith(cw): return None else: # Will result in a SyntaxError return None match_objects = list() for name in sorted_modules: if not (name == cw and name.find('.', len(cw)) == -1): continue try: obj = sys.modules[name] except: if modules[name].endswith('.pyc'): f = modules[name][:-1] if os.path.isfile(f): obj = f else: obj = None else: obj = None if completing_from: name = name[len(tokens[1]) + 1:] try: obj = getattr(obj, name) except: obj = None match_objects.append(obj) if completing_from and tokens[1] in sys.modules: # from x import y -> search for attributes starting with y if # x is in sys.modules _, _, cw = cw.rpartition('.') module = sys.modules[tokens[1]] names = [name for name in dir(module) if name == cw] objects = [getattr(module, name) for name in names] match_objects.extend(objects) elif len(tokens) == 2: # from x.y or import x.y -> search for attributes starting # with y if x is in sys.modules and the attribute is also in # sys.modules module_name, _, cw = cw.rpartition('.') if module_name in sys.modules: module = sys.modules[module_name] for name in dir(module): if name != cw: continue submodule_name = '%s.%s' % (module_name, name) if submodule_name in sys.modules: match_objects.append(sys.modules[submodule_name]) if not match_objects: return None return match_objects[0] def complete(cw, line): """Construct a full list of possibly completions for imports.""" if not cw: return None tokens = line.split() completing_from = False if tokens[0] == 'from': if len(tokens) > 3: if '.' in cw: # This will result in a SyntaxError, so do not return # any matches return None completing_from = True cw = '%s.%s' % (tokens[1], cw) elif len(tokens) == 3: if 'import '.startswith(cw): return ['import '] else: # Will result in a SyntaxError return None matches = list() for name in sorted_modules: if not (name.startswith(cw) and name.find('.', len(cw)) == -1): continue if completing_from: name = name[len(tokens[1]) + 1:] matches.append(name) if completing_from and tokens[1] in sys.modules: # from x import y -> search for attributes starting with y if # x is in sys.modules _, _, cw = cw.rpartition('.') module = sys.modules[tokens[1]] names = [name for name in dir(module) if name.startswith(cw)] matches.extend(names) elif len(tokens) == 2: # from x.y or import x.y -> search for attributes starting # with y if x is in sys.modules and the attribute is also in # sys.modules module_name, _, cw = cw.rpartition('.') if module_name in sys.modules: module = sys.modules[module_name] for name in dir(module): if not name.startswith(cw): continue submodule_name = '%s.%s' % (module_name, name) if submodule_name in sys.modules: matches.append(submodule_name) if not matches: return [] return matches def find_modules(path): """Find all modules (and packages) for a given directory.""" if not os.path.isdir(path): # Perhaps a zip file return try: filenames = os.listdir(path) except EnvironmentError: filenames = [] for name in filenames: filename = name if not any(name.endswith(suffix[0]) for suffix in imp.get_suffixes()): # Possibly a package if '.' in name: continue elif os.path.isdir(os.path.join(path, name)): # Unfortunately, CPython just crashes if there is a directory # which ends with a python extension, so work around. 
continue for suffix in imp.get_suffixes(): if name.endswith(suffix[0]): name = name[:-len(suffix[0])] break if PY3 and name == "badsyntax_pep3120": # Workaround for issue #166 continue try: with catch_warnings(): warnings.simplefilter("ignore", ImportWarning) fo, pathname, _ = imp.find_module(name, [path]) except (ImportError, IOError, SyntaxError): continue except UnicodeEncodeError: # Happens with Python 3 when there is a filename in some # invalid encoding continue else: if fo is not None: fo.close() else: # Yay, package for subname, filename in find_modules(pathname): if subname != '__init__': yield '%s.%s' % (name, subname), os.path.join(pathname, filename) yield name, filename def find_all_modules(path=None): """Return a list with all modules in `path`, which should be a list of directory names. If path is not given, sys.path will be used.""" global sorted_modules i = itertools.repeat(None) if path is None: d = dict(zip(sys.builtin_module_names, i)) modules.update(d) path = sys.path for p in path: if not p: p = os.curdir for module, filename in find_modules(p): if not PY3 and not isinstance(module, unicode): try: module = module.decode(sys.getfilesystemencoding()) except UnicodeDecodeError: # Not importable anyway, ignore it continue modules[module] = os.path.join(p, filename) sorted_modules = sorted(modules) yield def find_coroutine(): global fully_loaded if fully_loaded: return None try: next(find_iterator) except StopIteration: fully_loaded = True return True def reload(): """Refresh the list of known modules.""" modules.clear() for _ in find_all_modules(): pass find_iterator = find_all_modules()
mit
6,365,923,528,338,857,000
31.989011
89
0.563957
false
4.406067
false
false
false
julzhk/codekata
instant_runoff_voting.py
1
1947
from collections import Counter


def runoff(voters):
    """
    a function that calculates an election winner from a list of voter selections using an
    Instant Runoff Voting algorithm.
    https://en.wikipedia.org/wiki/Instant-runoff_voting

    Each voter selects several candidates in order of preference.
    The votes are tallied from each voter's first choice.
    If the first-place candidate has more than half the total votes, they win.
    Otherwise, find the candidate who got the least votes and remove them from each person's voting list.
    In case of a tie for least, remove all of the tying candidates.
    In case of a complete tie between every candidate, return None.
    Continue until somebody has more than half the votes; they are the winner.

    The function takes a list of voter ballots; each ballot will be a list of
    candidates in descending order of preference.
    Returns the symbol corresponding to the winning candidate.
    """
    removed_candidates = set()
    while True:
        # tally each voter's highest-ranked candidate that is still in the race
        this_round_votes = []
        for ballot in voters:
            for candidate in ballot:
                if candidate not in removed_candidates:
                    this_round_votes.append(candidate)
                    break
        if not this_round_votes:
            # every candidate has been knocked out
            return None
        tally = Counter(this_round_votes)
        leader, leader_votes = tally.most_common(1)[0]
        if leader_votes * 2 > len(this_round_votes):
            return leader
        lowest_vote = min(tally.values())
        if lowest_vote == max(tally.values()):
            # complete tie between every remaining candidate
            return None
        knockout_candidates = [candidate for candidate in tally
                               if tally[candidate] == lowest_vote]
        removed_candidates |= set(knockout_candidates)


voters = [
    ['c', 'a', 'b', 'd', 'e'],
    ['b', 'e', 'd', 'c', 'a'],
    ['b', 'e', 'c', 'a', 'd'],
    ['d', 'b', 'c', 'a', 'e'],
    ['c', 'b', 'd', 'a', 'e']
]
assert(runoff(voters) == "b")
mit
-6,354,312,978,943,293,000
42.266667
113
0.659476
false
3.427817
false
false
false
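Two more checks that follow directly from the docstring above, reusing the runoff function defined in that file (the ballots are made up for illustration): a first-round majority and a complete tie.

# First-round majority: 'a' holds 2 of 3 first-choice votes.
assert runoff([['a', 'b'], ['a', 'b'], ['b', 'a']]) == 'a'
# Complete tie between every candidate -> None.
assert runoff([['a', 'b'], ['b', 'a']]) is None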
factorlibre/carrier-delivery
delivery_carrier_ups/model/ups_config.py
1
2330
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2015 FactorLibre (http://www.factorlibre.com) # Hugo Santos <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields, api UPS_LABEL_FORMAT = [ ('EPL', 'EPL'), ('ZPL', 'ZPL'), ('GIF', 'GIF'), ('STARPL', 'STARPL'), ('SPL', 'SPL') ] class UPSConfig(models.Model): _name = 'ups.config' @api.model def _ups_weight_uom(self): return [ ('KGS', 'KGS'), ('LBS', 'LBS') ] @api.model def _ups_dimension_uom(self): return [ ('CM', 'CM'), ('IN', 'IN') ] @api.model def _ups_label_file_format(self): return UPS_LABEL_FORMAT name = fields.Char('UPS Config Name', required=True) is_test = fields.Boolean('Is a test?') username = fields.Char('UPS Username', required=True) password = fields.Char('UPS Password', required=True) access_license = fields.Char('UPS Access license', required=True) shipper_number = fields.Char('UPS Shipper number', required=True) weight_uom = fields.Selection('_ups_weight_uom', required=True, default="KGS") dimension_uom = fields.Selection('_ups_dimension_uom', required=True, default='CM') label_file_format = fields.Selection('_ups_label_file_format', required=True, default='EPL')
agpl-3.0
545,612,102,985,848,500
34.30303
78
0.566953
false
3.982906
false
false
false
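ups.config above is a plain Odoo/OpenERP model, so records are created through the ORM like any other model. A minimal sketch, only meaningful inside a running Odoo environment; every credential value below is a placeholder:

# Inside server-side code with an Odoo environment available (e.g. a method on another model):
# config = self.env['ups.config'].create({
#     'name': 'UPS test account',
#     'is_test': True,
#     'username': 'ups_user',          # placeholder
#     'password': 'ups_password',      # placeholder
#     'access_license': 'XXXXXXXX',    # placeholder
#     'shipper_number': '123456',      # placeholder
#     # weight_uom defaults to 'KGS', dimension_uom to 'CM', label_file_format to 'EPL'
# })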
wenhuchen/ETHZ-Bootstrapped-Captioning
visual-concepts/coco/PythonAPI/pycocotools/coco.py
1
16953
__author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into Python dictionary # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepare data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. # encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # segToMask - Convert polygon segmentation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each functions can be accessed by: "help COCO>function". # See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>segToMask, COCO>showAnns # Microsoft COCO Toolbox. version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014. # Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import urllib import copy import itertools import mask import os from collections import defaultdict class COCO: def __init__(self, annotation_file=None): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. :return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) if not annotation_file == None: print 'loading annotations into memory...' tic = time.time() dataset = json.load(open(annotation_file, 'r')) assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset)) print 'Done (t=%0.2fs)'%(time.time()- tic) self.dataset = dataset self.createIndex() def createIndex(self): # create index print 'creating index...' 
anns,cats,imgs = dict(),dict(),dict() imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print 'index created!' # create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. :return: """ for key, value in self.dataset['info'].items(): print '%s: %s'%(key, value) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids def getCatIds(self, catNms=[], supNms=[], catIds=[]): """ filtering parameters. default skips that filter. :param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids """ catNms = catNms if type(catNms) == list else [catNms] supNms = supNms if type(supNms) == list else [supNms] catIds = catIds if type(catIds) == list else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids def getImgIds(self, imgIds=[], catIds=[]): ''' Get img ids that satisfy given filter conditions. 
:param imgIds (int array) : get imgs for given ids :param catIds (int array) : get imgs with all given cats :return: ids (int array) : integer array of img ids ''' imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == 0: ids = self.imgs.keys() else: ids = set(imgIds) for i, catId in enumerate(catIds): if i == 0 and len(ids) == 0: ids = set(self.catToImgs[catId]) else: ids &= set(self.catToImgs[catId]) return list(ids) def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]] def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]] def loadImgs(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying img :return: imgs (object array) : loaded img objects """ if type(ids) == list: return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]] def showAnns(self, anns): """ Display the specified annotations. :param anns (array of object): annotations to display :return: None """ if len(anns) == 0: return 0 if 'segmentation' in anns[0] or 'keypoints' in anns[0]: datasetType = 'instances' elif 'caption' in anns[0]: datasetType = 'captions' else: raise Exception("datasetType not supported") if datasetType == 'instances': ax = plt.gca() ax.set_autoscale_on(False) polygons = [] color = [] for ann in anns: c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] if 'segmentation' in ann: if type(ann['segmentation']) == list: # polygon for seg in ann['segmentation']: poly = np.array(seg).reshape((len(seg)/2, 2)) polygons.append(Polygon(poly)) color.append(c) else: # mask t = self.imgs[ann['image_id']] if type(ann['segmentation']['counts']) == list: rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width']) else: rle = [ann['segmentation']] m = mask.decode(rle) img = np.ones( (m.shape[0], m.shape[1], 3) ) if ann['iscrowd'] == 1: color_mask = np.array([2.0,166.0,101.0])/255 if ann['iscrowd'] == 0: color_mask = np.random.random((1, 3)).tolist()[0] for i in range(3): img[:,:,i] = color_mask[i] ax.imshow(np.dstack( (img, m*0.5) )) if 'keypoints' in ann and type(ann['keypoints']) == list: # turn skeleton into zero-based index sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 kp = np.array(ann['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] for sk in sks: if np.all(v[sk]>0): plt.plot(x[sk],y[sk], linewidth=3, color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print ann['caption'] def loadRes(self, resFile): """ Load result file and return a result api object. 
:param resFile (str) : file name of result file :return: res (obj) : result api object """ res = COCO() res.dataset['images'] = [img for img in self.dataset['images']] print 'Loading and preparing results... ' tic = time.time() if type(resFile) == str or type(resFile) == unicode: anns = json.load(open(resFile)) elif type(resFile) == np.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, 'results in not an array of objects' annsImgIds = [ann['image_id'] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if 'caption' in anns[0]: imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] for id, ann in enumerate(anns): ann['id'] = id+1 elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): bb = ann['bbox'] x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]] if not 'segmentation' in ann: ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann['area'] = bb[2]*bb[3] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'segmentation' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): # now only support compressed RLE format as segmentation results ann['area'] = mask.area([ann['segmentation']])[0] if not 'bbox' in ann: ann['bbox'] = mask.toBbox([ann['segmentation']])[0] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'keypoints' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): s = ann['keypoints'] x = s[0::3] y = s[1::3] x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann['area'] = (x1-x0)*(y1-y0) ann['id'] = id + 1 ann['bbox'] = [x0,y0,x1-x0,y1-y0] print 'DONE (t=%0.2fs)'%(time.time()- tic) res.dataset['annotations'] = anns res.createIndex() return res def download( self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. :param tarDir (str): COCO results directory name imgIds (list): images to be downloaded :return: ''' if tarDir is None: print 'Please specify target directory' return -1 if len(imgIds) == 0: imgs = self.imgs.values() else: imgs = self.loadImgs(imgIds) N = len(imgs) if not os.path.exists(tarDir): os.makedirs(tarDir) for i, img in enumerate(imgs): tic = time.time() fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urllib.urlretrieve(img['coco_url'], fname) print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic) def loadNumpyAnnotations(self, data): """ Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class} :param data (numpy.ndarray) :return: annotations (python nested list) """ print("Converting ndarray to lists...") assert(type(data) == np.ndarray) print(data.shape) assert(data.shape[1] == 7) N = data.shape[0] ann = [] for i in range(N): if i % 1000000 == 0: print("%d/%d" % (i,N)) ann += [{ 'image_id' : int(data[i, 0]), 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ], 'score' : data[i, 5], 'category_id': int(data[i, 6]), }] return ann
bsd-3-clause
-9,197,075,660,975,878,000
42.358056
128
0.54775
false
3.730854
false
false
false
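The header comments in the record above enumerate the COCO API. A short, conventional usage sketch; the annotation path is a placeholder and the matplotlib display call is optional:

from pycocotools.coco import COCO

coco = COCO('annotations/instances_val2014.json')   # placeholder path to a COCO annotation file
cat_ids = coco.getCatIds(catNms=['person'])
img_ids = coco.getImgIds(catIds=cat_ids)
ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
anns = coco.loadAnns(ann_ids)
print('%d images, %d annotations for the first one' % (len(img_ids), len(anns)))
# coco.showAnns(anns)   # draws polygons/masks on the current matplotlib axes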
blackball/an-test6
net/migrations/0004_update_calibrations.py
1
22054
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.db import models from astrometry.net.settings import * from astrometry.util.util import Tan import math import os class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." for calib in orm.Calibration.objects.all(): wcsfn = os.path.join(JOBDIR, '%08i' % calib.job.id) wcsfn = os.path.join(wcsfn, 'wcs.fits') wcs = Tan(str(wcsfn), 0) ra,dec = wcs.radec_center() radius = (wcs.pixel_scale() * math.hypot(wcs.imagew, wcs.imageh)/2. / 3600.) # Find cartesian coordinates ra *= math.pi/180 dec *= math.pi/180 tempr = math.cos(dec) calib.x = tempr*math.cos(ra) calib.y = tempr*math.sin(ra) calib.z = math.sin(dec) calib.r = radius/180*math.pi calib.save() def backwards(self, orm): "Write your backwards methods here." models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 
'net.album': { 'Meta': {'object_name': 'Album'}, 'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'albums'", 'symmetrical': 'False', 'to': "orm['net.Tag']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'albums'", 'null': 'True', 'to': "orm['auth.User']"}), 'user_images': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'albums'", 'symmetrical': 'False', 'to': "orm['net.UserImage']"}) }, 'net.cachedfile': { 'Meta': {'object_name': 'CachedFile'}, 'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.DiskFile']"}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'primary_key': 'True'}) }, 'net.calibration': { 'Meta': {'object_name': 'Calibration'}, 'decmax': ('django.db.models.fields.FloatField', [], {}), 'decmin': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'r': ('django.db.models.fields.FloatField', [], {}), 'ramax': ('django.db.models.fields.FloatField', [], {}), 'ramin': ('django.db.models.fields.FloatField', [], {}), 'raw_tan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations_raw'", 'null': 'True', 'to': "orm['net.TanWCS']"}), 'sip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.SipWCS']", 'null': 'True'}), 'sky_location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations'", 'null': 'True', 'to': "orm['net.SkyLocation']"}), 'tweaked_tan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations_tweaked'", 'null': 'True', 'to': "orm['net.TanWCS']"}), 'x': ('django.db.models.fields.FloatField', [], {}), 'y': ('django.db.models.fields.FloatField', [], {}), 'z': ('django.db.models.fields.FloatField', [], {}) }, 'net.comment': { 'Meta': {'ordering': "['-created_at']", 'object_name': 'Comment'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments_left'", 'to': "orm['auth.User']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['net.CommentReceiver']"}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}) }, 'net.commentreceiver': { 'Meta': {'object_name': 'CommentReceiver'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}) }, 'net.diskfile': { 'Meta': {'object_name': 'DiskFile'}, 'file_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'primary_key': 'True'}), 'file_type': ('django.db.models.fields.CharField', [], 
{'max_length': '256', 'null': 'True'}), 'size': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'net.flag': { 'Meta': {'ordering': "['name']", 'object_name': 'Flag'}, 'explanation': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '56', 'primary_key': 'True'}) }, 'net.flaggeduserimage': { 'Meta': {'object_name': 'FlaggedUserImage'}, 'flag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Flag']"}), 'flagged_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'user_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.UserImage']"}) }, 'net.image': { 'Meta': {'object_name': 'Image'}, 'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.DiskFile']"}), 'display_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_display_set'", 'null': 'True', 'to': "orm['net.Image']"}), 'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_thumbnail_set'", 'null': 'True', 'to': "orm['net.Image']"}), 'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}) }, 'net.job': { 'Meta': {'object_name': 'Job'}, 'calibration': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'job'", 'unique': 'True', 'null': 'True', 'to': "orm['net.Calibration']"}), 'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'error_message': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'queued_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'user_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'to': "orm['net.UserImage']"}) }, 'net.license': { 'Meta': {'object_name': 'License'}, 'allow_commercial_use': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '1'}), 'allow_modifications': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'license_name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'license_uri': ('django.db.models.fields.CharField', [], {'max_length': '1024'}) }, 'net.processsubmissions': { 'Meta': {'object_name': 'ProcessSubmissions'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'pid': ('django.db.models.fields.IntegerField', [], {}), 'watchdog': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'net.queuedjob': { 'Meta': {'object_name': 'QueuedJob'}, 'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Job']"}), 'procsub': ('django.db.models.fields.related.ForeignKey', [], {'related_name': 
"'jobs'", 'to': "orm['net.ProcessSubmissions']"}), 'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'net.queuedsubmission': { 'Meta': {'object_name': 'QueuedSubmission'}, 'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'procsub': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subs'", 'to': "orm['net.ProcessSubmissions']"}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Submission']"}), 'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'net.sipwcs': { 'Meta': {'object_name': 'SipWCS'}, 'apterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'aterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'bpterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'bterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 'tan': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.TanWCS']", 'unique': 'True'}) }, 'net.skylocation': { 'Meta': {'object_name': 'SkyLocation'}, 'healpix': ('django.db.models.fields.BigIntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nside': ('django.db.models.fields.PositiveSmallIntegerField', [], {}) }, 'net.skyobject': { 'Meta': {'object_name': 'SkyObject'}, 'name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'primary_key': 'True'}) }, 'net.sourcelist': { 'Meta': {'object_name': 'SourceList', '_ormbases': ['net.Image']}, 'image_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.Image']", 'unique': 'True', 'primary_key': 'True'}), 'source_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}) }, 'net.submission': { 'Meta': {'object_name': 'Submission'}, 'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Album']", 'null': 'True', 'blank': 'True'}), 'center_dec': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'center_ra': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}), 'deduplication_nonce': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['net.DiskFile']"}), 'downsample_factor': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'error_message': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.License']"}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'parity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 'positional_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'processing_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'processing_retries': ('django.db.models.fields.PositiveIntegerField', [], 
{'default': '0'}), 'processing_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}), 'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'scale_err': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'scale_est': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'scale_lower': ('django.db.models.fields.FloatField', [], {'default': '0.10000000000000001', 'null': 'True', 'blank': 'True'}), 'scale_type': ('django.db.models.fields.CharField', [], {'default': "'ul'", 'max_length': '2'}), 'scale_units': ('django.db.models.fields.CharField', [], {'default': "'degwidth'", 'max_length': '20'}), 'scale_upper': ('django.db.models.fields.FloatField', [], {'default': '180', 'null': 'True', 'blank': 'True'}), 'submitted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'net.tag': { 'Meta': {'object_name': 'Tag'}, 'text': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'primary_key': 'True'}) }, 'net.taggeduserimage': { 'Meta': {'object_name': 'TaggedUserImage'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Tag']"}), 'tagger': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'user_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.UserImage']"}) }, 'net.tanwcs': { 'Meta': {'object_name': 'TanWCS'}, 'cd11': ('django.db.models.fields.FloatField', [], {}), 'cd12': ('django.db.models.fields.FloatField', [], {}), 'cd21': ('django.db.models.fields.FloatField', [], {}), 'cd22': ('django.db.models.fields.FloatField', [], {}), 'crpix1': ('django.db.models.fields.FloatField', [], {}), 'crpix2': ('django.db.models.fields.FloatField', [], {}), 'crval1': ('django.db.models.fields.FloatField', [], {}), 'crval2': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'imageh': ('django.db.models.fields.FloatField', [], {}), 'imagew': ('django.db.models.fields.FloatField', [], {}) }, 'net.userimage': { 'Meta': {'object_name': 'UserImage'}, 'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'flags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'through': "orm['net.FlaggedUserImage']", 'to': "orm['net.Flag']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Image']"}), 'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.License']"}), 'original_file_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 
'max_length': '1'}), 'sky_objects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'to': "orm['net.SkyObject']"}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_images'", 'to': "orm['net.Submission']"}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'through': "orm['net.TaggedUserImage']", 'to': "orm['net.Tag']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_images'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'net.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'apikey': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'default_license': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['net.License']"}), 'display_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}) } } complete_apps = ['net']
gpl-2.0
-8,568,439,465,542,566,000
73.255892
203
0.540446
false
3.638673
false
false
false
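forwards() in the migration above converts each calibration's field center into a unit vector plus an angular radius in radians. A standalone worked example of just that conversion; the RA/Dec/radius values are made up, and no Django or WCS file is needed to check the math:

import math

ra_deg, dec_deg, radius_deg = 150.0, 2.3, 0.75   # illustrative field center and radius

ra = math.radians(ra_deg)
dec = math.radians(dec_deg)
tempr = math.cos(dec)
x = tempr * math.cos(ra)
y = tempr * math.sin(ra)
z = math.sin(dec)
r = math.radians(radius_deg)

# (x, y, z) lies on the unit sphere, so x**2 + y**2 + z**2 == 1 up to rounding.
assert abs(x * x + y * y + z * z - 1.0) < 1e-12
print(x, y, z, r)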
MSEMJEJME/Get-Dumped
renpy/statements.py
1
3307
# Copyright 2004-2012 Tom Rothamel <[email protected]> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # This module contains code to support user-defined statements. import renpy # The statement registry. It's a map from tuples giving the prefixes of # statements to dictionaries giving the methods used for that statement. registry = { } parsers = renpy.parser.ParseTrie() def register(name, parse=None, lint=None, execute=None, predict=None, next=None, scry=None, block=False, init=False): #@ReservedAssignment name = tuple(name.split()) registry[name] = dict(parse=parse, lint=lint, execute=execute, predict=predict, next=next, scry=scry) # The function that is called to create an ast.UserStatement. def parse_user_statement(l, loc): renpy.exports.push_error_handler(l.error) try: rv = renpy.ast.UserStatement(loc, l.text, l.subblock) if not block: l.expect_noblock(" ".join(name) + " statement") l.advance() else: l.expect_block(" ".join(name) + " statement") l.advance() finally: renpy.exports.pop_error_handler() if init and not l.init: rv = renpy.ast.Init(loc, [ rv ], 0) return rv renpy.parser.statements.add(name, parse_user_statement) # The function that is called to get our parse data. def parse_data(l): return (name, registry[name]["parse"](l)) parsers.add(name, parse_data) def parse(node, line, subblock): block = [ (node.filename, node.linenumber, line, subblock) ] l = renpy.parser.Lexer(block) l.advance() renpy.exports.push_error_handler(l.error) try: pf = parsers.parse(l) if pf is None: l.error("Could not find user-defined statement at runtime.") return pf(l) finally: renpy.exports.pop_error_handler() def call(method, parsed, *args, **kwargs): name, parsed = parsed method = registry[name].get(method) if method is None: return None return method(parsed, *args, **kwargs)
gpl-2.0
-8,841,751,182,915,426,000
32.07
138
0.643786
false
4.087763
false
false
false
RayRuizhiLiao/ITK_4D
Modules/ThirdParty/pygccxml/src/pygccxml/parser/etree_scanner.py
1
2166
# Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt

import warnings
from . import scanner

# keep py2exe happy
import xml.etree.ElementTree

import xml.etree.cElementTree as ElementTree


class etree_saxifier_t(object):

    def __init__(self, etree, handler):
        """
        Deprecated since 1.8.0. Will be removed in 1.9.0.

        """
        warnings.warn("etree_saxifier_t is deprecated.\n", DeprecationWarning)
        self.__root_elem = etree.getroot()
        self.__handler = handler

    def saxify(self):
        self.__handler.startDocument()
        self.__recursive_saxify(self.__root_elem)
        self.__handler.endDocument()

    def __recursive_saxify(self, element):
        self.__handler.startElement(element.tag, element.attrib)
        for e in element:
            self.__recursive_saxify(e)
        self.__handler.endElement(element.tag)


class etree_scanner_t(scanner.scanner_t):

    def __init__(self, xml_file, decl_factory, *args):
        """
        Deprecated since 1.8.0. Will be removed in 1.9.0.

        """
        warnings.warn(
            "etree_scanner_t is deprecated.\n" +
            "Please use ietree_scanner_t instead.",
            DeprecationWarning)
        scanner.scanner_t.__init__(self, xml_file, decl_factory, *args)

    def read(self):
        tree = ElementTree.parse(self.xml_file)
        saxifier = etree_saxifier_t(tree, self)
        saxifier.saxify()


class ietree_scanner_t(scanner.scanner_t):

    def __init__(self, xml_file, decl_factory, *args):
        scanner.scanner_t.__init__(self, xml_file, decl_factory, *args)

    def read(self):
        context = ElementTree.iterparse(
            self.xml_file, events=("start", "end"))
        for event, elem in context:
            if event == 'start':
                self.startElement(elem.tag, elem.attrib)
            else:
                self.endElement(elem.tag)
                elem.clear()
        self.endDocument()
apache-2.0
-8,333,672,104,491,799,000
28.507042
78
0.591874
false
3.854093
false
false
false
dnlcrl/PyFunt
tools/cythonize.py
1
6618
#!/usr/bin/env python """ cythonize SOURCE: https://github.com/scipy/scipy/blob/master/setup.py Cythonize pyx files into C files as needed. Usage: cythonize [root_dir] Default [root_dir] is 'pyfunt'. Checks pyx files to see if they have been changed relative to their corresponding C files. If they have, then runs cython on these files to recreate the C files. The script thinks that the pyx files have changed relative to the C files by comparing hashes stored in a database file. Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in) files; while waiting for a proper build system. Uses file hashes to figure out if rebuild is needed. For now, this script should be run by developers when changing Cython files only, and the resulting C files checked in, so that end-users (and Python-only developers) do not get the Cython/Tempita dependencies. Originally written by Dag Sverre Seljebotn, and copied here from: https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py Note: this script does not check any of the dependent C libraries; it only operates on the Cython .pyx files. """ from __future__ import division, print_function, absolute_import import os import re import sys import hashlib import subprocess HASH_FILE = 'cythonize.dat' DEFAULT_ROOT = 'pyfunt' # WindowsError is not defined on unix systems try: WindowsError except NameError: WindowsError = None # # Rules # def process_pyx(fromfile, tofile): try: from Cython.Compiler.Version import version as cython_version from distutils.version import LooseVersion if LooseVersion(cython_version) < LooseVersion('0.22'): raise Exception('Building PyFunt requires Cython >= 0.22') except ImportError: pass flags = ['--fast-fail'] if tofile.endswith('.cxx'): flags += ['--cplus'] try: try: # if fromfile == 'im2col_cython.pyx': # print('compiling im2col_cython') # r = subprocess.call( # ['python', 'pyfunt/layers/setup.py', 'build_ext', '--inplace']) # else: r = subprocess.call( ['cython'] + flags + ["-o", tofile, fromfile]) if r != 0: raise Exception('Cython failed') except OSError: # There are ways of installing Cython that don't result in a cython # executable on the path, see gh-2397. 
r = subprocess.call([sys.executable, '-c', 'import sys; from Cython.Compiler.Main import ' 'setuptools_main as main; sys.exit(main())'] + flags + ["-o", tofile, fromfile]) if r != 0: raise Exception("Cython either isn't installed or it failed.") except OSError: raise OSError('Cython needs to be installed') def process_tempita_pyx(fromfile, tofile): try: try: from Cython import Tempita as tempita except ImportError: import tempita except ImportError: raise Exception('Building PyFunt requires Tempita: ' 'pip install --user Tempita') from_filename = tempita.Template.from_filename template = from_filename(fromfile, encoding=sys.getdefaultencoding()) pyxcontent = template.substitute() assert fromfile.endswith('.pyx.in') pyxfile = fromfile[:-len('.pyx.in')] + '.pyx' with open(pyxfile, "w") as f: f.write(pyxcontent) process_pyx(pyxfile, tofile) rules = { # fromext : function '.pyx': process_pyx, '.pyx.in': process_tempita_pyx } # # Hash db # def load_hashes(filename): # Return { filename : (sha1 of input, sha1 of output) } if os.path.isfile(filename): hashes = {} with open(filename, 'r') as f: for line in f: filename, inhash, outhash = line.split() hashes[filename] = (inhash, outhash) else: hashes = {} return hashes def save_hashes(hash_db, filename): with open(filename, 'w') as f: for key, value in sorted(hash_db.items()): f.write("%s %s %s\n" % (key, value[0], value[1])) def sha1_of_file(filename): h = hashlib.sha1() with open(filename, "rb") as f: h.update(f.read()) return h.hexdigest() # # Main program # def normpath(path): path = path.replace(os.sep, '/') if path.startswith('./'): path = path[2:] return path def get_hash(frompath, topath): from_hash = sha1_of_file(frompath) to_hash = sha1_of_file(topath) if os.path.exists(topath) else None return (from_hash, to_hash) def process(path, fromfile, tofile, processor_function, hash_db): fullfrompath = os.path.join(path, fromfile) fulltopath = os.path.join(path, tofile) current_hash = get_hash(fullfrompath, fulltopath) if current_hash == hash_db.get(normpath(fullfrompath), None): print('%s has not changed' % fullfrompath) return orig_cwd = os.getcwd() try: os.chdir(path) print('Processing %s to %s' % (fullfrompath, fulltopath)) processor_function(fromfile, tofile) finally: os.chdir(orig_cwd) # changed target file, recompute hash current_hash = get_hash(fullfrompath, fulltopath) # store hash in db hash_db[normpath(fullfrompath)] = current_hash def find_process_files(root_dir): hash_db = load_hashes(HASH_FILE) for cur_dir, dirs, files in os.walk(root_dir): for filename in files: in_file = os.path.join(cur_dir, filename + ".in") if filename.endswith('.pyx') and os.path.isfile(in_file): continue for fromext, function in rules.items(): if filename.endswith(fromext): toext = ".c" with open(os.path.join(cur_dir, filename), 'rb') as f: data = f.read() m = re.search( br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I | re.M) if m: toext = ".cxx" fromfile = filename tofile = filename[:-len(fromext)] + toext process(cur_dir, fromfile, tofile, function, hash_db) save_hashes(hash_db, HASH_FILE) def main(): try: root_dir = sys.argv[1] except IndexError: root_dir = DEFAULT_ROOT find_process_files(root_dir) if __name__ == '__main__': main()
mit
-7,944,039,619,602,638,000
29.219178
97
0.599577
false
3.711722
false
false
false
deepmind/interval-bound-propagation
interval_bound_propagation/src/simplex_bounds.py
1
7609
# coding=utf-8 # Copyright 2019 The Interval Bound Propagation Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Naive bound calculation for common neural network layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from interval_bound_propagation.src import bounds as basic_bounds from interval_bound_propagation.src import relative_bounds import sonnet as snt import tensorflow.compat.v1 as tf class SimplexBounds(basic_bounds.AbstractBounds): """Specifies a bounding simplex within an embedding space.""" def __init__(self, vertices, nominal, r): """Initialises the simplex bounds. Args: vertices: Tensor of shape (num_vertices, *input_shape) or of shape (batch_size, num_vertices, *input_shape) containing the vertices in embedding space. nominal: Tensor of shape (batch_size, *input_shape) specifying the unperturbed inputs in embedding space, where `*input_shape` denotes either (embedding_size,) for flat input (e.g. bag-of-words) or (input_length, embedding_channels) for sequence input. r: Scalar specifying the dilation factor of the simplex. The dilated simplex will have vertices `nominal + r * (vertices-nominal)`. """ super(SimplexBounds, self).__init__() self._vertices = vertices self._nominal = nominal self._r = r @property def vertices(self): return self._vertices @property def nominal(self): return self._nominal @property def r(self): return self._r @property def shape(self): return self.nominal.shape.as_list() @classmethod def convert(cls, bounds): if not isinstance(bounds, cls): raise ValueError('Cannot convert "{}" to "{}"'.format(bounds, cls.__name__)) return bounds def apply_batch_reshape(self, wrapper, shape): reshape = snt.BatchReshape(shape) if self.vertices.shape.ndims == self.nominal.shape.ndims: reshape_vertices = reshape else: reshape_vertices = snt.BatchReshape(shape, preserve_dims=2) return SimplexBounds(reshape_vertices(self.vertices), reshape(self.nominal), self.r) def apply_linear(self, wrapper, w, b): mapped_centres = tf.matmul(self.nominal, w) mapped_vertices = tf.tensordot(self.vertices, w, axes=1) lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -2) nominal_out = tf.matmul(self.nominal, w) if b is not None: nominal_out += b return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out) def apply_conv1d(self, wrapper, w, b, padding, stride): mapped_centres = tf.nn.conv1d(self.nominal, w, padding=padding, stride=stride) if self.vertices.shape.ndims == 3: # `self.vertices` has no batch dimension; its shape is # (num_vertices, input_length, embedding_channels). mapped_vertices = tf.nn.conv1d(self.vertices, w, padding=padding, stride=stride) elif self.vertices.shape.ndims == 4: # `self.vertices` has shape # (batch_size, num_vertices, input_length, embedding_channels). # Vertices are different for each example in the batch, # e.g. for word perturbations. 
mapped_vertices = snt.BatchApply( lambda x: tf.nn.conv1d(x, w, padding=padding, stride=stride))( self.vertices) else: raise ValueError('"vertices" must have either 3 or 4 dimensions.') lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -3) nominal_out = tf.nn.conv1d(self.nominal, w, padding=padding, stride=stride) if b is not None: nominal_out += b return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out) def apply_conv2d(self, wrapper, w, b, padding, strides): mapped_centres = tf.nn.convolution(self.nominal, w, padding=padding, strides=strides) if self.vertices.shape.ndims == 4: # `self.vertices` has no batch dimension; its shape is # (num_vertices, input_height, input_width, input_channels). mapped_vertices = tf.nn.convolution(self.vertices, w, padding=padding, strides=strides) elif self.vertices.shape.ndims == 5: # `self.vertices` has shape # (batch_size, num_vertices, input_height, input_width, input_channels). # Vertices are different for each example in the batch. mapped_vertices = snt.BatchApply( lambda x: tf.nn.convolution(x, w, padding=padding, strides=strides))( self.vertices) else: raise ValueError('"vertices" must have either 4 or 5 dimensions.') lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -4) nominal_out = tf.nn.convolution(self.nominal, w, padding=padding, strides=strides) if b is not None: nominal_out += b return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out) def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters): if fn.__name__ in ('add', 'reduce_mean', 'reduce_sum', 'avg_pool'): if self.vertices.shape.ndims == self.nominal.shape.ndims: vertices_fn = fn else: vertices_fn = snt.BatchApply(fn, n_dims=2) return SimplexBounds( vertices_fn(self.vertices, *[bounds.vertices for bounds in args]), fn(self.nominal, *[bounds.nominal for bounds in args]), self.r) elif fn.__name__ == 'quotient': return SimplexBounds( self.vertices / tf.expand_dims(parameters['denom'], axis=1), fn(self.nominal), self.r) else: return super(SimplexBounds, self).apply_increasing_monotonic_fn( wrapper, fn, *args, **parameters) def _simplex_bounds(mapped_vertices, mapped_centres, r, axis): """Calculates naive bounds on the given layer-mapped vertices. Args: mapped_vertices: Tensor of shape (num_vertices, *output_shape) or of shape (batch_size, num_vertices, *output_shape) containing the vertices in the layer's output space. mapped_centres: Tensor of shape (batch_size, *output_shape) containing the layer's nominal outputs. r: Scalar in [0, 1) specifying the radius (in vocab space) of the simplex. axis: Index of the `num_vertices` dimension of `mapped_vertices`. Returns: lb_out: Tensor of shape (batch_size, *output_shape) with lower bounds on the outputs of the affine layer. ub_out: Tensor of shape (batch_size, *output_shape) with upper bounds on the outputs of the affine layer. """ # Use the negative of r, instead of the complement of r, as # we're shifting the input domain to be centred at the origin. lb_out = -r * mapped_centres + r * tf.reduce_min(mapped_vertices, axis=axis) ub_out = -r * mapped_centres + r * tf.reduce_max(mapped_vertices, axis=axis) return lb_out, ub_out
apache-2.0
3,811,566,715,278,191,600
38.020513
79
0.655539
false
3.770565
false
false
false
sigurdga/nidarholm
organization/templatetags/grouplistings.py
1
2466
from django import template
from django.contrib.auth.models import Group
from organization.models import GroupCategory, Role

import re

register = template.Library()


def roles_for_user_in_group(user, group):
    return Role.objects.filter(membership__user=user, membership__group=group)


def phone_number_format(number):
    if number:
        m = re.search(r'^((?:4|9)\d{2})(\d{2})(\d{3})$', number)
        if m:
            return "%s %s %s" % (m.group(1), m.group(2), m.group(3))
        else:
            n = re.search(r'^(\d{2})(\d{2})(\d{2})(\d{2})$', number)
            if n:
                return "%s %s %s %s" % (n.group(1), n.group(2), n.group(3), n.group(4))
            else:
                return number


@register.simple_tag
def list_groups(request, group_name, groupcategory_name):
    """Give a group and a not related group category.

    Lists all groups in groupcategory, filtered on users in the given group.
    """
    group = Group.objects.get(name__iexact=group_name)
    groupcategory = GroupCategory.objects.get(name=groupcategory_name)
    #TODO: Add 404 on exceptions
    ret = '<ul class="reset">'
    for groupprofile in groupcategory.groupprofile_set.all():
        ret += "<li>"
        ret += "<h2>" + groupprofile.group.name + "</h2>"
        ret += "<table>"
        for u in groupprofile.group.user_set.all():
            # groupprofile.group.user_set.filter(groups=group) is too eager
            #if u.groups.filter(id=group.id).exists():
            if u.userprofile_set.filter(status__lt=4):
                ret += "<tr>"
                if request.organization.group in request.user.groups.all():
                    ret += "<td class=\"col4\"><a href=\"" + u.get_absolute_url() +"\">" + u.get_full_name() + "</a></td>"
                else:
                    ret += "<td class=\"col4\">" + u.get_full_name() + "</td>"
                ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, group) ]) + "</td>"
                if request.user.groups.filter(id=group.id):
                    ret += "<td class=\"col2\">%s</td>" % (phone_number_format(u.get_profile().cellphone) or "",)
                    ret += "<td class=\"col5\">%s</td>" % (u.email,)
                ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, groupprofile.group) ]) + "</td>"
                ret += "</tr>"
        ret += "</table>"
        ret += "</li>"
    ret += "</ul>"
    return ret
agpl-3.0
-7,402,660,224,905,612,000
41.517241
125
0.539335
false
3.401379
false
false
false
lorensen/VTKExamples
src/Python/GeometricObjects/Polygon.py
1
1535
#!/usr/bin/env python

import vtk


def main():
    colors = vtk.vtkNamedColors()

    # Setup four points
    points = vtk.vtkPoints()
    points.InsertNextPoint(0.0, 0.0, 0.0)
    points.InsertNextPoint(1.0, 0.0, 0.0)
    points.InsertNextPoint(1.0, 1.0, 0.0)
    points.InsertNextPoint(0.0, 1.0, 0.0)

    # Create the polygon
    polygon = vtk.vtkPolygon()
    polygon.GetPointIds().SetNumberOfIds(4)  # make a quad
    polygon.GetPointIds().SetId(0, 0)
    polygon.GetPointIds().SetId(1, 1)
    polygon.GetPointIds().SetId(2, 2)
    polygon.GetPointIds().SetId(3, 3)

    # Add the polygon to a list of polygons
    polygons = vtk.vtkCellArray()
    polygons.InsertNextCell(polygon)

    # Create a PolyData
    polygonPolyData = vtk.vtkPolyData()
    polygonPolyData.SetPoints(points)
    polygonPolyData.SetPolys(polygons)

    # Create a mapper and actor
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(polygonPolyData)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(colors.GetColor3d("Silver"))

    # Visualize
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetWindowName("Polygon")
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)

    renderer.AddActor(actor)
    renderer.SetBackground(colors.GetColor3d("Salmon"))
    renderWindow.Render()
    renderWindowInteractor.Start()


if __name__ == '__main__':
    main()
apache-2.0
-4,295,740,643,100,989,000
26.909091
61
0.695765
false
3.449438
false
false
false
angelicadly/prog-script
tekton-master/backend/appengine/routes/rotas/rest.py
1
1044
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from rota_app import facade


def index():
    cmd = facade.list_rotas_cmd()
    rota_list = cmd()
    short_form = facade.rota_short_form()
    rota_short = [short_form.fill_with_model(m) for m in rota_list]
    return JsonResponse(rota_short)


def save(**rota_properties):
    cmd = facade.save_rota_cmd(**rota_properties)
    return _save_or_update_json_response(cmd)


def update(rota_id, **rota_properties):
    cmd = facade.update_rota_cmd(rota_id, **rota_properties)
    return _save_or_update_json_response(cmd)


def delete(rota_id):
    facade.delete_rota_cmd(rota_id)()


def _save_or_update_json_response(cmd):
    try:
        rota = cmd()
    except CommandExecutionException:
        return JsonResponse({'errors': cmd.errors})
    short_form = facade.rota_short_form()
    return JsonResponse(short_form.fill_with_model(rota))
mit
-1,036,920,398,554,202,000
27.216216
67
0.704981
false
3.144578
false
false
false
akshayka/edxclassify
edxclassify/classifiers/feature_generation.py
1
3989
from edxclassify.feature_spec import FEATURE_COLUMNS
from edxclassify.classifiers.word_lists import *
from edxclassify.data_cleaners.dc_util import compress_likert
import re
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize


def to_int(value, aux=None):
    if value == '':
        return 0
    return int(value)


def to_float(value, aux=None):
    if value == '':
        return 0
    return 1 if float(value) > 0.94 else 0


def is_anonymous(value, aux=None):
    return 1 if value.lower() == 'true' else 0


def is_comment_thread(value, aux=None):
    return 1 if value.lower() == 'commentthread' else 0


def count_question_marks(document, aux=None):
    count = 0
    for c in document:
        if c == '?':
            count = count + 1
    return count


# TODO: How do these play with logistic regression?
# TODO: Idea -- feature for sentiment ~ 1 iff #pos > #neg
def count_negative_words(document, token_patrn):
    words = re.findall(token_patrn, document)
    count = 0
    for w in words:
        if w in NEGATIVE_WORDS:
            count = count + 1
    return count


def count_urgent_words(document, token_patrn):
    words = re.findall(token_patrn, document)
    count = 0
    for w in words:
        if w in URGENT_WORDS:
            return 1
    return 0


def count_opinion_words(document, token_patrn):
    words = re.findall(token_patrn, document)
    count = 0
    for w in words:
        if w in OPINION_WORDS:
            count = count + 1
    return count


def count_nouns(document, aux=None):
    tagged_words = []
    for s in sent_tokenize(document.decode('utf-8')):
        tagged_words.extend(nltk.pos_tag(word_tokenize(s)))
    count = 0
    for word, tag in tagged_words:
        if tag == 'NN':
            count = count + 1
    return count


# TODO: We might want to discretize the grades and number of attempts
class FeatureExtractor:
    def __init__(self, feature_name):
        self.feature_name = feature_name

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        idx = FEATURE_COLUMNS[self.feature_name]
        return [row[idx] for row in X]


class FeatureCurator:
    def __init__(self, feature_name, curate_function, aux=None):
        self.feature_name = feature_name
        self.curate = curate_function
        self.aux = aux

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        return [{self.feature_name + ' feature': self.curate(value, self.aux)}
                for value in X]

    def fit_transform(self, X, y=None):
        return self.transform(X)


class ChainedClassifier:
    def __init__(self, clf, column, guess):
        self.clf = clf
        self.column = column
        self.y_chain = None
        self.guess = guess

    def fit(self, X, y=None):
        # Note that the extracted values will be in
        # [0, 2] for non-binary variables (confusion,
        # sentiment, urgency), {0, 1} otherwise.
        if self.column == 'confusion' or\
           self.column == 'sentiment' or\
           self.column == 'urgency':
            self.y_chain = [compress_likert(
                record[FEATURE_COLUMNS[self.column]], binary=False)\
                for record in X]
        else:
            self.y_chain = [int(record[FEATURE_COLUMNS[self.column]])\
                for record in X]
        self.clf.train(X, self.y_chain)

    def transform(self, X, y=None):
        if self.y_chain is not None and not self.guess:
            predictions = self.y_chain
            # This is critical -- it ensures
            # that we don't use the gold set values when
            # predicting.
            self.y_chain = None
        else:
            predictions = self.clf.test(X)
        return [{self.column + ' prediction': value}
                for value in predictions]

    def fit_transform(self, X, y=None):
        self.fit(X)
        return self.transform(X)
gpl-2.0
2,360,410,365,754,236,000
28.992481
78
0.591376
false
3.659633
false
false
false
srmagura/goodnight-lead
gl_site/statistics/views.py
1
6287
# View imports from django.http import JsonResponse, HttpResponse from django.shortcuts import render from gl_site.custom_auth import login_required # Forms from gl_site.statistics.statistics_form import statistics_request_form, statistics_download_form # Data from .data_generation import format_graph_data, format_file_data, generate_data_from_sessions, get_queryset, validate_sessions from gl_site.statistics import inventory_keys # IO from django.core.files.base import ContentFile from io import BytesIO # JSON import json # Excel import xlsxwriter # Response statuses BAD_REQUEST = 400 FORBIDDEN = 403 METHOD_NOT_ALLOWED = 405 # Error messages METHOD_NOT_ALLOWED_MESSAGE = "Method not allowed." INVALID_DATA_SELECTION = "Invalid data selection." @login_required def view_statistics(request): """ View responsable for initially loading the statistics page """ # Get the proper queryset and generate the form querysets = get_queryset(request.user) form = statistics_request_form( querysets['organizations'], querysets['sessions'] ) downloads = statistics_download_form( querysets['organizations'], querysets['sessions'], auto_id='id_downloads_%s' ) return render(request, 'statistics/statistics.html', { 'form': form, 'downloads': downloads, 'statistics_active': True, }) @login_required def load_data(request): """ Returns a JSON respons containing statistics data """ # Deny non GET requests if (request.method != 'GET'): return JsonResponse([METHOD_NOT_ALLOWED_MESSAGE], status=METHOD_NOT_ALLOWED, safe=False) # Get the querysets accessable by the user querysets = get_queryset(request.user) # Build the submitted form from request data form = statistics_request_form( querysets['organizations'], querysets['sessions'], request.GET ) # Validate the form if (not form.is_valid()): return JsonResponse([INVALID_DATA_SELECTION], status=FORBIDDEN, safe=False) try: # Validate sessions sessions = validate_sessions( form.cleaned_data['organization'], form.cleaned_data['session'], request.user ) # Generate and format the data data = generate_data_from_sessions(sessions, request.user) data = format_graph_data(data) # Return the JSON encoded response return JsonResponse(data, safe=False) except LookupError as e: return JsonResponse([str(e)], status=BAD_REQUEST, safe=False) def download_data(request): # Get the querysets accessable by the user querysets = get_queryset(request.user) # Get the selected downloads downloads = statistics_download_form( querysets['organizations'], querysets['sessions'], request.GET, auto_id='id_downloads_%s' ) # If it is a valid choice if (downloads.is_valid()): data = [] try: # Validate sessions sessions = validate_sessions( downloads.cleaned_data['organization'], downloads.cleaned_data['session'], request.user ) # Generate the data data = generate_data_from_sessions(sessions, request.user) data = format_file_data(data) except LookupError: pass else: data_file = ContentFile('') # Finalize the output if (downloads.cleaned_data['file_type'] == 'application/xlsx'): # Create an excel workbook wrapped around python byte io. # Use in memory to prevent the use of temp files. output = BytesIO() workbook = xlsxwriter.Workbook(output, {'in_memory': True}) # Create a worksheet. 
worksheet = workbook.add_worksheet() # Set ID, Organization, and Session headers worksheet.write('A1', 'User ID') worksheet.write('B1', 'Organization') worksheet.write('C1', 'Session') # Add all user IDs (row number), organization, and session information row = 2 for user in data: worksheet.write('A{}'.format(row), row - 1) worksheet.write('B{}'.format(row), user['organization']) worksheet.write('C{}'.format(row), user['session']) row += 1 # Print inventory data starting at column D prefix = '' column = ord('D') for inventory in inventory_keys: # Print all metrics within the inventory for key in inventory['keys']: # If column is greater than 'Z' move to 'AA' if (column > ord('Z')): prefix = 'A' column = ord('A') # Write the column header: Inventory - Metric worksheet.write(prefix + chr(column) + '1', inventory['name'] + ' - ' + key) # Print metric data for each user row = 2 for user in data: inventory_name = inventory['name'] # Only print if the user has data for this inventory if (inventory_name in user and key in user[inventory_name]): cell = (prefix + chr(column) + '{}').format(row) worksheet.write(cell, user[inventory['name']][key]) # Move on to the next row row += 1 # Move on to the next column column += 1 # Close the workbook workbook.close() # Get the output bytes for creating a django file output = output.getvalue() # Set the appropriate application extension extension = '.xlsx' else: # Generate the JSON output string output = json.dumps(data) # Set the appropriate application extension extension = '.json' # Generate the data file data_file = ContentFile(output) # Create the response containing the file response = HttpResponse( data_file, content_type=downloads.cleaned_data['file_type'] ) response['Content-Disposition'] = 'attachment; filename=statistics{}'.format(extension) return response
gpl-3.0
-8,336,534,110,600,097,000
30.435
126
0.606171
false
4.474733
false
false
false
adamfast/faadata
faadata/aircraft/parser.py
1
3304
import datetime


class AircraftManufacturerCode(object):
    def __init__(self, record):
        self.code = record[:7].strip()
        self.manufacturer = record[8:38].strip()
        self.model = record[39:59].strip()
        self.aircraft_type = record[60].strip()
        self.engine_type = record[62].strip()
        self.category = record[64].strip()
        self.builder_certification_code = record[66].strip()
        self.number_of_engines = record[68:70].strip()
        self.number_of_seats = record[71:74].strip()
        self.aircraft_weight = record[75:82].strip()
        self.cruising_speed = record[83:87].strip()


class AircraftRegistration(object):
    def __init__(self, record):
        # first parse the fixed-width
        self.n_number = record[:5].strip()
        self.serial_number = record[6:36].strip()
        self.aircraft_mfr_model_code = record[37:44].strip()
        self.engine_mfr_model_code = record[45:50].strip()
        self.year_mfg = record[51:55].strip()
        if record[56].strip():
            self.type_registrant = record[56].strip()
        else:
            self.type_registrant = None
        self.registrant_name = record[58:108].strip()
        self.street1 = record[109:142].strip()
        self.street2 = record[143:176].strip()
        self.city = record[177:195].strip()
        self.state = record[196:198].strip()
        self.zip_code = record[199:209].strip()
        self.region = record[210].strip()
        self.county = record[212:215].strip()
        self.country = record[216:218].strip()
        if record[219:227].strip():
            self.last_activity_date = datetime.datetime.strptime(record[219:227], "%Y%m%d").date()
        else:
            self.last_activity_date = None
        if record[228:236].strip():
            self.certificate_issue_date = datetime.datetime.strptime(record[228:236], "%Y%m%d").date()
        else:
            self.certificate_issue_date = None
        self.airworthiness_classification_code = record[237:238].strip()
        if record[248].strip():
            self.aircraft_type = record[248].strip()
        else:
            self.aircraft_type = None
        if record[250:252].strip():
            self.engine_type = record[250:252].strip()
        else:
            self.engine_type = None
        self.status_code = record[253:255].strip()
        self.mode_s_code = record[256:264].strip()
        self.fractional_ownership = record[265].strip()
        if record[267:275].strip():
            self.airworthiness_date = datetime.datetime.strptime(record[267:275], "%Y%m%d").date()
        else:
            self.airworthiness_date = None
        self.other_name_1 = record[276:326].strip()
        self.other_name_2 = record[327:377].strip()
        self.other_name_3 = record[378:428].strip()
        self.other_name_4 = record[429:479].strip()
        self.other_name_5 = record[480:530].strip()
        if record[531:539].strip():
            self.expiration_date = datetime.datetime.strptime(record[531:539], "%Y%m%d").date()
        else:
            self.expiration_date = None
        self.unique_id = record[540:548].strip()
        self.kit_manufacturer = record[549:579].strip()
        self.kit_model = record[580:600].strip()
        self.mode_s_code_hex = record[601:611].strip()
bsd-3-clause
-8,092,445,956,844,940,000
43.648649
102
0.592918
false
3.448852
false
false
false
Aravinthu/odoo
odoo/fields.py
1
104967
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. """ High-level objects for fields. """ from collections import OrderedDict, defaultdict from datetime import date, datetime from functools import partial from operator import attrgetter import itertools import logging import pytz try: from xmlrpc.client import MAXINT except ImportError: #pylint: disable=bad-python3-import from xmlrpclib import MAXINT import psycopg2 from .sql_db import LazyCursor from .tools import float_repr, float_round, frozendict, html_sanitize, human_size, pg_varchar, ustr, OrderedSet, pycompat, sql from .tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT from .tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT from .tools.translate import html_translate, _ DATE_LENGTH = len(date.today().strftime(DATE_FORMAT)) DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT)) EMPTY_DICT = frozendict() RENAMED_ATTRS = [('select', 'index'), ('digits_compute', 'digits')] _logger = logging.getLogger(__name__) _schema = logging.getLogger(__name__[:-7] + '.schema') Default = object() # default value for __init__() methods def copy_cache(records, env): """ Recursively copy the cache of ``records`` to the environment ``env``. """ src, dst = records.env.cache, env.cache todo, done = set(records), set() while todo: record = todo.pop() if record not in done: done.add(record) target = record.with_env(env) for field in src.get_fields(record): value = src.get(record, field) dst.set(target, field, value) if value and field.type in ('many2one', 'one2many', 'many2many', 'reference'): todo.update(field.convert_to_record(value, record)) def resolve_mro(model, name, predicate): """ Return the list of successively overridden values of attribute ``name`` in mro order on ``model`` that satisfy ``predicate``. """ result = [] for cls in type(model).__mro__: if name in cls.__dict__: value = cls.__dict__[name] if not predicate(value): break result.append(value) return result class MetaField(type): """ Metaclass for field classes. """ by_type = {} def __new__(meta, name, bases, attrs): """ Combine the ``_slots`` dict from parent classes, and determine ``__slots__`` for them on the new class. """ base_slots = {} for base in reversed(bases): base_slots.update(getattr(base, '_slots', ())) slots = dict(base_slots) slots.update(attrs.get('_slots', ())) attrs['__slots__'] = set(slots) - set(base_slots) attrs['_slots'] = slots return type.__new__(meta, name, bases, attrs) def __init__(cls, name, bases, attrs): super(MetaField, cls).__init__(name, bases, attrs) if not hasattr(cls, 'type'): return if cls.type and cls.type not in MetaField.by_type: MetaField.by_type[cls.type] = cls # compute class attributes to avoid calling dir() on fields cls.related_attrs = [] cls.description_attrs = [] for attr in dir(cls): if attr.startswith('_related_'): cls.related_attrs.append((attr[9:], attr)) elif attr.startswith('_description_'): cls.description_attrs.append((attr[13:], attr)) _global_seq = iter(itertools.count()) class Field(MetaField('DummyField', (object,), {})): """ The field descriptor contains the field definition, and manages accesses and assignments of the corresponding field on records. The following attributes may be provided when instanciating a field: :param string: the label of the field seen by users (string); if not set, the ORM takes the field name in the class (capitalized). 
:param help: the tooltip of the field seen by users (string) :param readonly: whether the field is readonly (boolean, by default ``False``) :param required: whether the value of the field is required (boolean, by default ``False``) :param index: whether the field is indexed in database (boolean, by default ``False``) :param default: the default value for the field; this is either a static value, or a function taking a recordset and returning a value; use ``default=None`` to discard default values for the field :param states: a dictionary mapping state values to lists of UI attribute-value pairs; possible attributes are: 'readonly', 'required', 'invisible'. Note: Any state-based condition requires the ``state`` field value to be available on the client-side UI. This is typically done by including it in the relevant views, possibly made invisible if not relevant for the end-user. :param groups: comma-separated list of group xml ids (string); this restricts the field access to the users of the given groups only :param bool copy: whether the field value should be copied when the record is duplicated (default: ``True`` for normal fields, ``False`` for ``one2many`` and computed fields, including property fields and related fields) :param string oldname: the previous name of this field, so that ORM can rename it automatically at migration .. _field-computed: .. rubric:: Computed fields One can define a field whose value is computed instead of simply being read from the database. The attributes that are specific to computed fields are given below. To define such a field, simply provide a value for the attribute ``compute``. :param compute: name of a method that computes the field :param inverse: name of a method that inverses the field (optional) :param search: name of a method that implement search on the field (optional) :param store: whether the field is stored in database (boolean, by default ``False`` on computed fields) :param compute_sudo: whether the field should be recomputed as superuser to bypass access rights (boolean, by default ``False``) The methods given for ``compute``, ``inverse`` and ``search`` are model methods. Their signature is shown in the following example:: upper = fields.Char(compute='_compute_upper', inverse='_inverse_upper', search='_search_upper') @api.depends('name') def _compute_upper(self): for rec in self: rec.upper = rec.name.upper() if rec.name else False def _inverse_upper(self): for rec in self: rec.name = rec.upper.lower() if rec.upper else False def _search_upper(self, operator, value): if operator == 'like': operator = 'ilike' return [('name', operator, value)] The compute method has to assign the field on all records of the invoked recordset. The decorator :meth:`odoo.api.depends` must be applied on the compute method to specify the field dependencies; those dependencies are used to determine when to recompute the field; recomputation is automatic and guarantees cache/database consistency. Note that the same method can be used for several fields, you simply have to assign all the given fields in the method; the method will be invoked once for all those fields. By default, a computed field is not stored to the database, and is computed on-the-fly. Adding the attribute ``store=True`` will store the field's values in the database. The advantage of a stored field is that searching on that field is done by the database itself. The disadvantage is that it requires database updates when the field must be recomputed. 
The inverse method, as its name says, does the inverse of the compute method: the invoked records have a value for the field, and you must apply the necessary changes on the field dependencies such that the computation gives the expected value. Note that a computed field without an inverse method is readonly by default. The search method is invoked when processing domains before doing an actual search on the model. It must return a domain equivalent to the condition: ``field operator value``. .. _field-related: .. rubric:: Related fields The value of a related field is given by following a sequence of relational fields and reading a field on the reached model. The complete sequence of fields to traverse is specified by the attribute :param related: sequence of field names Some field attributes are automatically copied from the source field if they are not redefined: ``string``, ``help``, ``readonly``, ``required`` (only if all fields in the sequence are required), ``groups``, ``digits``, ``size``, ``translate``, ``sanitize``, ``selection``, ``comodel_name``, ``domain``, ``context``. All semantic-free attributes are copied from the source field. By default, the values of related fields are not stored to the database. Add the attribute ``store=True`` to make it stored, just like computed fields. Related fields are automatically recomputed when their dependencies are modified. .. _field-company-dependent: .. rubric:: Company-dependent fields Formerly known as 'property' fields, the value of those fields depends on the company. In other words, users that belong to different companies may see different values for the field on a given record. :param company_dependent: whether the field is company-dependent (boolean) .. _field-incremental-definition: .. rubric:: Incremental definition A field is defined as class attribute on a model class. If the model is extended (see :class:`~odoo.models.Model`), one can also extend the field definition by redefining a field with the same name and same type on the subclass. In that case, the attributes of the field are taken from the parent class and overridden by the ones given in subclasses. 
For instance, the second class below only adds a tooltip on the field ``state``:: class First(models.Model): _name = 'foo' state = fields.Selection([...], required=True) class Second(models.Model): _inherit = 'foo' state = fields.Selection(help="Blah blah blah") """ type = None # type of the field (string) relational = False # whether the field is a relational one translate = False # whether the field is translated column_type = None # database column type (ident, spec) column_format = '%s' # placeholder for value in queries column_cast_from = () # column types that may be cast to this _slots = { 'args': EMPTY_DICT, # the parameters given to __init__() '_attrs': EMPTY_DICT, # the field's non-slot attributes '_module': None, # the field's module name '_setup_done': None, # the field's setup state: None, 'base' or 'full' '_sequence': None, # absolute ordering of the field 'automatic': False, # whether the field is automatically created ("magic" field) 'inherited': False, # whether the field is inherited (_inherits) 'name': None, # name of the field 'model_name': None, # name of the model of this field 'comodel_name': None, # name of the model of values (if relational) 'store': True, # whether the field is stored in database 'index': False, # whether the field is indexed in database 'manual': False, # whether the field is a custom field 'copy': True, # whether the field is copied over by BaseModel.copy() 'depends': (), # collection of field dependencies 'recursive': False, # whether self depends on itself 'compute': None, # compute(recs) computes field on recs 'compute_sudo': False, # whether field should be recomputed as admin 'inverse': None, # inverse(recs) inverses field on recs 'search': None, # search(recs, operator, value) searches on self 'related': None, # sequence of field names, for related fields 'related_sudo': True, # whether related fields should be read as admin 'company_dependent': False, # whether ``self`` is company-dependent (property field) 'default': None, # default(recs) returns the default value 'string': None, # field label 'help': None, # field tooltip 'readonly': False, # whether the field is readonly 'required': False, # whether the field is required 'states': None, # set readonly and required depending on state 'groups': None, # csv list of group xml ids 'change_default': False, # whether the field may trigger a "user-onchange" 'deprecated': None, # whether the field is deprecated 'related_field': None, # corresponding related field 'group_operator': None, # operator for aggregating values 'group_expand': None, # name of method to expand groups in read_group() 'prefetch': True, # whether the field is prefetched 'context_dependent': False, # whether the field's value depends on context } def __init__(self, string=Default, **kwargs): kwargs['string'] = string self._sequence = kwargs['_sequence'] = next(_global_seq) args = {key: val for key, val in kwargs.items() if val is not Default} self.args = args or EMPTY_DICT self._setup_done = None def new(self, **kwargs): """ Return a field of the same type as ``self``, with its own parameters. """ return type(self)(**kwargs) def __getattr__(self, name): """ Access non-slot field attribute. """ try: return self._attrs[name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): """ Set slot or non-slot field attribute. 
""" try: object.__setattr__(self, name, value) except AttributeError: if self._attrs: self._attrs[name] = value else: self._attrs = {name: value} # replace EMPTY_DICT def set_all_attrs(self, attrs): """ Set all field attributes at once (with slot defaults). """ # optimization: we assign slots only assign = object.__setattr__ for key, val in self._slots.items(): assign(self, key, attrs.pop(key, val)) if attrs: assign(self, '_attrs', attrs) def __delattr__(self, name): """ Remove non-slot field attribute. """ try: del self._attrs[name] except KeyError: raise AttributeError(name) def __str__(self): return "%s.%s" % (self.model_name, self.name) def __repr__(self): return "%s.%s" % (self.model_name, self.name) ############################################################################ # # Base field setup: things that do not depend on other models/fields # def setup_base(self, model, name): """ Base setup: things that do not depend on other models/fields. """ if self._setup_done and not self.related: # optimization for regular fields: keep the base setup self._setup_done = 'base' else: # do the base setup from scratch self._setup_attrs(model, name) if not self.related: self._setup_regular_base(model) self._setup_done = 'base' # # Setup field parameter attributes # def _can_setup_from(self, field): """ Return whether ``self`` can retrieve parameters from ``field``. """ return isinstance(field, type(self)) def _get_attrs(self, model, name): """ Return the field parameter attributes as a dictionary. """ # determine all inherited field attributes attrs = {} if not (self.args.get('automatic') or self.args.get('manual')): # magic and custom fields do not inherit from parent classes for field in reversed(resolve_mro(model, name, self._can_setup_from)): attrs.update(field.args) attrs.update(self.args) # necessary in case self is not in class attrs['args'] = self.args attrs['model_name'] = model._name attrs['name'] = name # initialize ``self`` with ``attrs`` if attrs.get('compute'): # by default, computed fields are not stored, not copied and readonly attrs['store'] = attrs.get('store', False) attrs['copy'] = attrs.get('copy', False) attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse')) attrs['context_dependent'] = attrs.get('context_dependent', True) if attrs.get('related'): # by default, related fields are not stored and not copied attrs['store'] = attrs.get('store', False) attrs['copy'] = attrs.get('copy', False) if attrs.get('company_dependent'): # by default, company-dependent fields are not stored and not copied attrs['store'] = False attrs['copy'] = attrs.get('copy', False) attrs['default'] = self._default_company_dependent attrs['compute'] = self._compute_company_dependent if not attrs.get('readonly'): attrs['inverse'] = self._inverse_company_dependent attrs['search'] = self._search_company_dependent attrs['context_dependent'] = attrs.get('context_dependent', True) if attrs.get('translate'): # by default, translatable fields are context-dependent attrs['context_dependent'] = attrs.get('context_dependent', True) return attrs def _setup_attrs(self, model, name): """ Initialize the field parameter attributes. 
""" attrs = self._get_attrs(model, name) self.set_all_attrs(attrs) # check for renamed attributes (conversion errors) for key1, key2 in RENAMED_ATTRS: if key1 in attrs: _logger.warning("Field %s: parameter %r is no longer supported; use %r instead.", self, key1, key2) # prefetch only stored, column, non-manual and non-deprecated fields if not (self.store and self.column_type) or self.manual or self.deprecated: self.prefetch = False if not self.string and not self.related: # related fields get their string from their parent field self.string = ( name[:-4] if name.endswith('_ids') else name[:-3] if name.endswith('_id') else name ).replace('_', ' ').title() # self.default must be a callable if self.default is not None: value = self.default self.default = value if callable(value) else lambda model: value ############################################################################ # # Full field setup: everything else, except recomputation triggers # def setup_full(self, model): """ Full setup: everything else, except recomputation triggers. """ if self._setup_done != 'full': if not self.related: self._setup_regular_full(model) else: self._setup_related_full(model) self._setup_done = 'full' # # Setup of non-related fields # def _setup_regular_base(self, model): """ Setup the attributes of a non-related field. """ def make_depends(deps): return tuple(deps(model) if callable(deps) else deps) if isinstance(self.compute, pycompat.string_types): # if the compute method has been overridden, concatenate all their _depends self.depends = () for method in resolve_mro(model, self.compute, callable): self.depends += make_depends(getattr(method, '_depends', ())) else: self.depends = make_depends(getattr(self.compute, '_depends', ())) def _setup_regular_full(self, model): """ Setup the inverse field(s) of ``self``. """ pass # # Setup of related fields # def _setup_related_full(self, model): """ Setup the attributes of a related field. """ # fix the type of self.related if necessary if isinstance(self.related, pycompat.string_types): self.related = tuple(self.related.split('.')) # determine the chain of fields, and make sure they are all set up target = model for name in self.related: field = target._fields[name] field.setup_full(target) target = target[name] self.related_field = field # check type consistency if self.type != field.type: raise TypeError("Type of related field %s is inconsistent with %s" % (self, field)) # determine dependencies, compute, inverse, and search self.depends = ('.'.join(self.related),) self.compute = self._compute_related if not (self.readonly or field.readonly): self.inverse = self._inverse_related if field._description_searchable: # allow searching on self only if the related field is searchable self.search = self._search_related # copy attributes from field to self (string, help, etc.) for attr, prop in self.related_attrs: if not getattr(self, attr): setattr(self, attr, getattr(field, prop)) for attr, value in field._attrs.items(): if attr not in self._attrs: setattr(self, attr, value) # special case for states: copy it only for inherited fields if not self.states and self.inherited: self.states = field.states # special case for inherited required fields if self.inherited and field.required: self.required = True def traverse_related(self, record): """ Traverse the fields of the related field `self` except for the last one, and return it as a pair `(last_record, last_field)`. 
""" for name in self.related[:-1]: record = record[name][:1].with_prefetch(record._prefetch) return record, self.related_field def _compute_related(self, records): """ Compute the related field ``self`` on ``records``. """ # when related_sudo, bypass access rights checks when reading values others = records.sudo() if self.related_sudo else records for record, other in pycompat.izip(records, others): if not record.id and record.env != other.env: # draft records: copy record's cache to other's cache first copy_cache(record, other.env) other, field = self.traverse_related(other) record[self.name] = other[field.name] def _inverse_related(self, records): """ Inverse the related field ``self`` on ``records``. """ # store record values, otherwise they may be lost by cache invalidation! record_value = {record: record[self.name] for record in records} for record in records: other, field = self.traverse_related(record) if other: other[field.name] = record_value[record] def _search_related(self, records, operator, value): """ Determine the domain to search on field ``self``. """ return [('.'.join(self.related), operator, value)] # properties used by _setup_related_full() to copy values from related field _related_comodel_name = property(attrgetter('comodel_name')) _related_string = property(attrgetter('string')) _related_help = property(attrgetter('help')) _related_readonly = property(attrgetter('readonly')) _related_groups = property(attrgetter('groups')) _related_group_operator = property(attrgetter('group_operator')) @property def base_field(self): """ Return the base field of an inherited field, or ``self``. """ return self.related_field.base_field if self.inherited else self # # Company-dependent fields # def _default_company_dependent(self, model): return model.env['ir.property'].get(self.name, self.model_name) def _compute_company_dependent(self, records): Property = records.env['ir.property'] values = Property.get_multi(self.name, self.model_name, records.ids) for record in records: record[self.name] = values.get(record.id) def _inverse_company_dependent(self, records): Property = records.env['ir.property'] values = { record.id: self.convert_to_write(record[self.name], record) for record in records } Property.set_multi(self.name, self.model_name, values) def _search_company_dependent(self, records, operator, value): Property = records.env['ir.property'] return Property.search_multi(self.name, self.model_name, operator, value) # # Setup of field triggers # # The triggers of ``self`` are a collection of pairs ``(field, path)`` of # fields that depend on ``self``. When ``self`` is modified, it invalidates # the cache of each ``field``, and determines the records to recompute based # on ``path``. See method ``modified`` below for details. # def resolve_deps(self, model): """ Return the dependencies of ``self`` as tuples ``(model, field, path)``, where ``path`` is an optional list of field names. 
""" model0 = model result = [] # add self's own dependencies for dotnames in self.depends: if dotnames == self.name: _logger.warning("Field %s depends on itself; please fix its decorator @api.depends().", self) model, path = model0, dotnames.split('.') for i, fname in enumerate(path): field = model._fields[fname] result.append((model, field, path[:i])) model = model0.env.get(field.comodel_name) # add self's model dependencies for mname, fnames in model0._depends.items(): model = model0.env[mname] for fname in fnames: field = model._fields[fname] result.append((model, field, None)) # add indirect dependencies from the dependencies found above for model, field, path in list(result): for inv_field in model._field_inverses[field]: inv_model = model0.env[inv_field.model_name] inv_path = None if path is None else path + [field.name] result.append((inv_model, inv_field, inv_path)) return result def setup_triggers(self, model): """ Add the necessary triggers to invalidate/recompute ``self``. """ for model, field, path in self.resolve_deps(model): if field is not self: path_str = None if path is None else ('.'.join(path) or 'id') model._field_triggers.add(field, (self, path_str)) elif path: self.recursive = True model._field_triggers.add(field, (self, '.'.join(path))) ############################################################################ # # Field description # def get_description(self, env): """ Return a dictionary that describes the field ``self``. """ desc = {'type': self.type} for attr, prop in self.description_attrs: value = getattr(self, prop) if callable(value): value = value(env) if value is not None: desc[attr] = value return desc # properties used by get_description() _description_store = property(attrgetter('store')) _description_manual = property(attrgetter('manual')) _description_depends = property(attrgetter('depends')) _description_related = property(attrgetter('related')) _description_company_dependent = property(attrgetter('company_dependent')) _description_readonly = property(attrgetter('readonly')) _description_required = property(attrgetter('required')) _description_states = property(attrgetter('states')) _description_groups = property(attrgetter('groups')) _description_change_default = property(attrgetter('change_default')) _description_deprecated = property(attrgetter('deprecated')) @property def _description_searchable(self): return bool(self.store or self.search) @property def _description_sortable(self): return self.store or (self.inherited and self.related_field._description_sortable) def _description_string(self, env): if self.string and env.lang: model_name = self.base_field.model_name field_string = env['ir.translation'].get_field_string(model_name) return field_string.get(self.name) or self.string return self.string def _description_help(self, env): if self.help and env.lang: model_name = self.base_field.model_name field_help = env['ir.translation'].get_field_help(model_name) return field_help.get(self.name) or self.help return self.help ############################################################################ # # Conversion of values # def cache_key(self, record): """ Return the key to get/set the value of ``self`` on ``record`` in cache, the full cache key being ``(self, record.id, key)``. """ env = record.env return env if self.context_dependent else (env.cr, env.uid) def null(self, record): """ Return the null value for this field in the record format. 
""" return False def convert_to_column(self, value, record, values=None): """ Convert ``value`` from the ``write`` format to the SQL format. """ if value is None or value is False: return None return pycompat.to_native(value) def convert_to_cache(self, value, record, validate=True): """ Convert ``value`` to the cache format; ``value`` may come from an assignment, or have the format of methods :meth:`BaseModel.read` or :meth:`BaseModel.write`. If the value represents a recordset, it should be added for prefetching on ``record``. :param bool validate: when True, field-specific validation of ``value`` will be performed """ return value def convert_to_record(self, value, record): """ Convert ``value`` from the cache format to the record format. If the value represents a recordset, it should share the prefetching of ``record``. """ return value def convert_to_read(self, value, record, use_name_get=True): """ Convert ``value`` from the record format to the format returned by method :meth:`BaseModel.read`. :param bool use_name_get: when True, the value's display name will be computed using :meth:`BaseModel.name_get`, if relevant for the field """ return False if value is None else value def convert_to_write(self, value, record): """ Convert ``value`` from the record format to the format of method :meth:`BaseModel.write`. """ return self.convert_to_read(value, record) def convert_to_onchange(self, value, record, names): """ Convert ``value`` from the record format to the format returned by method :meth:`BaseModel.onchange`. :param names: a tree of field names (for relational fields only) """ return self.convert_to_read(value, record) def convert_to_export(self, value, record): """ Convert ``value`` from the record format to the export format. """ if not value: return '' return value if record._context.get('export_raw_data') else ustr(value) def convert_to_display_name(self, value, record): """ Convert ``value`` from the record format to a suitable display name. """ return ustr(value) ############################################################################ # # Update database schema # def update_db(self, model, columns): """ Update the database schema to implement this field. :param model: an instance of the field's model :param columns: a dict mapping column names to their configuration in database :return: ``True`` if the field must be recomputed on existing rows """ if not self.column_type: return column = columns.get(self.name) if not column and hasattr(self, 'oldname'): # column not found; check whether it exists under its old name column = columns.get(self.oldname) if column: sql.rename_column(model._cr, model._table, self.oldname, self.name) # create/update the column, not null constraint, indexes self.update_db_column(model, column) self.update_db_notnull(model, column) self.update_db_index(model, column) return not column def update_db_column(self, model, column): """ Create/update the column corresponding to ``self``. 
:param model: an instance of the field's model :param column: the column's configuration (dict) if it exists, or ``None`` """ if not column: # the column does not exist, create it sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string) return if column['udt_name'] == self.column_type[0]: return if column['udt_name'] in self.column_cast_from: sql.convert_column(model._cr, model._table, self.name, self.column_type[1]) else: newname = (self.name + '_moved{}').format i = 0 while sql.column_exists(model._cr, model._table, newname(i)): i += 1 if column['is_nullable'] == 'NO': sql.drop_not_null(model._cr, model._table, self.name) sql.rename_column(model._cr, model._table, self.name, newname(i)) sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string) def update_db_notnull(self, model, column): """ Add or remove the NOT NULL constraint on ``self``. :param model: an instance of the field's model :param column: the column's configuration (dict) if it exists, or ``None`` """ has_notnull = column and column['is_nullable'] == 'NO' if not column or (self.required and not has_notnull): # the column is new or it becomes required; initialize its values if model._table_has_rows(): model._init_column(self.name) if self.required and not has_notnull: sql.set_not_null(model._cr, model._table, self.name) elif not self.required and has_notnull: sql.drop_not_null(model._cr, model._table, self.name) def update_db_index(self, model, column): """ Add or remove the index corresponding to ``self``. :param model: an instance of the field's model :param column: the column's configuration (dict) if it exists, or ``None`` """ indexname = '%s_%s_index' % (model._table, self.name) if self.index: sql.create_index(model._cr, indexname, model._table, ['"%s"' % self.name]) else: sql.drop_index(model._cr, indexname, model._table) ############################################################################ # # Read from/write to database # def read(self, records): """ Read the value of ``self`` on ``records``, and store it in cache. """ return NotImplementedError("Method read() undefined on %s" % self) def write(self, records, value, create=False): """ Write the value of ``self`` on ``records``. The ``value`` must be in the format of method :meth:`BaseModel.write`. 
:param create: whether ``records`` have just been created (to enable some optimizations) """ return NotImplementedError("Method write() undefined on %s" % self) ############################################################################ # # Descriptor methods # def __get__(self, record, owner): """ return the value of field ``self`` on ``record`` """ if record is None: return self # the field is accessed through the owner class if record: # only a single record may be accessed record.ensure_one() try: value = record.env.cache.get(record, self) except KeyError: # cache miss, determine value and retrieve it if record.id: self.determine_value(record) else: self.determine_draft_value(record) value = record.env.cache.get(record, self) else: # null record -> return the null value for this field value = self.convert_to_cache(False, record, validate=False) return self.convert_to_record(value, record) def __set__(self, record, value): """ set the value of field ``self`` on ``record`` """ env = record.env # only a single record may be updated record.ensure_one() # adapt value to the cache level value = self.convert_to_cache(value, record) if env.in_draft or not record.id: # determine dependent fields spec = self.modified_draft(record) # set value in cache, inverse field, and mark record as dirty record.env.cache.set(record, self, value) if env.in_onchange: for invf in record._field_inverses[self]: invf._update(record[self.name], record) record._set_dirty(self.name) # determine more dependent fields, and invalidate them if self.relational: spec += self.modified_draft(record) env.cache.invalidate(spec) else: # Write to database write_value = self.convert_to_write(self.convert_to_record(value, record), record) record.write({self.name: write_value}) # Update the cache unless value contains a new record if not (self.relational and not all(value)): record.env.cache.set(record, self, value) ############################################################################ # # Computation of field values # def _compute_value(self, records): """ Invoke the compute method on ``records``. """ # initialize the fields to their corresponding null value in cache fields = records._field_computed[self] cache = records.env.cache for field in fields: for record in records: cache.set(record, field, field.convert_to_cache(False, record, validate=False)) if isinstance(self.compute, pycompat.string_types): getattr(records, self.compute)() else: self.compute(records) def compute_value(self, records): """ Invoke the compute method on ``records``; the results are in cache. """ fields = records._field_computed[self] with records.env.do_in_draft(), records.env.protecting(fields, records): try: self._compute_value(records) except (AccessError, MissingError): # some record is forbidden or missing, retry record by record for record in records: try: self._compute_value(record) except Exception as exc: record.env.cache.set_failed(record, [self], exc) def determine_value(self, record): """ Determine the value of ``self`` for ``record``. 
""" env = record.env if self.store and not (self.compute and env.in_onchange): # this is a stored field or an old-style function field if self.compute: # this is a stored computed field, check for recomputation recs = record._recompute_check(self) if recs: # recompute the value (only in cache) self.compute_value(recs) # HACK: if result is in the wrong cache, copy values if recs.env != env: computed = record._field_computed[self] for source, target in pycompat.izip(recs, recs.with_env(env)): try: values = {f.name: source[f.name] for f in computed} target._cache.update(target._convert_to_cache(values, validate=False)) except MissingError as exc: target._cache.set_failed(target._fields, exc) # the result is saved to database by BaseModel.recompute() return # read the field from database record._prefetch_field(self) elif self.compute: # this is either a non-stored computed field, or a stored computed # field in onchange mode if self.recursive: self.compute_value(record) else: recs = record._in_cache_without(self) recs = recs.with_prefetch(record._prefetch) self.compute_value(recs) else: # this is a non-stored non-computed field record.env.cache.set(record, self, self.convert_to_cache(False, record, validate=False)) def determine_draft_value(self, record): """ Determine the value of ``self`` for the given draft ``record``. """ if self.compute: fields = record._field_computed[self] with record.env.protecting(fields, record): self._compute_value(record) else: null = self.convert_to_cache(False, record, validate=False) record.env.cache.set_special(record, self, lambda: null) def determine_inverse(self, records): """ Given the value of ``self`` on ``records``, inverse the computation. """ if isinstance(self.inverse, pycompat.string_types): getattr(records, self.inverse)() else: self.inverse(records) def determine_domain(self, records, operator, value): """ Return a domain representing a condition on ``self``. """ if isinstance(self.search, pycompat.string_types): return getattr(records, self.search)(operator, value) else: return self.search(records, operator, value) ############################################################################ # # Notification when fields are modified # def modified_draft(self, records): """ Same as :meth:`modified`, but in draft mode. """ env = records.env # invalidate the fields on the records in cache that depend on # ``records``, except fields currently being computed spec = [] for field, path in records._field_triggers[self]: if not field.compute: # Note: do not invalidate non-computed fields. Such fields may # require invalidation in general (like *2many fields with # domains) but should not be invalidated in this case, because # we would simply lose their values during an onchange! 
continue target = env[field.model_name] protected = env.protected(field) if path == 'id' and field.model_name == records._name: target = records - protected elif path and env.in_onchange: target = (env.cache.get_records(target, field) - protected).filtered( lambda rec: rec if path == 'id' else rec._mapped_cache(path) & records ) else: target = env.cache.get_records(target, field) - protected if target: spec.append((field, target._ids)) return spec class Boolean(Field): type = 'boolean' column_type = ('bool', 'bool') def convert_to_column(self, value, record, values=None): return bool(value) def convert_to_cache(self, value, record, validate=True): return bool(value) def convert_to_export(self, value, record): if record._context.get('export_raw_data'): return value return ustr(value) class Integer(Field): type = 'integer' column_type = ('int4', 'int4') _slots = { 'group_operator': 'sum', } _description_group_operator = property(attrgetter('group_operator')) def convert_to_column(self, value, record, values=None): return int(value or 0) def convert_to_cache(self, value, record, validate=True): if isinstance(value, dict): # special case, when an integer field is used as inverse for a one2many return value.get('id', False) return int(value or 0) def convert_to_read(self, value, record, use_name_get=True): # Integer values greater than 2^31-1 are not supported in pure XMLRPC, # so we have to pass them as floats :-( if value and value > MAXINT: return float(value) return value def _update(self, records, value): # special case, when an integer field is used as inverse for a one2many cache = records.env.cache for record in records: cache.set(record, self, value.id or 0) def convert_to_export(self, value, record): if value or value == 0: return value if record._context.get('export_raw_data') else ustr(value) return '' class Float(Field): """ The precision digits are given by the attribute :param digits: a pair (total, decimal), or a function taking a database cursor and returning a pair (total, decimal) """ type = 'float' column_cast_from = ('int4', 'numeric', 'float8') _slots = { '_digits': None, # digits argument passed to class initializer 'group_operator': 'sum', } def __init__(self, string=Default, digits=Default, **kwargs): super(Float, self).__init__(string=string, _digits=digits, **kwargs) @property def column_type(self): # Explicit support for "falsy" digits (0, False) to indicate a NUMERIC # field with no fixed precision. The values are saved in the database # with all significant digits. # FLOAT8 type is still the default when there is no precision because it # is faster for most operations (sums, etc.) return ('numeric', 'numeric') if self.digits is not None else \ ('float8', 'double precision') @property def digits(self): if callable(self._digits): with LazyCursor() as cr: return self._digits(cr) else: return self._digits _related__digits = property(attrgetter('_digits')) _description_digits = property(attrgetter('digits')) _description_group_operator = property(attrgetter('group_operator')) def convert_to_column(self, value, record, values=None): result = float(value or 0.0) digits = self.digits if digits: precision, scale = digits result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale) return result def convert_to_cache(self, value, record, validate=True): # apply rounding here, otherwise value in cache may be wrong! 
value = float(value or 0.0) if not validate: return value digits = self.digits return float_round(value, precision_digits=digits[1]) if digits else value def convert_to_export(self, value, record): if value or value == 0.0: return value if record._context.get('export_raw_data') else ustr(value) return '' class Monetary(Field): """ The decimal precision and currency symbol are taken from the attribute :param currency_field: name of the field holding the currency this monetary field is expressed in (default: `currency_id`) """ type = 'monetary' column_type = ('numeric', 'numeric') column_cast_from = ('float8',) _slots = { 'currency_field': None, 'group_operator': 'sum', } def __init__(self, string=Default, currency_field=Default, **kwargs): super(Monetary, self).__init__(string=string, currency_field=currency_field, **kwargs) _related_currency_field = property(attrgetter('currency_field')) _description_currency_field = property(attrgetter('currency_field')) _description_group_operator = property(attrgetter('group_operator')) def _setup_regular_full(self, model): super(Monetary, self)._setup_regular_full(model) if not self.currency_field: # pick a default, trying in order: 'currency_id', 'x_currency_id' if 'currency_id' in model._fields: self.currency_field = 'currency_id' elif 'x_currency_id' in model._fields: self.currency_field = 'x_currency_id' assert self.currency_field in model._fields, \ "Field %s with unknown currency_field %r" % (self, self.currency_field) def convert_to_column(self, value, record, values=None): # retrieve currency from values or record if values and self.currency_field in values: field = record._fields[self.currency_field] currency = field.convert_to_cache(values[self.currency_field], record) currency = field.convert_to_record(currency, record) else: # Note: this is wrong if 'record' is several records with different # currencies, which is functional nonsense and should not happen currency = record[:1][self.currency_field] value = float(value or 0.0) if currency: return float_repr(currency.round(value), currency.decimal_places) return value def convert_to_cache(self, value, record, validate=True): # cache format: float value = float(value or 0.0) if validate and record[self.currency_field]: # FIXME @rco-odoo: currency may not be already initialized if it is # a function or related field! value = record[self.currency_field].round(value) return value def convert_to_read(self, value, record, use_name_get=True): return value def convert_to_write(self, value, record): return value class _String(Field): """ Abstract class for string fields. """ _slots = { 'translate': False, # whether the field is translated } def __init__(self, string=Default, **kwargs): # translate is either True, False, or a callable if 'translate' in kwargs and not callable(kwargs['translate']): kwargs['translate'] = bool(kwargs['translate']) super(_String, self).__init__(string=string, **kwargs) _related_translate = property(attrgetter('translate')) def _description_translate(self, env): return bool(self.translate) def get_trans_terms(self, value): """ Return the sequence of terms to translate found in `value`. """ if not callable(self.translate): return [value] if value else [] terms = [] self.translate(terms.append, value) return terms def get_trans_func(self, records): """ Return a translation function `translate` for `self` on the given records; the function call `translate(record_id, value)` translates the field value to the language given by the environment of `records`. 
""" if callable(self.translate): rec_src_trans = records.env['ir.translation']._get_terms_translations(self, records) def translate(record_id, value): src_trans = rec_src_trans[record_id] return self.translate(src_trans.get, value) else: rec_trans = records.env['ir.translation']._get_ids( '%s,%s' % (self.model_name, self.name), 'model', records.env.lang, records.ids) def translate(record_id, value): return rec_trans.get(record_id) or value return translate def check_trans_value(self, value): """ Check and possibly sanitize the translated term `value`. """ if callable(self.translate): # do a "no-translation" to sanitize the value callback = lambda term: None return self.translate(callback, value) else: return value class Char(_String): """ Basic string field, can be length-limited, usually displayed as a single-line string in clients. :param int size: the maximum size of values stored for that field :param translate: enable the translation of the field's values; use ``translate=True`` to translate field values as a whole; ``translate`` may also be a callable such that ``translate(callback, value)`` translates ``value`` by using ``callback(term)`` to retrieve the translation of terms. """ type = 'char' column_cast_from = ('text',) _slots = { 'size': None, # maximum size of values (deprecated) } @property def column_type(self): return ('varchar', pg_varchar(self.size)) def update_db_column(self, model, column): if ( column and column['udt_name'] == 'varchar' and column['character_maximum_length'] and (self.size is None or column['character_maximum_length'] < self.size) ): # the column's varchar size does not match self.size; convert it sql.convert_column(model._cr, model._table, self.name, self.column_type[1]) super(Char, self).update_db_column(model, column) _related_size = property(attrgetter('size')) _description_size = property(attrgetter('size')) def _setup_regular_base(self, model): super(Char, self)._setup_regular_base(model) assert self.size is None or isinstance(self.size, int), \ "Char field %s with non-integer size %r" % (self, self.size) def convert_to_column(self, value, record, values=None): if value is None or value is False: return None # we need to convert the string to a unicode object to be able # to evaluate its length (and possibly truncate it) reliably return pycompat.to_text(value)[:self.size] def convert_to_cache(self, value, record, validate=True): if value is None or value is False: return False return pycompat.to_text(value)[:self.size] class Text(_String): """ Very similar to :class:`~.Char` but used for longer contents, does not have a size and usually displayed as a multiline text box. :param translate: enable the translation of the field's values; use ``translate=True`` to translate field values as a whole; ``translate`` may also be a callable such that ``translate(callback, value)`` translates ``value`` by using ``callback(term)`` to retrieve the translation of terms. 
""" type = 'text' column_type = ('text', 'text') column_cast_from = ('varchar',) def convert_to_cache(self, value, record, validate=True): if value is None or value is False: return False return ustr(value) class Html(_String): type = 'html' column_type = ('text', 'text') _slots = { 'sanitize': True, # whether value must be sanitized 'sanitize_tags': True, # whether to sanitize tags (only a white list of attributes is accepted) 'sanitize_attributes': True, # whether to sanitize attributes (only a white list of attributes is accepted) 'sanitize_style': False, # whether to sanitize style attributes 'strip_style': False, # whether to strip style attributes (removed and therefore not sanitized) 'strip_classes': False, # whether to strip classes attributes } def _setup_attrs(self, model, name): super(Html, self)._setup_attrs(model, name) # Translated sanitized html fields must use html_translate or a callable. if self.translate is True and self.sanitize: self.translate = html_translate _related_sanitize = property(attrgetter('sanitize')) _related_sanitize_tags = property(attrgetter('sanitize_tags')) _related_sanitize_attributes = property(attrgetter('sanitize_attributes')) _related_sanitize_style = property(attrgetter('sanitize_style')) _related_strip_style = property(attrgetter('strip_style')) _related_strip_classes = property(attrgetter('strip_classes')) _description_sanitize = property(attrgetter('sanitize')) _description_sanitize_tags = property(attrgetter('sanitize_tags')) _description_sanitize_attributes = property(attrgetter('sanitize_attributes')) _description_sanitize_style = property(attrgetter('sanitize_style')) _description_strip_style = property(attrgetter('strip_style')) _description_strip_classes = property(attrgetter('strip_classes')) def convert_to_column(self, value, record, values=None): if value is None or value is False: return None if self.sanitize: return html_sanitize( value, silent=True, sanitize_tags=self.sanitize_tags, sanitize_attributes=self.sanitize_attributes, sanitize_style=self.sanitize_style, strip_style=self.strip_style, strip_classes=self.strip_classes) return value def convert_to_cache(self, value, record, validate=True): if value is None or value is False: return False if validate and self.sanitize: return html_sanitize( value, silent=True, sanitize_tags=self.sanitize_tags, sanitize_attributes=self.sanitize_attributes, sanitize_style=self.sanitize_style, strip_style=self.strip_style, strip_classes=self.strip_classes) return value class Date(Field): type = 'date' column_type = ('date', 'date') column_cast_from = ('timestamp',) @staticmethod def today(*args): """ Return the current day in the format expected by the ORM. This function may be used to compute default values. """ return date.today().strftime(DATE_FORMAT) @staticmethod def context_today(record, timestamp=None): """ Return the current date as seen in the client's timezone in a format fit for date fields. This method may be used to compute default values. :param datetime timestamp: optional datetime value to use instead of the current date and time (must be a datetime, regular dates can't be converted between timezones.) 
:rtype: str """ today = timestamp or datetime.now() context_today = None tz_name = record._context.get('tz') or record.env.user.tz if tz_name: try: today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST context_today = today_utc.astimezone(pytz.timezone(tz_name)) except Exception: _logger.debug("failed to compute context/client-specific today date, using UTC value for `today`", exc_info=True) return (context_today or today).strftime(DATE_FORMAT) @staticmethod def from_string(value): """ Convert an ORM ``value`` into a :class:`date` value. """ if not value: return None value = value[:DATE_LENGTH] return datetime.strptime(value, DATE_FORMAT).date() @staticmethod def to_string(value): """ Convert a :class:`date` value into the format expected by the ORM. """ return value.strftime(DATE_FORMAT) if value else False def convert_to_cache(self, value, record, validate=True): if not value: return False if isinstance(value, pycompat.string_types): if validate: # force parsing for validation self.from_string(value) return value[:DATE_LENGTH] return self.to_string(value) def convert_to_export(self, value, record): if not value: return '' return self.from_string(value) if record._context.get('export_raw_data') else ustr(value) class Datetime(Field): type = 'datetime' column_type = ('timestamp', 'timestamp') column_cast_from = ('date',) @staticmethod def now(*args): """ Return the current day and time in the format expected by the ORM. This function may be used to compute default values. """ return datetime.now().strftime(DATETIME_FORMAT) @staticmethod def context_timestamp(record, timestamp): """Returns the given timestamp converted to the client's timezone. This method is *not* meant for use as a default initializer, because datetime fields are automatically converted upon display on client side. For default values :meth:`fields.datetime.now` should be used instead. :param datetime timestamp: naive datetime value (expressed in UTC) to be converted to the client timezone :rtype: datetime :return: timestamp converted to timezone-aware datetime in context timezone """ assert isinstance(timestamp, datetime), 'Datetime instance expected' tz_name = record._context.get('tz') or record.env.user.tz utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST if tz_name: try: context_tz = pytz.timezone(tz_name) return utc_timestamp.astimezone(context_tz) except Exception: _logger.debug("failed to compute context/client-specific timestamp, " "using the UTC value", exc_info=True) return utc_timestamp @staticmethod def from_string(value): """ Convert an ORM ``value`` into a :class:`datetime` value. """ if not value: return None value = value[:DATETIME_LENGTH] if len(value) == DATE_LENGTH: value += " 00:00:00" return datetime.strptime(value, DATETIME_FORMAT) @staticmethod def to_string(value): """ Convert a :class:`datetime` value into the format expected by the ORM. 
""" return value.strftime(DATETIME_FORMAT) if value else False def convert_to_cache(self, value, record, validate=True): if not value: return False if isinstance(value, pycompat.string_types): if validate: # force parsing for validation self.from_string(value) value = value[:DATETIME_LENGTH] if len(value) == DATE_LENGTH: value += " 00:00:00" return value return self.to_string(value) def convert_to_export(self, value, record): if not value: return '' return self.from_string(value) if record._context.get('export_raw_data') else ustr(value) def convert_to_display_name(self, value, record): assert record, 'Record expected' return Datetime.to_string(Datetime.context_timestamp(record, Datetime.from_string(value))) # http://initd.org/psycopg/docs/usage.html#binary-adaptation # Received data is returned as buffer (in Python 2) or memoryview (in Python 3). _BINARY = memoryview if pycompat.PY2: _BINARY = buffer #pylint: disable=buffer-builtin class Binary(Field): type = 'binary' _slots = { 'prefetch': False, # not prefetched by default 'context_dependent': True, # depends on context (content or size) 'attachment': False, # whether value is stored in attachment } @property def column_type(self): return None if self.attachment else ('bytea', 'bytea') _description_attachment = property(attrgetter('attachment')) def convert_to_column(self, value, record, values=None): # Binary values may be byte strings (python 2.6 byte array), but # the legacy OpenERP convention is to transfer and store binaries # as base64-encoded strings. The base64 string may be provided as a # unicode in some circumstances, hence the str() cast here. # This str() coercion will only work for pure ASCII unicode strings, # on purpose - non base64 data must be passed as a 8bit byte strings. if not value: return None if isinstance(value, bytes): return psycopg2.Binary(value) return psycopg2.Binary(pycompat.text_type(value).encode('ascii')) def convert_to_cache(self, value, record, validate=True): if isinstance(value, _BINARY): return bytes(value) if isinstance(value, pycompat.integer_types) and \ (record._context.get('bin_size') or record._context.get('bin_size_' + self.name)): # If the client requests only the size of the field, we return that # instead of the content. Presumably a separate request will be done # to read the actual content, if necessary. 
return human_size(value) return value def read(self, records): # values are stored in attachments, retrieve them assert self.attachment domain = [ ('res_model', '=', records._name), ('res_field', '=', self.name), ('res_id', 'in', records.ids), ] # Note: the 'bin_size' flag is handled by the field 'datas' itself data = {att.res_id: att.datas for att in records.env['ir.attachment'].sudo().search(domain)} cache = records.env.cache for record in records: cache.set(record, self, data.get(record.id, False)) def write(self, records, value, create=False): # retrieve the attachments that stores the value, and adapt them assert self.attachment if create: atts = records.env['ir.attachment'].sudo() else: atts = records.env['ir.attachment'].sudo().search([ ('res_model', '=', records._name), ('res_field', '=', self.name), ('res_id', 'in', records.ids), ]) with records.env.norecompute(): if value: # update the existing attachments atts.write({'datas': value}) # create the missing attachments for record in (records - records.browse(atts.mapped('res_id'))): atts.create({ 'name': self.name, 'res_model': record._name, 'res_field': self.name, 'res_id': record.id, 'type': 'binary', 'datas': value, }) else: atts.unlink() class Selection(Field): """ :param selection: specifies the possible values for this field. It is given as either a list of pairs (``value``, ``string``), or a model method, or a method name. :param selection_add: provides an extension of the selection in the case of an overridden field. It is a list of pairs (``value``, ``string``). The attribute ``selection`` is mandatory except in the case of :ref:`related fields <field-related>` or :ref:`field extensions <field-incremental-definition>`. """ type = 'selection' _slots = { 'selection': None, # [(value, string), ...], function or method name } def __init__(self, selection=Default, string=Default, **kwargs): super(Selection, self).__init__(selection=selection, string=string, **kwargs) @property def column_type(self): if (self.selection and isinstance(self.selection, list) and isinstance(self.selection[0][0], int)): return ('int4', 'integer') else: return ('varchar', pg_varchar()) def _setup_regular_base(self, model): super(Selection, self)._setup_regular_base(model) assert self.selection is not None, "Field %s without selection" % self def _setup_related_full(self, model): super(Selection, self)._setup_related_full(model) # selection must be computed on related field field = self.related_field self.selection = lambda model: field._description_selection(model.env) def _setup_attrs(self, model, name): super(Selection, self)._setup_attrs(model, name) # determine selection (applying 'selection_add' extensions) for field in reversed(resolve_mro(model, name, self._can_setup_from)): # We cannot use field.selection or field.selection_add here # because those attributes are overridden by ``_setup_attrs``. 
if 'selection' in field.args: self.selection = field.args['selection'] if 'selection_add' in field.args: # use an OrderedDict to update existing values selection_add = field.args['selection_add'] self.selection = list(OrderedDict(self.selection + selection_add).items()) def _description_selection(self, env): """ return the selection list (pairs (value, label)); labels are translated according to context language """ selection = self.selection if isinstance(selection, pycompat.string_types): return getattr(env[self.model_name], selection)() if callable(selection): return selection(env[self.model_name]) # translate selection labels if env.lang: name = "%s,%s" % (self.model_name, self.name) translate = partial( env['ir.translation']._get_source, name, 'selection', env.lang) return [(value, translate(label) if label else label) for value, label in selection] else: return selection def get_values(self, env): """ return a list of the possible values """ selection = self.selection if isinstance(selection, pycompat.string_types): selection = getattr(env[self.model_name], selection)() elif callable(selection): selection = selection(env[self.model_name]) return [value for value, _ in selection] def convert_to_cache(self, value, record, validate=True): if not validate: return value or False if value in self.get_values(record.env): return value elif not value: return False raise ValueError("Wrong value for %s: %r" % (self, value)) def convert_to_export(self, value, record): if not isinstance(self.selection, list): # FIXME: this reproduces an existing buggy behavior! return value if value else '' for item in self._description_selection(record.env): if item[0] == value: return item[1] return False class Reference(Selection): type = 'reference' @property def column_type(self): return ('varchar', pg_varchar()) def convert_to_cache(self, value, record, validate=True): # cache format: (res_model, res_id) or False def process(res_model, res_id): record._prefetch[res_model].add(res_id) return (res_model, res_id) if isinstance(value, BaseModel): if not validate or (value._name in self.get_values(record.env) and len(value) <= 1): return process(value._name, value.id) if value else False elif isinstance(value, pycompat.string_types): res_model, res_id = value.split(',') if record.env[res_model].browse(int(res_id)).exists(): return process(res_model, int(res_id)) else: return False elif not value: return False raise ValueError("Wrong value for %s: %r" % (self, value)) def convert_to_record(self, value, record): return value and record.env[value[0]].browse([value[1]], record._prefetch) def convert_to_read(self, value, record, use_name_get=True): return "%s,%s" % (value._name, value.id) if value else False def convert_to_export(self, value, record): return value.name_get()[0][1] if value else '' def convert_to_display_name(self, value, record): return ustr(value and value.display_name) class _Relational(Field): """ Abstract class for relational fields. 
""" relational = True _slots = { 'domain': [], # domain for searching values 'context': {}, # context for searching values } def _setup_regular_base(self, model): super(_Relational, self)._setup_regular_base(model) if self.comodel_name not in model.pool: _logger.warning("Field %s with unknown comodel_name %r", self, self.comodel_name) self.comodel_name = '_unknown' @property def _related_domain(self): if callable(self.domain): # will be called with another model than self's return lambda recs: self.domain(recs.env[self.model_name]) else: # maybe not correct if domain is a string... return self.domain _related_context = property(attrgetter('context')) _description_relation = property(attrgetter('comodel_name')) _description_context = property(attrgetter('context')) def _description_domain(self, env): return self.domain(env[self.model_name]) if callable(self.domain) else self.domain def null(self, record): return record.env[self.comodel_name] class Many2one(_Relational): """ The value of such a field is a recordset of size 0 (no record) or 1 (a single record). :param comodel_name: name of the target model (string) :param domain: an optional domain to set on candidate values on the client side (domain or string) :param context: an optional context to use on the client side when handling that field (dictionary) :param ondelete: what to do when the referred record is deleted; possible values are: ``'set null'``, ``'restrict'``, ``'cascade'`` :param auto_join: whether JOINs are generated upon search through that field (boolean, by default ``False``) :param delegate: set it to ``True`` to make fields of the target model accessible from the current model (corresponds to ``_inherits``) The attribute ``comodel_name`` is mandatory except in the case of related fields or field extensions. """ type = 'many2one' column_type = ('int4', 'int4') _slots = { 'ondelete': 'set null', # what to do when value is deleted 'auto_join': False, # whether joins are generated upon search 'delegate': False, # whether self implements delegation } def __init__(self, comodel_name=Default, string=Default, **kwargs): super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs) def _setup_attrs(self, model, name): super(Many2one, self)._setup_attrs(model, name) # determine self.delegate if not self.delegate: self.delegate = name in model._inherits.values() def update_db(self, model, columns): comodel = model.env[self.comodel_name] if not model.is_transient() and comodel.is_transient(): raise ValueError('Many2one %s from Model to TransientModel is forbidden' % self) if model.is_transient() and not comodel.is_transient(): # Many2one relations from TransientModel Model are annoying because # they can block deletion due to foreign keys. So unless stated # otherwise, we default them to ondelete='cascade'. 
self.ondelete = self.ondelete or 'cascade' return super(Many2one, self).update_db(model, columns) def update_db_column(self, model, column): super(Many2one, self).update_db_column(model, column) model.pool.post_init(self.update_db_foreign_key, model, column) def update_db_foreign_key(self, model, column): comodel = model.env[self.comodel_name] # ir_actions is inherited, so foreign key doesn't work on it if not comodel._auto or comodel._table == 'ir_actions': return # create/update the foreign key, and reflect it in 'ir.model.constraint' process = sql.fix_foreign_key if column else sql.add_foreign_key new = process(model._cr, model._table, self.name, comodel._table, 'id', self.ondelete or 'set null') if new: conname = '%s_%s_fkey' % (model._table, self.name) model.env['ir.model.constraint']._reflect_constraint(model, conname, 'f', None, self._module) def _update(self, records, value): """ Update the cached value of ``self`` for ``records`` with ``value``. """ cache = records.env.cache for record in records: cache.set(record, self, self.convert_to_cache(value, record, validate=False)) def convert_to_column(self, value, record, values=None): return value or None def convert_to_cache(self, value, record, validate=True): # cache format: tuple(ids) def process(ids): return record._prefetch[self.comodel_name].update(ids) or ids if type(value) in IdType: return process((value,)) elif isinstance(value, BaseModel): if not validate or (value._name == self.comodel_name and len(value) <= 1): return process(value._ids) raise ValueError("Wrong value for %s: %r" % (self, value)) elif isinstance(value, tuple): # value is either a pair (id, name), or a tuple of ids return process(value[:1]) elif isinstance(value, dict): return process(record.env[self.comodel_name].new(value)._ids) else: return () def convert_to_record(self, value, record): return record.env[self.comodel_name]._browse(value, record.env, record._prefetch) def convert_to_read(self, value, record, use_name_get=True): if use_name_get and value: # evaluate name_get() as superuser, because the visibility of a # many2one field value (id and name) depends on the current record's # access rights, and not the value's access rights. try: # performance: value.sudo() prefetches the same records as value return value.sudo().name_get()[0] except MissingError: # Should not happen, unless the foreign key is missing. return False else: return value.id def convert_to_write(self, value, record): return value.id def convert_to_export(self, value, record): return value.name_get()[0][1] if value else '' def convert_to_display_name(self, value, record): return ustr(value.display_name) def convert_to_onchange(self, value, record, names): if not value.id: return False return super(Many2one, self).convert_to_onchange(value, record, names) class _RelationalMulti(_Relational): """ Abstract class for relational fields *2many. """ _slots = { 'context_dependent': True, # depends on context (active_test) } def _update(self, records, value): """ Update the cached value of ``self`` for ``records`` with ``value``. 
""" cache = records.env.cache for record in records: if cache.contains(record, self): val = self.convert_to_cache(record[self.name] | value, record, validate=False) cache.set(record, self, val) else: cache.set_special(record, self, self._update_getter(record, value)) def _update_getter(self, record, value): def getter(): # determine the current field's value, and update it in cache only cache = record.env.cache cache.remove(record, self) val = self.convert_to_cache(record[self.name] | value, record, validate=False) cache.set(record, self, val) return val return getter def convert_to_cache(self, value, record, validate=True): # cache format: tuple(ids) def process(ids): return record._prefetch[self.comodel_name].update(ids) or ids if isinstance(value, BaseModel): if not validate or (value._name == self.comodel_name): return process(value._ids) elif isinstance(value, (list, tuple)): # value is a list/tuple of commands, dicts or record ids comodel = record.env[self.comodel_name] # determine the value ids; by convention empty on new records ids = OrderedSet(record[self.name].ids if record.id else ()) # modify ids with the commands for command in value: if isinstance(command, (tuple, list)): if command[0] == 0: ids.add(comodel.new(command[2], command[1]).id) elif command[0] == 1: comodel.browse(command[1]).update(command[2]) ids.add(command[1]) elif command[0] == 2: # note: the record will be deleted by write() ids.discard(command[1]) elif command[0] == 3: ids.discard(command[1]) elif command[0] == 4: ids.add(command[1]) elif command[0] == 5: ids.clear() elif command[0] == 6: ids = OrderedSet(command[2]) elif isinstance(command, dict): ids.add(comodel.new(command).id) else: ids.add(command) # return result as a tuple return process(tuple(ids)) elif not value: return () raise ValueError("Wrong value for %s: %s" % (self, value)) def convert_to_record(self, value, record): return record.env[self.comodel_name]._browse(value, record.env, record._prefetch) def convert_to_read(self, value, record, use_name_get=True): return value.ids def convert_to_write(self, value, record): # make result with new and existing records result = [(6, 0, [])] for record in value: if not record.id: values = {name: record[name] for name in record._cache} values = record._convert_to_write(values) result.append((0, 0, values)) elif record._is_dirty(): values = {name: record[name] for name in record._get_dirty()} values = record._convert_to_write(values) result.append((1, record.id, values)) else: result[0][2].append(record.id) return result def convert_to_onchange(self, value, record, names): # return the recordset value as a list of commands; the commands may # give all fields values, the client is responsible for figuring out # which fields are actually dirty result = [(5,)] for record in value: vals = { name: value._fields[name].convert_to_onchange(record[name], record, subnames) for name, subnames in names.items() if name != 'id' } if not record.id: result.append((0, record.id.ref or 0, vals)) elif vals: result.append((1, record.id, vals)) else: result.append((4, record.id)) return result def convert_to_export(self, value, record): return ','.join(name for id, name in value.name_get()) if value else '' def convert_to_display_name(self, value, record): raise NotImplementedError() def _compute_related(self, records): """ Compute the related field ``self`` on ``records``. 
""" super(_RelationalMulti, self)._compute_related(records) if self.related_sudo: # determine which records in the relation are actually accessible target = records.mapped(self.name) target_ids = set(target.search([('id', 'in', target.ids)]).ids) accessible = lambda target: target.id in target_ids # filter values to keep the accessible records only for record in records: record[self.name] = record[self.name].filtered(accessible) def _setup_regular_base(self, model): super(_RelationalMulti, self)._setup_regular_base(model) if isinstance(self.domain, list): self.depends += tuple( self.name + '.' + arg[0] for arg in self.domain if isinstance(arg, (tuple, list)) and isinstance(arg[0], pycompat.string_types) ) class One2many(_RelationalMulti): """ One2many field; the value of such a field is the recordset of all the records in ``comodel_name`` such that the field ``inverse_name`` is equal to the current record. :param comodel_name: name of the target model (string) :param inverse_name: name of the inverse ``Many2one`` field in ``comodel_name`` (string) :param domain: an optional domain to set on candidate values on the client side (domain or string) :param context: an optional context to use on the client side when handling that field (dictionary) :param auto_join: whether JOINs are generated upon search through that field (boolean, by default ``False``) :param limit: optional limit to use upon read (integer) The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in the case of related fields or field extensions. """ type = 'one2many' _slots = { 'inverse_name': None, # name of the inverse field 'auto_join': False, # whether joins are generated upon search 'limit': None, # optional limit to use upon read 'copy': False, # o2m are not copied by default } def __init__(self, comodel_name=Default, inverse_name=Default, string=Default, **kwargs): super(One2many, self).__init__( comodel_name=comodel_name, inverse_name=inverse_name, string=string, **kwargs ) def _setup_regular_full(self, model): super(One2many, self)._setup_regular_full(model) if self.inverse_name: # link self to its inverse field and vice-versa comodel = model.env[self.comodel_name] invf = comodel._fields[self.inverse_name] # In some rare cases, a ``One2many`` field can link to ``Int`` field # (res_model/res_id pattern). Only inverse the field if this is # a ``Many2one`` field. 
if isinstance(invf, Many2one): model._field_inverses.add(self, invf) comodel._field_inverses.add(invf, self) _description_relation_field = property(attrgetter('inverse_name')) def convert_to_onchange(self, value, record, names): names = names.copy() names.pop(self.inverse_name, None) return super(One2many, self).convert_to_onchange(value, record, names) def update_db(self, model, columns): if self.comodel_name in model.env: comodel = model.env[self.comodel_name] if self.inverse_name not in comodel._fields: raise UserError(_("No inverse field %r found for %r") % (self.inverse_name, self.comodel_name)) def read(self, records): # retrieve the lines in the comodel comodel = records.env[self.comodel_name].with_context(**self.context) inverse = self.inverse_name get_id = (lambda rec: rec.id) if comodel._fields[inverse].type == 'many2one' else int domain = self.domain(records) if callable(self.domain) else self.domain domain = domain + [(inverse, 'in', records.ids)] lines = comodel.search(domain, limit=self.limit) # group lines by inverse field (without prefetching other fields) group = defaultdict(list) for line in lines.with_context(prefetch_fields=False): # line[inverse] may be a record or an integer group[get_id(line[inverse])].append(line.id) # store result in cache cache = records.env.cache for record in records: cache.set(record, self, tuple(group[record.id])) def write(self, records, value, create=False): comodel = records.env[self.comodel_name].with_context(**self.context) inverse = self.inverse_name with records.env.norecompute(): for act in (value or []): if act[0] == 0: for record in records: act[2][inverse] = record.id comodel.create(act[2]) elif act[0] == 1: comodel.browse(act[1]).write(act[2]) elif act[0] == 2: comodel.browse(act[1]).unlink() elif act[0] == 3: inverse_field = comodel._fields[inverse] if inverse_field.ondelete == 'cascade': comodel.browse(act[1]).unlink() else: comodel.browse(act[1]).write({inverse: False}) elif act[0] == 4: record = records[-1] line = comodel.browse(act[1]) line_sudo = line.sudo().with_context(prefetch_fields=False) if int(line_sudo[inverse]) != record.id: line.write({inverse: record.id}) elif act[0] == 5: domain = self.domain(records) if callable(self.domain) else self.domain domain = domain + [(inverse, 'in', records.ids)] inverse_field = comodel._fields[inverse] if inverse_field.ondelete == 'cascade': comodel.search(domain).unlink() else: comodel.search(domain).write({inverse: False}) elif act[0] == 6: record = records[-1] comodel.browse(act[2]).write({inverse: record.id}) query = "SELECT id FROM %s WHERE %s=%%s AND id <> ALL(%%s)" % (comodel._table, inverse) comodel._cr.execute(query, (record.id, act[2] or [0])) lines = comodel.browse([row[0] for row in comodel._cr.fetchall()]) inverse_field = comodel._fields[inverse] if inverse_field.ondelete == 'cascade': lines.unlink() else: lines.write({inverse: False}) class Many2many(_RelationalMulti): """ Many2many field; the value of such a field is the recordset. :param comodel_name: name of the target model (string) The attribute ``comodel_name`` is mandatory except in the case of related fields or field extensions. :param relation: optional name of the table that stores the relation in the database (string) :param column1: optional name of the column referring to "these" records in the table ``relation`` (string) :param column2: optional name of the column referring to "those" records in the table ``relation`` (string) The attributes ``relation``, ``column1`` and ``column2`` are optional. 
If not given, names are automatically generated from model names, provided ``model_name`` and ``comodel_name`` are different! :param domain: an optional domain to set on candidate values on the client side (domain or string) :param context: an optional context to use on the client side when handling that field (dictionary) :param limit: optional limit to use upon read (integer) """ type = 'many2many' _slots = { 'relation': None, # name of table 'column1': None, # column of table referring to model 'column2': None, # column of table referring to comodel 'auto_join': False, # whether joins are generated upon search 'limit': None, # optional limit to use upon read } def __init__(self, comodel_name=Default, relation=Default, column1=Default, column2=Default, string=Default, **kwargs): super(Many2many, self).__init__( comodel_name=comodel_name, relation=relation, column1=column1, column2=column2, string=string, **kwargs ) def _setup_regular_base(self, model): super(Many2many, self)._setup_regular_base(model) if self.store: if not (self.relation and self.column1 and self.column2): # table name is based on the stable alphabetical order of tables comodel = model.env[self.comodel_name] if not self.relation: tables = sorted([model._table, comodel._table]) assert tables[0] != tables[1], \ "%s: Implicit/canonical naming of many2many relationship " \ "table is not possible when source and destination models " \ "are the same" % self self.relation = '%s_%s_rel' % tuple(tables) if not self.column1: self.column1 = '%s_id' % model._table if not self.column2: self.column2 = '%s_id' % comodel._table # check validity of table name check_pg_name(self.relation) def _setup_regular_full(self, model): super(Many2many, self)._setup_regular_full(model) if self.relation: m2m = model.pool._m2m # if inverse field has already been setup, it is present in m2m invf = m2m.get((self.relation, self.column2, self.column1)) if invf: comodel = model.env[self.comodel_name] model._field_inverses.add(self, invf) comodel._field_inverses.add(invf, self) else: # add self in m2m, so that its inverse field can find it m2m[(self.relation, self.column1, self.column2)] = self def update_db(self, model, columns): cr = model._cr # Do not reflect relations for custom fields, as they do not belong to a # module. They are automatically removed when dropping the corresponding # 'ir.model.field'. if not self.manual: model.pool.post_init(model.env['ir.model.relation']._reflect_relation, model, self.relation, self._module) if not sql.table_exists(cr, self.relation): comodel = model.env[self.comodel_name] query = """ CREATE TABLE "{rel}" ("{id1}" INTEGER NOT NULL, "{id2}" INTEGER NOT NULL, UNIQUE("{id1}","{id2}")); COMMENT ON TABLE "{rel}" IS %s; CREATE INDEX ON "{rel}" ("{id1}"); CREATE INDEX ON "{rel}" ("{id2}") """.format(rel=self.relation, id1=self.column1, id2=self.column2) cr.execute(query, ['RELATION BETWEEN %s AND %s' % (model._table, comodel._table)]) _schema.debug("Create table %r: m2m relation between %r and %r", self.relation, model._table, comodel._table) model.pool.post_init(self.update_db_foreign_keys, model) return True def update_db_foreign_keys(self, model): """ Add the foreign keys corresponding to the field's relation table. 
""" cr = model._cr comodel = model.env[self.comodel_name] reflect = model.env['ir.model.constraint']._reflect_constraint # create foreign key references with ondelete=cascade, unless the targets are SQL views if sql.table_kind(cr, model._table) != 'v': sql.add_foreign_key(cr, self.relation, self.column1, model._table, 'id', 'cascade') reflect(model, '%s_%s_fkey' % (self.relation, self.column1), 'f', None, self._module) if sql.table_kind(cr, comodel._table) != 'v': sql.add_foreign_key(cr, self.relation, self.column2, comodel._table, 'id', 'cascade') reflect(model, '%s_%s_fkey' % (self.relation, self.column2), 'f', None, self._module) def read(self, records): comodel = records.env[self.comodel_name] # String domains are supposed to be dynamic and evaluated on client-side # only (thus ignored here). domain = self.domain if isinstance(self.domain, list) else [] wquery = comodel._where_calc(domain) comodel._apply_ir_rules(wquery, 'read') order_by = comodel._generate_order_by(None, wquery) from_c, where_c, where_params = wquery.get_sql() query = """ SELECT {rel}.{id1}, {rel}.{id2} FROM {rel}, {from_c} WHERE {where_c} AND {rel}.{id1} IN %s AND {rel}.{id2} = {tbl}.id {order_by} {limit} OFFSET {offset} """.format(rel=self.relation, id1=self.column1, id2=self.column2, tbl=comodel._table, from_c=from_c, where_c=where_c or '1=1', limit=(' LIMIT %d' % self.limit) if self.limit else '', offset=0, order_by=order_by) where_params.append(tuple(records.ids)) # retrieve lines and group them by record group = defaultdict(list) records._cr.execute(query, where_params) for row in records._cr.fetchall(): group[row[0]].append(row[1]) # store result in cache cache = records.env.cache for record in records: cache.set(record, self, tuple(group[record.id])) def write(self, records, value, create=False): cr = records._cr comodel = records.env[self.comodel_name] parts = dict(rel=self.relation, id1=self.column1, id2=self.column2) clear = False # whether the relation should be cleared links = {} # {id: True (link it) or False (unlink it)} for act in (value or []): if not isinstance(act, (list, tuple)) or not act: continue if act[0] == 0: for record in records: links[comodel.create(act[2]).id] = True elif act[0] == 1: comodel.browse(act[1]).write(act[2]) elif act[0] == 2: comodel.browse(act[1]).unlink() elif act[0] == 3: links[act[1]] = False elif act[0] == 4: links[act[1]] = True elif act[0] == 5: clear = True links.clear() elif act[0] == 6: clear = True links = dict.fromkeys(act[2], True) if clear and not create: # remove all records for which user has access rights clauses, params, tables = comodel.env['ir.rule'].domain_get(comodel._name) cond = " AND ".join(clauses) if clauses else "1=1" query = """ DELETE FROM {rel} USING {tables} WHERE {rel}.{id1} IN %s AND {rel}.{id2}={table}.id AND {cond} """.format(table=comodel._table, tables=','.join(tables), cond=cond, **parts) cr.execute(query, [tuple(records.ids)] + params) # link records to the ids such that links[id] = True if any(links.values()): # beware of duplicates when inserting query = """ INSERT INTO {rel} ({id1}, {id2}) (SELECT a, b FROM unnest(%s) AS a, unnest(%s) AS b) EXCEPT (SELECT {id1}, {id2} FROM {rel} WHERE {id1} IN %s) """.format(**parts) ids = [id for id, flag in links.items() if flag] for sub_ids in cr.split_for_in_conditions(ids): cr.execute(query, (records.ids, list(sub_ids), tuple(records.ids))) # unlink records from the ids such that links[id] = False if not all(links.values()): query = """ DELETE FROM {rel} WHERE {id1} IN %s AND {id2} IN %s 
""".format(**parts) ids = [id for id, flag in links.items() if not flag] for sub_ids in cr.split_for_in_conditions(ids): cr.execute(query, (tuple(records.ids), sub_ids)) class Id(Field): """ Special case for field 'id'. """ type = 'integer' column_type = ('int4', 'int4') _slots = { 'string': 'ID', 'store': True, 'readonly': True, } def update_db(self, model, columns): pass # this column is created with the table def __get__(self, record, owner): if record is None: return self # the field is accessed through the class owner if not record: return False return record.ensure_one()._ids[0] def __set__(self, record, value): raise TypeError("field 'id' cannot be assigned") # imported here to avoid dependency cycle issues from odoo import SUPERUSER_ID from .exceptions import AccessError, MissingError, UserError from .models import check_pg_name, BaseModel, IdType
agpl-3.0
8,913,938,050,853,976,000
41.087811
126
0.585813
false
4.358009
false
false
false
RyadElssalihine/RyadElssalihine
user_manager/views.py
1
1508
# Create your views here. from rest_framework import status from rest_framework.decorators import api_view from rest_framework.response import Response from models import Profile,Application,Tab,Page,Footer from serializer import ProfileSerializer,TabSerializer,FooterSerializer from django.shortcuts import render from rest_framework.parsers import JSONParser from django.contrib.auth import authenticate, login ,logout from django.shortcuts import redirect from django.http import HttpResponse,HttpRequest from forms import ConnexionForm from django.core.urlresolvers import reverse import RyadEssalihine from django.contrib.auth.decorators import login_required @api_view(['POST']) def register(request): pass @api_view(['GET']) def user_list(request): profiles=Profile.objects.all() serializer=ProfileSerializer(profiles,many=True) return Response(serializer.data) @api_view(['GET']) def user_get(request): try: profiles = Profile.objects.get(user_id=request.user.id) except Profile.DoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) serializer=ProfileSerializer(profiles) return Response(serializer.data) @api_view(['GET']) def tabs_list(request): tabs=Tab.objects.all() serializer= TabSerializer(tabs,many=True) return Response(serializer.data) @api_view(['GET']) def footers_list(request): footers=Footer.objects.all() serializer=FooterSerializer(footers,many=True) return Response(serializer.data)
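# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: one possible way the
# `register` stub above could be completed. The payload keys ('username',
# 'password', 'email') and the assumption that Profile has a `user` foreign
# key are guesses based on the surrounding code, not project facts; only
# standard Django / Django REST Framework APIs are used.

from django.contrib.auth.models import User

@api_view(['POST'])
def register_example(request):
    username = request.data.get('username')
    password = request.data.get('password')
    email = request.data.get('email', '')
    if not username or not password:
        return Response({'detail': 'username and password are required'},
                        status=status.HTTP_400_BAD_REQUEST)
    if User.objects.filter(username=username).exists():
        return Response({'detail': 'username already taken'},
                        status=status.HTTP_409_CONFLICT)
    user = User.objects.create_user(username=username, password=password, email=email)
    Profile.objects.create(user=user)  # assumes Profile.user FK exists
    return Response(status=status.HTTP_201_CREATED)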
gpl-2.0
-4,443,966,142,931,526,700
27.45283
71
0.772546
false
3.896641
false
false
false
mareknetusil/twist
cbc/twist/kinematics.py
1
4324
__author__ = "Harish Narayanan" __copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__ __license__ = "GNU GPL Version 3 or any later version" from dolfin import * #from cbc.twist.coordinate_system import CartesianSystem # Renaming grad to Grad because it looks nicer in the reference # configuration from ufl import grad as ufl_grad # Deformation gradient def DeformationGradient(u): I = SecondOrderIdentity(u) return variable(I + Grad(u)) def Grad(v): return ufl_grad(v) # Infinitesimal strain tensor def InfinitesimalStrain(u): return variable(0.5*(Grad(u) + Grad(u).T)) # Second order identity tensor def SecondOrderIdentity(u): return variable(Identity(u.geometric_dimension())) # Determinant of the deformation gradient def Jacobian(u): F = DeformationGradient(u) return variable(det(F)) # Right Cauchy-Green tensor def RightCauchyGreen(u): F = DeformationGradient(u) return variable(F.T*F) # Green-Lagrange strain tensor def GreenLagrangeStrain(u): I = SecondOrderIdentity(u) C = RightCauchyGreen(u) return variable(0.5*(C - I)) # Left Cauchy-Green tensor def LeftCauchyGreen(u): F = DeformationGradient(u) return variable(F*F.T) # Euler-Almansi strain tensor def EulerAlmansiStrain(u): I = SecondOrderIdentity(u) b = LeftCauchyGreen(u) return variable(0.5*(I - inv(b))) # Invariants of an arbitrary tensor, A def Invariants(A): I1 = tr(A) I2 = 0.5*(tr(A)**2 - tr(A*A)) I3 = det(A) return [I1, I2, I3] # Invariants of the (right/left) Cauchy-Green tensor #TODO: NEEDS TESTING def CauchyGreenInvariants(u): C = RightCauchyGreen(u) [I1, I2, I3] = Invariants(C) return [variable(I1), variable(I2), variable(I3)] # Isochoric part of the deformation gradient #TODO: NEEDS TESTING def IsochoricDeformationGradient(u): F = DeformationGradient(u) J = Jacobian(u) return variable(J**(-1.0/3.0)*F) # Isochoric part of the right Cauchy-Green tensor #TODO: NEEDS TESTING def IsochoricRightCauchyGreen(u): C = RightCauchyGreen(u) J = Jacobian(u) return variable(J**(-2.0/3.0)*C) # Invariants of the ischoric part of the (right/left) Cauchy-Green # tensor. Note that I3bar = 1 by definition. 
#TODO: NEEDS TESTING def IsochoricCauchyGreenInvariants(u): Cbar = IsochoricRightCauchyGreen(u) [I1bar, I2bar, I3bar] = Invariants(Cbar) return [variable(I1bar), variable(I2bar)] # Principal stretches #TODO: NEEDS TESTING def PrincipalStretches(u): C = RightCauchyGreen(u) S = FunctionSpace(u.function_space().mesh(), "CG", 1) if (u.cell().geometric_dimension() == 2): D = sqrt(tr(C)*tr(C) - 4.0*det(C)) eig1 = sqrt(0.5*(tr(C) + D)) eig2 = sqrt(0.5*(tr(C) - D)) return [variable(eig1), variable(eig2)] if (u.cell().geometric_dimension() == 3): c = (1.0/3.0)*tr(C) D = C - c*SecondOrderIdentity(u) q = (1.0/2.0)*det(D) p = (1.0/6.0)*inner(D, D) ph = project(p, S) if (norm(ph) < DOLFIN_EPS): eig1 = sqrt(c) eig2 = sqrt(c) eig3 = sqrt(c) else: phi = (1.0/3.0)*atan(sqrt(p**3.0 - q**2.0)/q) if (phi < 0.0): phi = phi + DOLFIN_PI/3.0 end eig1 = sqrt(c + 2*sqrt(p)*cos(phi)) eig2 = sqrt(c - sqrt(p)*(cos(phi) + sqrt(3)*sin(phi))) eig3 = sqrt(c - sqrt(p)*(cos(phi) - sqrt(3)*sin(phi))) return [variable(eig1), variable(eig2), variable(eig3)] # Pull-back of a two-tensor from the current to the reference # configuration #TODO: NEEDS TESTING def PiolaTransform(A, u): J = Jacobian(u) F = DeformationGradient(u) B = J*A*inv(F).T return B # Push-forward of a two-tensor from the reference to the current # configuration #TODO: NEEDS TESTING def InversePiolaTransform(A, u): J = Jacobian(u) F = DeformationGradient(u) B = (1/J)*A*F.T return B # Computes M*C^nM # for n = 1 equals to the stretch in the direction M #TODO: NEEDS TESTING def DirectionalStretch(u, M, degree = 1): C = RightCauchyGreen(u) Cpow = SecondOrderIdentity(u) if degree >= 1: for i in range(degree): Cpow = C*Cpow directionalstretch = inner(M,Cpow*M) return variable(directionalstretch)
gpl-3.0
1,692,405,160,494,801,000
26.896774
83
0.639685
false
2.88459
true
false
false
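The Invariants(A) helper in the kinematics module above encodes the three principal invariants I1 = tr(A), I2 = 0.5*(tr(A)**2 - tr(A*A)), I3 = det(A). A minimal NumPy sketch, independent of dolfin/UFL and using a made-up deformation gradient F (not taken from the code above), cross-checking those formulas against the eigenvalue definitions:

import numpy as np

# Symmetric positive-definite tensor in the role of the right Cauchy-Green tensor C = F^T F
# (F is an arbitrary example chosen for this check).
F = np.array([[1.2, 0.1, 0.0],
              [0.0, 0.9, 0.2],
              [0.1, 0.0, 1.1]])
C = F.T @ F

# Same formulas as Invariants(A) above
I1 = np.trace(C)
I2 = 0.5 * (np.trace(C) ** 2 - np.trace(C @ C))
I3 = np.linalg.det(C)

# Eigenvalue definitions of the invariants
lam = np.linalg.eigvalsh(C)
assert np.isclose(I1, lam.sum())
assert np.isclose(I2, lam[0] * lam[1] + lam[0] * lam[2] + lam[1] * lam[2])
assert np.isclose(I3, lam.prod())
print(I1, I2, I3)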
snarfed/webmention-tools
bin/demo.py
1
1241
#!/usr/bin/env python # -*- coding: utf-8 -*- from webmentiontools.urlinfo import UrlInfo from webmentiontools.webmentionio import WebmentionIO # If you have an access token from webmention.io, # set it here. Some calls require it. webmention_io_token = None wio = WebmentionIO(webmention_io_token) # Get all links "mentioning" http://indiewebcamp.com/webmention target_url = 'http://indiewebcamp.com/webmention' ret = wio.linksToURL(target_url) if not ret: print wio.error else: for link in ret['links']: print print 'Webmention.io ID: %s' % link['id'] print ' Source: %s' % link['source'] print ' Verification Date: %s' % link['verified_date'] # Now use UrlInfo to get some more information about the source. # Most web apps showing webmentions, will probably do something # like this. info = UrlInfo(link['source']) print ' Source URL info:' print ' Title: %s' % info.title() print ' Pub Date: %s' % info.pubDate() print ' in-reply-to: %s' % info.inReplyTo() print ' Author image: %s' % info.image() print ' Snippet: %s' % info.snippetWithLink(target_url)
mit
8,639,230,686,331,052,000
33.472222
72
0.617244
false
3.428177
false
false
false
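The demo above is Python 2 (print statements). A hedged Python 3 flavoured sketch of the same loop, assuming the webmentiontools package is installed and using the same token-less access as the demo; only fields the demo already reads are used here:

from webmentiontools.webmentionio import WebmentionIO

wio = WebmentionIO(None)  # no webmention.io access token, as in the demo
ret = wio.linksToURL('http://indiewebcamp.com/webmention')
if not ret:
    print(wio.error)
else:
    for link in ret['links']:
        print(link['id'], link['source'], link['verified_date'])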
XiaochenCui/algorithm_submit
app/auth/views.py
1
5804
from flask import render_template, redirect, request, url_for, flash from flask.ext.login import login_user, logout_user, login_required, \ current_user from . import auth from .. import db from ..models import User from ..email import send_email from .forms import LoginForm, RegistrationForm, ChangePasswordForm, \ PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm @auth.before_app_request def before_request(): if current_user.is_authenticated: current_user.ping() if not current_user.confirmed \ and request.endpoint[:5] != 'auth.' \ and request.endpoint != 'static': return redirect(url_for('auth.unconfirmed')) @auth.route('/unconfirmed') def unconfirmed(): if current_user.is_anonymous or current_user.confirmed: return redirect(url_for('main.index')) return render_template('auth/unconfirmed.html') @auth.route('/login', methods=['GET', 'POST']) def login(): form = LoginForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user is not None and user.verify_password(form.password.data): login_user(user, form.remember_me.data) return redirect(request.args.get('next') or url_for('main.index')) flash('帐号或密码不可用') return render_template('auth/login.html', form=form) @auth.route('/logout') @login_required def logout(): logout_user() flash('你已登出') return redirect(url_for('main.index')) @auth.route('/register', methods=['GET', 'POST']) def register(): form = RegistrationForm() if form.validate_on_submit(): user = User(email=form.email.data, username=form.username.data, password=form.password.data) db.session.add(user) db.session.commit() token = user.generate_confirmation_token() send_email(user.email, '验证你的帐号', 'auth/email/confirm', user=user, token=token) flash('验证邮件已发送') return redirect(url_for('auth.login')) return render_template('auth/register.html', form=form) @auth.route('/confirm/<token>') @login_required def confirm(token): if current_user.confirmed: return redirect(url_for('main.index')) if current_user.confirm(token): flash('帐号已激活') else: flash('验证链接不可用或已过期') return redirect(url_for('main.index')) @auth.route('/confirm') @login_required def resend_confirmation(): token = current_user.generate_confirmation_token() send_email(current_user.email, '验证帐号', 'auth/email/confirm', user=current_user, token=token) flash('新的验证邮件已经发送到你的邮箱') return redirect(url_for('main.index')) @auth.route('/change-password', methods=['GET', 'POST']) @login_required def change_password(): form = ChangePasswordForm() if form.validate_on_submit(): if current_user.verify_password(form.old_password.data): current_user.password = form.password.data db.session.add(current_user) flash('密码更改成功') return redirect(url_for('main.index')) else: flash('密码不可用') return render_template("auth/change_password.html", form=form) @auth.route('/reset', methods=['GET', 'POST']) def password_reset_request(): if not current_user.is_anonymous: return redirect(url_for('main.index')) form = PasswordResetRequestForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user: token = user.generate_reset_token() send_email(user.email, '重新设置你的密码', 'auth/email/reset_password', user=user, token=token, next=request.args.get('next')) flash('重置密码的邮件已经发送到你的邮箱') return redirect(url_for('auth.login')) return render_template('auth/reset_password.html', form=form) @auth.route('/reset/<token>', methods=['GET', 'POST']) def password_reset(token): if not current_user.is_anonymous: return redirect(url_for('main.index')) form = PasswordResetForm() if 
form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user is None: return redirect(url_for('main.index')) if user.reset_password(token, form.password.data): flash('Your password has been updated.') return redirect(url_for('auth.login')) else: return redirect(url_for('main.index')) return render_template('auth/reset_password.html', form=form) @auth.route('/change-email', methods=['GET', 'POST']) @login_required def change_email_request(): form = ChangeEmailForm() if form.validate_on_submit(): if current_user.verify_password(form.password.data): new_email = form.email.data token = current_user.generate_email_change_token(new_email) send_email(new_email, '验证邮箱', 'auth/email/change_email', user=current_user, token=token) flash('验证邮件已经发送到你的邮箱') return redirect(url_for('main.index')) else: flash('邮箱或密码不可用') return render_template("auth/change_email.html", form=form) @auth.route('/change-email/<token>') @login_required def change_email(token): if current_user.change_email(token): flash('你的邮箱地址已经更新') else: flash('错误的请求') return redirect(url_for('main.index'))
mit
5,879,881,981,366,820,000
32.95092
78
0.630105
false
3.413942
false
false
false
kangwonlee/ECA
lab_07_linear_algebra/gauss_jordan.py
1
1766
# -*- coding: utf8 -*- from pprint import pprint import linear_algebra as la def gauss_jordan(A): # 행렬의 크기 n_row = len(A) n_column = len(A[0]) # 단위 행렬과의 Augmented Matrix 를 만듦 AI = [] for i_row in xrange(n_row): AI_row = [0.0] * (n_column * 2) for j_column in xrange(n_column): AI_row[j_column] = A[i_row][j_column] for j_column in xrange(n_column, n_column * 2): AI_row[j_column] = 0.0 AI_row[n_column + i_row] = 1.0 AI.append(AI_row) print "Augmented matrix" print '1234567890' * 7 pprint(AI, width=30) # pivot 반복문 for i_pivot in xrange(n_row): # pivot 행을 pivot 요소로 나눔. # pivot 요소는 1이 됨 ratio = 1.0 / float(AI[i_pivot][i_pivot]) for k_column in xrange(n_column * 2): AI[i_pivot][k_column] *= ratio # 행 반복문 for j_row in xrange(0, n_row): if j_row != i_pivot: ratio = -AI[j_row][i_pivot] # 열 반복문 for k_column in xrange(n_column * 2): AI[j_row][k_column] += ratio * AI[i_pivot][k_column] # 이 반복문이 끝나고 나면 주 대각선 이외의 요소는 모두 0 print "After Gauss Jordan" pprint(AI) # 오른쪽의 행렬을 떼어냄 result = [] for i_row in xrange(n_row): result.append(AI[i_row][n_column:]) return result if "__main__" == __name__: A = [[3, 2, 1], [2, 3, 2], [1, 2, 3]] A_inverse = gauss_jordan(A) print "A inverse" pprint(A_inverse) I_expected = la.multiply_matrix_matrix(A, A_inverse) print "I expected" pprint(I_expected)
apache-2.0
1,296,910,529,811,983,000
23.666667
72
0.514742
false
2.296192
false
false
false
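gauss_jordan() above inverts A by row-reducing the augmented matrix [A | I] until the left block becomes the identity, then returning the right block. A small NumPy cross-check of the same 3x3 example used in its __main__ block (this relies on numpy.linalg rather than the course's linear_algebra module):

import numpy as np

A = np.array([[3, 2, 1],
              [2, 3, 2],
              [1, 2, 3]], dtype=float)

A_inv = np.linalg.inv(A)

# A @ A_inv should be numerically the identity, mirroring the I_expected check above
assert np.allclose(A @ A_inv, np.eye(3))
print(np.round(A_inv, 6))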
Chris7/django-djangui
djangui/models/mixins.py
1
1697
from __future__ import absolute_import __author__ = 'chris' from django.forms.models import model_to_dict import six class UpdateScriptsMixin(object): def save(self, **kwargs): super(UpdateScriptsMixin, self).save(**kwargs) from ..backend.utils import load_scripts load_scripts() class DjanguiPy2Mixin(object): def __unicode__(self): return unicode(self.__str__()) # from # http://stackoverflow.com/questions/1355150/django-when-saving-how-can-you-check-if-a-field-has-changed class ModelDiffMixin(object): """ A model mixin that tracks model fields' values and provide some useful api to know what fields have been changed. """ def __init__(self, *args, **kwargs): super(ModelDiffMixin, self).__init__(*args, **kwargs) self.__initial = self._dict @property def diff(self): d1 = self.__initial d2 = self._dict diffs = [(k, (v, d2[k])) for k, v in d1.items() if v != d2[k]] return dict(diffs) @property def has_changed(self): return bool(self.diff) @property def changed_fields(self): return self.diff.keys() def get_field_diff(self, field_name): """ Returns a diff for field if it's changed and None otherwise. """ return self.diff.get(field_name, None) def save(self, *args, **kwargs): """ Saves model and set initial state. """ super(ModelDiffMixin, self).save(*args, **kwargs) self.__initial = self._dict @property def _dict(self): return model_to_dict(self, fields=[field.name for field in self._meta.fields])
gpl-3.0
4,384,404,271,976,613,400
27.3
104
0.602829
false
3.77951
false
false
false
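ModelDiffMixin above snapshots the model's field values at __init__/save time and computes diff lazily by comparing the snapshot with the current state. A framework-free sketch of the same idea using a hypothetical PlainDiff class (the real mixin depends on Django's model_to_dict and _meta.fields):

class PlainDiff:
    """Track initial attribute values and report what changed."""
    def __init__(self, **fields):
        self.__dict__.update(fields)
        self._initial = dict(fields)

    @property
    def diff(self):
        current = {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
        return {k: (v, current[k]) for k, v in self._initial.items() if current[k] != v}

    @property
    def has_changed(self):
        return bool(self.diff)


obj = PlainDiff(title='draft', status='new')
obj.status = 'published'
assert obj.has_changed
assert obj.diff == {'status': ('new', 'published')}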
manaris/jythonMusic
15. simpleCircleInstrument.py
1
2279
# simpleCircleInstrument.py # # Demonstrates how to use mouse and keyboard events to build a simple # drawing musical instrument. # from gui import * from music import * from math import sqrt ### initialize variables ###################### minPitch = C1 # instrument pitch range maxPitch = C8 # create display d = Display("Circle Instrument") # default dimensions (600 x 400) d.setColor( Color(51, 204, 255) ) # set background to turquoise beginX = 0 # holds starting x coordinate for next circle beginY = 0 # holds starting y coordinate # maximum circle diameter - same as diagonal of display maxDiameter = sqrt(d.getWidth()**2 + d.getHeight()**2) # calculate it ### define callback functions ###################### def beginCircle(x, y): # for when mouse is pressed global beginX, beginY beginX = x # remember new circle's coordinates beginY = y def endCircleAndPlayNote(endX, endY): # for when mouse is released global beginX, beginY, d, maxDiameter, minPitch, maxPitch # calculate circle parameters # first, calculate distance between begin and end points diameter = sqrt( (beginX-endX)**2 + (beginY-endY)**2 ) diameter = int(diameter) # in pixels - make it an integer radius = diameter/2 # get radius centerX = (beginX + endX)/2 # circle center is halfway between... centerY = (beginY + endY)/2 # ...begin and end points # draw circle with yellow color, unfilled, 3 pixels thick d.drawCircle(centerX, centerY, radius, Color.YELLOW, False, 3) # create note pitch = mapScale(diameter, 0, maxDiameter, minPitch, maxPitch, MAJOR_SCALE) # invert pitch (larger diameter, lower pitch) pitch = maxPitch - pitch # and play note Play.note(pitch, 0, 5000) # start immediately, hold for 5 secs def clearOnSpacebar(key): # for when a key is pressed global d # if they pressed space, clear display and stop the music if key == VK_SPACE: d.removeAll() # remove all shapes Play.allNotesOff() # stop all notes ### assign callback functions to display event handlers ############# d.onMouseDown( beginCircle ) d.onMouseUp( endCircleAndPlayNote ) d.onKeyDown( clearOnSpacebar )
gpl-3.0
2,095,986,917,681,406,200
31.571429
70
0.660816
false
3.628981
false
false
false
intelligent-agent/redeem
tests/gcode/test_M83.py
1
1638
from __future__ import absolute_import from .MockPrinter import MockPrinter from redeem.Path import Path class M83_Tests(MockPrinter): def test_gcodes_M83_from_absolute(self): """ set state as it should be after a G90, all axes absolute """ self.printer.axes_absolute = ["X", "Y", "Z", "E", "H", "A", "B", "C"] self.printer.axes_relative = [] self.printer.movement == Path.ABSOLUTE self.execute_gcode("M83") self.assertEqual(self.printer.movement, Path.MIXED) self.assertEqual(self.printer.axes_absolute, ["X", "Y", "Z"]) self.assertEqual(self.printer.axes_relative, ["E", "H", "A", "B", "C"]) def test_gcodes_M83_from_relative(self): """ set state as it should be after a G91, all axes relative """ self.printer.axes_absolute = [] self.printer.axes_relative = ["X", "Y", "Z", "E", "H", "A", "B", "C"] self.printer.movement == Path.RELATIVE self.execute_gcode("M83") self.assertEqual(self.printer.movement, Path.RELATIVE) self.assertEqual(self.printer.axes_relative, ["X", "Y", "Z", "E", "H", "A", "B", "C"]) self.assertEqual(self.printer.axes_absolute, []) def test_gcodes_M83_from_mixed(self): """ set state as it should be after a G90/M83, XYZ absolute and extruders relative """ self.printer.axes_absolute = ["X", "Y", "Z"] self.printer.axes_relative = ["E", "H", "A", "B", "C"] self.printer.movement == Path.MIXED self.execute_gcode("M83") self.assertEqual(self.printer.movement, Path.MIXED) self.assertEqual(self.printer.axes_relative, ["E", "H", "A", "B", "C"]) self.assertEqual(self.printer.axes_absolute, ["X", "Y", "Z"])
gpl-3.0
4,424,008,043,799,721,000
44.5
90
0.639194
false
3.005505
false
false
false
jayvdb/travis_log_fetch
tests/test_github.py
1
1132
"""Test Github resolution.""" from __future__ import absolute_import, unicode_literals from travis_log_fetch.config import ( _get_github, get_options, ) from travis_log_fetch.get import ( get_forks, ) import pytest # Note 'foo' is a real Github user, but they do not # have repos bar or baz class TestForks(object): def test_invalid(self): options = get_options() if not options.access_token: pytest.skip("github access token needed") _github = _get_github() pytest.raises(AssertionError, get_forks, _github, 'foo/bar') def test_zero(self): options = get_options() if not options.access_token: pytest.skip("github access token needed") _github = _get_github() forks = get_forks(_github, 'travispy/on_pypy') assert len(forks) == 0 def test_fork(self): options = get_options() if not options.access_token: pytest.skip("github access token needed") _github = _get_github() forks = get_forks(_github, 'menegazzo/travispy') assert 'jayvdb/travispy' in forks
mit
-6,854,272,443,919,678,000
27.3
68
0.621025
false
3.559748
true
false
false
jacobajit/ion
intranet/apps/events/views.py
1
11092
# -*- coding: utf-8 -*- import datetime import logging import bleach from django import http from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core import exceptions from django.shortcuts import get_object_or_404, redirect, render from .forms import AdminEventForm, EventForm from .models import Event logger = logging.getLogger(__name__) @login_required def events_view(request): """Events homepage. Shows a list of events occurring in the next week, month, and future. """ is_events_admin = request.user.has_admin_permission('events') if request.method == "POST": if "approve" in request.POST and is_events_admin: event_id = request.POST.get('approve') event = get_object_or_404(Event, id=event_id) event.rejected = False event.approved = True event.approved_by = request.user event.save() messages.success(request, "Approved event {}".format(event)) if "reject" in request.POST and is_events_admin: event_id = request.POST.get('reject') event = get_object_or_404(Event, id=event_id) event.approved = False event.rejected = True event.rejected_by = request.user event.save() messages.success(request, "Rejected event {}".format(event)) if is_events_admin and "show_all" in request.GET: viewable_events = (Event.objects.prefetch_related("groups")) else: viewable_events = (Event.objects.visible_to_user(request.user).prefetch_related("groups")) # get date objects for week and month today = datetime.date.today() delta = today - datetime.timedelta(days=today.weekday()) this_week = (delta, delta + datetime.timedelta(days=7)) this_month = (this_week[1], this_week[1] + datetime.timedelta(days=31)) events_categories = [ { "title": "This week", "events": viewable_events.filter(time__gte=this_week[0], time__lt=this_week[1]) }, { "title": "This month", "events": viewable_events.filter(time__gte=this_month[0], time__lt=this_month[1]) }, { "title": "Future", "events": viewable_events.filter(time__gte=this_month[1]) } ] if is_events_admin: unapproved_events = (Event.objects.filter(approved=False, rejected=False).prefetch_related("groups")) events_categories = [{"title": "Awaiting Approval", "events": unapproved_events}] + events_categories if is_events_admin and "show_all" in request.GET: events_categories.append({"title": "Past", "events": viewable_events.filter(time__lt=this_week[0])}) context = { "events": events_categories, "num_events": viewable_events.count(), "is_events_admin": is_events_admin, "events_admin": is_events_admin, "show_attend": True, "show_icon": True } return render(request, "events/home.html", context) @login_required def join_event_view(request, id): """Join event page. If a POST request, actually add or remove the attendance of the current user. Otherwise, display a page with confirmation. id: event id """ event = get_object_or_404(Event, id=id) if request.method == "POST": if not event.show_attending: return redirect("events") if "attending" in request.POST: attending = request.POST.get("attending") attending = (attending == "true") if attending: event.attending.add(request.user) else: event.attending.remove(request.user) return redirect("events") context = {"event": event, "is_events_admin": request.user.has_admin_permission('events')} return render(request, "events/join_event.html", context) @login_required def event_roster_view(request, id): """Show the event roster. Users with hidden eighth period permissions will not be displayed. Users will be able to view all other users, along with a count of the number of hidden users. 
(Same as 8th roster page.) Admins will see a full roster at the bottom. id: event id """ event = get_object_or_404(Event, id=id) full_roster = list(event.attending.all()) viewable_roster = [] num_hidden_members = 0 for p in full_roster: if p.can_view_eighth: viewable_roster.append(p) else: num_hidden_members += 1 context = { "event": event, "viewable_roster": viewable_roster, "full_roster": full_roster, "num_hidden_members": num_hidden_members, "is_events_admin": request.user.has_admin_permission('events'), } return render(request, "events/roster.html", context) @login_required def add_event_view(request): """Add event page. Currently, there is an approval process for events. If a user is an events administrator, they can create events directly. Otherwise, their event is added in the system but must be approved. """ is_events_admin = request.user.has_admin_permission('events') if not is_events_admin: return redirect("request_event") if request.method == "POST": form = EventForm(data=request.POST, all_groups=request.user.has_admin_permission('groups')) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.description = bleach.linkify(obj.description) # auto-approve if admin obj.approved = True obj.approved_by = request.user messages.success(request, "Because you are an administrator, this event was auto-approved.") obj.created_hook(request) obj.save() return redirect("events") else: messages.error(request, "Error adding event") else: form = EventForm(all_groups=request.user.has_admin_permission('groups')) context = {"form": form, "action": "add", "action_title": "Add" if is_events_admin else "Submit", "is_events_admin": is_events_admin} return render(request, "events/add_modify.html", context) @login_required def request_event_view(request): """Request event page. Currently, there is an approval process for events. If a user is an events administrator, they can create events directly. Otherwise, their event is added in the system but must be approved. """ is_events_admin = False if request.method == "POST": form = EventForm(data=request.POST, all_groups=request.user.has_admin_permission('groups')) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.description = bleach.linkify(obj.description) messages.success(request, "Your event needs to be approved by an administrator. If approved, it should appear on Intranet within 24 hours.") obj.created_hook(request) obj.save() return redirect("events") else: messages.error(request, "Error adding event") else: form = EventForm(all_groups=request.user.has_admin_permission('groups')) context = {"form": form, "action": "add", "action_title": "Submit", "is_events_admin": is_events_admin} return render(request, "events/add_modify.html", context) @login_required def modify_event_view(request, id=None): """Modify event page. You may only modify an event if you were the creator or you are an administrator. 
id: event id """ event = get_object_or_404(Event, id=id) is_events_admin = request.user.has_admin_permission('events') if not is_events_admin: raise exceptions.PermissionDenied if request.method == "POST": if is_events_admin: form = AdminEventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups')) else: form = EventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups')) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.description = bleach.linkify(obj.description) obj.save() messages.success(request, "Successfully modified event.") # return redirect("events") else: messages.error(request, "Error adding event.") else: if is_events_admin: form = AdminEventForm(instance=event, all_groups=request.user.has_admin_permission('groups')) else: form = EventForm(instance=event, all_groups=request.user.has_admin_permission('groups')) context = {"form": form, "action": "modify", "action_title": "Modify", "id": id, "is_events_admin": is_events_admin} return render(request, "events/add_modify.html", context) @login_required def delete_event_view(request, id): """Delete event page. You may only delete an event if you were the creator or you are an administrator. Confirmation page if not POST. id: event id """ event = get_object_or_404(Event, id=id) if not request.user.has_admin_permission('events'): raise exceptions.PermissionDenied if request.method == "POST": try: event.delete() messages.success(request, "Successfully deleted event.") except Event.DoesNotExist: pass return redirect("events") else: return render(request, "events/delete.html", {"event": event}) @login_required def show_event_view(request): """ Unhide an event that was hidden by the logged-in user. events_hidden in the user model is the related_name for "users_hidden" in the EventUserMap model. """ if request.method == "POST": event_id = request.POST.get("event_id") if event_id: event = Event.objects.get(id=event_id) event.user_map.users_hidden.remove(request.user) event.user_map.save() return http.HttpResponse("Unhidden") return http.Http404() else: return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED") @login_required def hide_event_view(request): """ Hide an event for the logged-in user. events_hidden in the user model is the related_name for "users_hidden" in the EventUserMap model. """ if request.method == "POST": event_id = request.POST.get("event_id") if event_id: event = Event.objects.get(id=event_id) event.user_map.users_hidden.add(request.user) event.user_map.save() return http.HttpResponse("Hidden") return http.Http404() else: return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED")
gpl-2.0
-4,660,954,090,174,681,000
33.554517
152
0.627209
false
3.883754
false
false
false
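events_view above buckets events into "this week" / "this month" / "future" windows anchored on the Monday of the current week. A standalone sketch of just that date arithmetic, using the same expressions outside Django:

import datetime

today = datetime.date.today()
monday = today - datetime.timedelta(days=today.weekday())               # start of this week
this_week = (monday, monday + datetime.timedelta(days=7))
this_month = (this_week[1], this_week[1] + datetime.timedelta(days=31))
print(this_week, this_month)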
TheVirtualLtd/bda.plone.shop
src/bda/plone/shop/vocabularies.py
1
6162
# -*- coding: utf-8 -*- from bda.plone.checkout.vocabularies import country_vocabulary from bda.plone.checkout.vocabularies import gender_vocabulary from bda.plone.payment import Payments from bda.plone.shipping import Shippings from bda.plone.shop import message_factory as _ from bda.plone.shop.utils import get_shop_article_settings from bda.plone.shop.utils import get_shop_tax_settings from zope.interface import provider from zope.schema.interfaces import IVocabularyFactory from zope.schema.vocabulary import SimpleTerm from zope.schema.vocabulary import SimpleVocabulary # This are the overall avaiable quantity units which then can be reduced in # control panel. If you need to provide more quantity units add it here or # patch this vocab AVAILABLE_QUANTITY_UNITS = { 'quantity': _('quantity', default='Quantity'), 'meter': _('meter', default='Meter'), 'kilo': _('kilo', default='Kilo'), 'liter': _('liter', default='Liter'), } @provider(IVocabularyFactory) def AvailableQuantityUnitVocabulary(context): # vocab is used in shop settings control panel items = AVAILABLE_QUANTITY_UNITS.items() return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def QuantityUnitVocabulary(context): # vocab is used for buyable items try: settings = get_shop_article_settings() except KeyError: # happens GS profile application if registry entries not present yet return AvailableQuantityUnitVocabulary(context) if not settings: return terms = [] for quantity_unit in settings.quantity_units: title = AVAILABLE_QUANTITY_UNITS.get(quantity_unit, quantity_unit) terms.append(SimpleTerm(value=quantity_unit, title=title)) return SimpleVocabulary(terms) # This are the overall avaiable VAT values which then can be reduced in # control panel. If you need to provide more vat values add it here or # patch this vocab AVAILABLE_VAT_VALUES = { '0': '0%', '2.5': '2,5%', '3.8': '3,8%', '8': '8%', '10': '10%', '15': '15%', '20': '20%', '25': '25%', } @provider(IVocabularyFactory) def AvailableVatVocabulary(context): # vocab is used in shop settings control panel items = AVAILABLE_VAT_VALUES.items() items = sorted(items, key=lambda x: x[0]) return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def VatVocabulary(context): # vocab is used for buyable items. try: settings = get_shop_tax_settings() except KeyError: # happens GS profile application if registry entries not present yet return AvailableVatVocabulary(context) settings.vat terms = [] if settings.vat: for vat in settings.vat: title = AVAILABLE_VAT_VALUES.get(vat, vat) terms.append(SimpleTerm(value=vat, title=title)) return SimpleVocabulary(terms) # This are the overall avaiable currency values available in # control panel. 
If you need to provide more currencies add it here or # patch this vocab AVAILABLE_CURRENCIES = { 'EUR': _('EUR', default='Euro'), 'USD': _('USD', default='US Dollar'), 'INR': _('INR', default='Indian Rupee'), 'CAD': _('CAD', default='Canadian Dollar'), 'CHF': _('CHF', default='Swiss Franc'), 'GBP': _('GBP', default='British Pound Sterling'), 'AUD': _('AUD', default='Australian Dollar'), 'NOK': _('NOK', default='Norwegian Krone'), 'SEK': _('SEK', default='Swedish Krona'), 'DKK': _('DKK', default='Danish Krone'), 'YEN': _('YEN', default='Japanese Yen'), 'NZD': _('NZD', default='New Zealand Dollar'), } @provider(IVocabularyFactory) def AvailableCurrenciesVocabulary(context): items = AVAILABLE_CURRENCIES.items() return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def CurrencyDisplayOptionsVocabulary(context): items = [ ('yes', _('yes', default='Yes')), ('no', _('no', default='No')), ('symbol', _('symbol', default='Symbol')), ] return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def GenderVocabulary(context): return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in gender_vocabulary()]) @provider(IVocabularyFactory) def CountryVocabulary(context): """VocabularyFactory for countries from ISO3166 source. """ return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in country_vocabulary()]) @provider(IVocabularyFactory) def AvailableShippingMethodsVocabulary(context): shippings = Shippings(context).shippings items = [(shipping.sid, shipping.label) for shipping in shippings] return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def ShippingMethodsVocabulary(context): try: items = Shippings(context).vocab except (KeyError, TypeError): # happens GS profile application if registry entries not present yet return AvailableShippingMethodsVocabulary(context) return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def AvailablePaymentMethodsVocabulary(context): payments = Payments(context).payments items = [(payment.pid, payment.label) for payment in payments] return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def PaymentMethodsVocabulary(context): try: items = Payments(context).vocab except KeyError: # happens GS profile application if registry entries not present yet return AvailablePaymentMethodsVocabulary(context) return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def SurchargeablePaymentMethodsVocabulary(context): payments = Payments(context).payments items = [(payment.pid, payment.label) for payment in payments] return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
bsd-3-clause
-2,564,873,223,783,599,600
34.011364
77
0.697339
false
3.535284
false
false
false
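Nearly every factory in the vocabularies module above follows the same pattern: take (value, title) pairs and wrap them in SimpleTerm/SimpleVocabulary. A minimal sketch of that pattern in isolation, assuming only that zope.schema is installed (the dict here is a trimmed stand-in, not the full AVAILABLE_QUANTITY_UNITS):

from zope.schema.vocabulary import SimpleTerm, SimpleVocabulary

quantity_units = {'quantity': 'Quantity', 'meter': 'Meter'}
vocab = SimpleVocabulary(
    [SimpleTerm(value=k, title=v) for k, v in quantity_units.items()])

assert vocab.getTerm('meter').title == 'Meter'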
elaske/mufund
tests.py
1
3839
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Evan Laske # @Date: 2014-03-01 21:45:31 # @Last Modified by: Evan Laske # @Last Modified time: 2015-09-15 23:51:12 import urllib import urllib2 from bs4 import BeautifulSoup import html5lib import re from StockQuote import StockQuote from MutualFundData import MutualFundData import logging import argparse def main(): parser = argparse.ArgumentParser() parser.add_argument('tickers', metavar='ticker', nargs='+', help='The ticker(s) of the funds to predict.') parser.add_argument('--logfile', dest='logfile', default='', help='Specify a log file to log info to.') parser.add_argument('--loglevel', dest='loglevel', default='', help='Specify a logging level to output.') args = parser.parse_args() # Logging configuration args logConfigArgs = dict() # If the log level was specified if args.loglevel: # Convert it to something usable numeric_level = getattr(logging, args.loglevel.upper(), None) # Double-check it's a valid logging level if not isinstance(numeric_level, int): raise ValueError('Invalid log level: %s' % args.loglevel) logConfigArgs['level'] = numeric_level # If there was any of the logging files specified... if args.logfile: logConfigArgs['filename'] = args.logfile # This will make the log file be overwritten each time. logConfigArgs['filemode'] = 'w' # If any of the logging arguments are specified, configure logging if args.logfile or args.loglevel: logging.basicConfig(**logConfigArgs) # Gather the data from the given stocks testStockQuote(args.tickers) # Test the mutual fund data gathering testMutualFund(args.tickers) def testStockQuote(tickers): """ """ for ticker in tickers: sq = StockQuote(ticker) print sq.ticker, sq.price, sq.change, sq.percent def testMutualFund(tickers): """ """ for ticker in tickers: mfd = MutualFundData(ticker) print mfd.price, mfd.change, mfd.percent holdings = mfd.holdings() print holdings for h in holdings: print 'Retrieving {0} data...'.format(h) sq = StockQuote(h) delta = float(holdings[h])*float(sq.percent)/100 holdings[h] = [holdings[h], sq.price, sq.change, sq.percent, delta] print delta, holdings[h], 'Complete.' print sq #print holdings print '\nESTIMATED CHANGE: {0}\nTOTAL COMPOSITION: {1}'.format( sum([v[4] for (k,v) in holdings.items()]), sum([float(v[0]) for (k,v) in holdings.items()])) def randomTest(): ticker = "FBIOX" quoteURL = 'http://quotes.morningstar.com/fund/f?t=' portfolioURL = 'http://portfolios.morningstar.com/fund/summary?t=' holdingsURL = 'http://portfolios.morningstar.com/fund/holdings?t=' googleFinanceURL = 'http://www.google.com/finance?q=' # Test with a stock #sq = StockQuote("goog") #print sq.price, sq.change, sq.percent #print sq # Test with a mutual fund sq = StockQuote("fiuix") print sq.price, sq.change, sq.percent mfd = MutualFundData("FBIOX") print mfd.price, mfd.change, mfd.percent holdings = mfd.holdings() #print holdings for h in holdings: print 'Retrieving {0} data...'.format(h) sq = StockQuote(h) delta = float(holdings[h])*float(sq.percent)/100 holdings[h] = [holdings[h], sq.price, sq.change, sq.percent, delta] print 'Complete.' #print holdings print '\nESTIMATED CHANGE: {0}\nTOTAL COMPOSITION: {1}'.format( sum([v[4] for (k,v) in holdings.items()]), sum([float(v[0]) for (k,v) in holdings.items()])) # Standard main call if __name__ == "__main__": main()
gpl-3.0
6,559,670,225,888,140,000
32.684211
110
0.636624
false
3.502737
true
false
false
Foldblade/EORS
Mypackage/back_to_yesterday.py
1
3091
# encoding:utf-8 ''' ———————————————————————————————— back_to_yesterday.py 对备份文件的回档,所谓‘回到昨天’功能。 实现原理:删除源文件。解压备份的zip,自动覆盖。 ———————————————————————————————— ''' import os import zipfile import shutil import time def back_to_yesterday(): where_script = os.path.split(os.path.realpath(__file__))[0] # print(where_script) where_rootmenu = where_script[:where_script.rfind('\\')] # print(where_rootmenu) def unzip(zipfilepath, unzippath): # zipfilepath 为需要解压的文件路径,unzippath为解压的目标目录 # e.g. unzip(where_rootmenu + '/cache/cache.zip', where_rootmenu + '/cache') f = zipfile.ZipFile(zipfilepath, 'r') for file in f.infolist(): d = file.date_time gettime = "%s/%s/%s %s:%s" % (d[0], d[1], d[2], d[3], d[4]) # 获取文件原修改时间 f.extract(file, unzippath) filep = os.path.join(unzippath, file.filename) timearry = time.mktime(time.strptime(gettime, '%Y/%m/%d %H:%M')) os.utime(filep, (timearry, timearry)) # 重写文件原修改时间 return def clear_unexist(dirname, zipfilename): zipfilepath = (where_rootmenu + '/backup/' + zipfilename) fileinzip = [] f = zipfile.ZipFile(zipfilepath, 'r') for filename in f.namelist(): # print(filename) fileinzip.append(filename) for parent, dirnames, filenames in os.walk(dirname): for filename in filenames: # print ("parent is:" + parent) # print("filename is:" + filename) # print ("the full name of the file is:" + os.path.join(parent,filename)) if filename not in fileinzip: os.remove(os.path.join(parent, filename)) # 删除压缩包内不存在的文件 return clear_unexist(where_rootmenu + '/cache', 'cache.zip') clear_unexist(where_rootmenu + '/data', 'data.zip') clear_unexist(where_rootmenu + '/output', 'output.zip') # 删除压缩包内不存在的文件 shutil.copyfile(where_rootmenu + '/backup/cache.zip', where_rootmenu + '/cache/cache.zip') shutil.copyfile(where_rootmenu + '/backup/output.zip', where_rootmenu + '/output/output.zip') shutil.copyfile(where_rootmenu + '/backup/data.zip', where_rootmenu + '/data/data.zip') # 拷贝备份zip到各自目录下 unzip(where_rootmenu + '/cache/cache.zip', where_rootmenu + '/cache') unzip(where_rootmenu + '/output/output.zip', where_rootmenu + '/output') unzip(where_rootmenu + '/data/data.zip', where_rootmenu + '/data') # 解压文件 os.remove(where_rootmenu + '/cache/cache.zip') os.remove(where_rootmenu + '/output/output.zip') os.remove(where_rootmenu + '/data/data.zip') # 删除拷贝的zip文件 print('成功穿越回昨日!!') return
gpl-3.0
-882,281,870,340,508,500
35.5
97
0.595705
false
2.798964
false
false
false
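The unzip() helper above restores each extracted file's original modification time by reading date_time from the zip entry and calling os.utime. A compact sketch of that same trick as a reusable function (the name unzip_keep_mtime is hypothetical; paths are left to the caller):

import os
import time
import zipfile

def unzip_keep_mtime(zip_path, dest):
    """Extract zip_path into dest, preserving each entry's stored mtime."""
    with zipfile.ZipFile(zip_path) as zf:
        for info in zf.infolist():
            target = zf.extract(info, dest)
            # info.date_time is a 6-tuple; pad it to the 9 fields mktime expects
            mtime = time.mktime(info.date_time + (0, 0, -1))
            os.utime(target, (mtime, mtime))

# example call (assumes backup.zip exists): unzip_keep_mtime('backup.zip', 'restored')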
XiMuYouZi/PythonDemo
Crawler/Zhihu/zhihuuser/spiders/zhihu_user.py
1
4440
# -*- coding: utf-8 -*- # 爬取知乎全站的用户信息 import json from scrapy import Spider, Request from Crawler.Zhihu.zhihuuser.items import UserItem class ZhihuSpider(Spider): #忽略301,302重定向请求 # handle_httpstatus_list = [301, 302] name = "zhihu_user" allowed_domains = ["www.zhihu.com"] user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}' follows_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}' followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}' start_user = 'excited-vczh' user_query = 'locations,employments,gender,educations,business,voteup_count,thanked_Count,follower_count,following_count,cover_url,following_topic_count,following_question_count,following_favlists_count,following_columns_count,answer_count,articles_count,pins_count,question_count,commercial_question_count,favorite_count,favorited_count,logs_count,marked_answers_count,marked_answers_text,message_thread_token,account_status,is_active,is_force_renamed,is_bind_sina,sina_weibo_url,sina_weibo_name,show_sina_weibo,is_blocking,is_blocked,is_following,is_followed,mutual_followees_count,vote_to_count,vote_from_count,thank_to_count,thank_from_count,thanked_count,description,hosted_live_count,participated_live_count,allow_message,industry_category,org_name,org_homepage,badge[?(type=best_answerer)].topics' follows_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics' followers_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics' def start_requests(self): yield Request(self.user_url.format(user=self.start_user, include=self.user_query), self.parse_user,dont_filter=True) yield Request(self.follows_url.format(user=self.start_user, include=self.follows_query, limit=20, offset=0), self.parse_follows,dont_filter=True) yield Request(self.followers_url.format(user=self.start_user, include=self.followers_query, limit=20, offset=0), self.parse_followers,dont_filter=True) def parse(self, response): print(response.text) #解析每个用户的信息 def parse_user(self, response): result = json.loads(response.text,strict=False) print('解析每个用户的信息\n: ',result) item = UserItem() #解析用户信息 for field in item.fields: if field in result.keys(): item[field] = result.get(field) yield item # 生成该用户的关注和粉丝用户的Request yield Request( self.follows_url.format(user=result.get('url_token'), include=self.follows_query, limit=20, offset=0), self.parse_follows) yield Request( self.followers_url.format(user=result.get('url_token'), include=self.followers_query, limit=20, offset=0), self.parse_followers) #解析他的关注列表 def parse_follows(self, response): results = json.loads(response.text,strict=False) print('解析他的关注列表\n: ',results) if 'data' in results.keys(): for result in results.get('data'): yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query), self.parse_user) if 'paging' in results.keys() and results.get('paging').get('is_end') == False: next_page = results.get('paging').get('next') yield Request(next_page, self.parse_follows) #解析他的粉丝列表 def parse_followers(self, response): results = json.loads(response.text,strict=False) print('解析他的粉丝列表\n: ',results) if 'data' in results.keys(): for result in results.get('data'): yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query), self.parse_user) if 'paging' in results.keys() and 
results.get('paging').get('is_end') == False: next_page = results.get('paging').get('next') yield Request(next_page, self.parse_followers)
mit
4,831,802,806,455,866,000
46.88764
808
0.666823
false
3.152367
false
false
false
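parse_follows/parse_followers above walk Zhihu's paginated follow lists: pull url_token from each item in data, then follow paging.next until is_end is true. A plain-dict sketch of that paging logic with a hypothetical sample payload (the real spider yields scrapy Requests instead of collecting a list):

results = {
    'data': [{'url_token': 'excited-vczh'}, {'url_token': 'another-user'}],
    'paging': {'is_end': False,
               'next': 'https://www.zhihu.com/api/v4/members/excited-vczh/followees?offset=20'},
}

user_tokens = [item['url_token'] for item in results.get('data', [])]

next_page = None
if not results.get('paging', {}).get('is_end', True):
    next_page = results['paging']['next']

print(user_tokens, next_page)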
sgzwiz/brython
tests/console.py
1
2190
import sys import time import random #this sucks.. cannot find dis since "root" path is blah/test #we might need to create a variable we pass via the brython function # to state what the root path is. # For now, we'll hardcode a relative path. :( sys.path.append("../Lib") import dis _rand=random.random() editor=JSObject(ace).edit("editor") editor.getSession().setMode("ace/mode/python") if sys.has_local_storage: from local_storage import storage else: storage = False def reset_src(): if storage: editor.setValue(storage["py_src"]) else: editor.setValue('for i in range(10):\n\tprint(i)') editor.scrollToRow(0) editor.gotoLine(0) def write(data): doc["console"].value += str(data) sys.stdout = object() sys.stdout.write = write sys.stderr = object() sys.stderr.write = write def to_str(xx): return str(xx) doc['version'].text = '.'.join(map(to_str,sys.version_info)) output = '' def show_console(): doc["console"].value = output doc["console"].cols = 60 def clear_text(): editor.setValue('') if sys.has_local_storage: storage["py_src"]='' doc["console"].value='' def run(): global output doc["console"].value='' src = editor.getValue() if storage: storage["py_src"]=src t0 = time.time() exec(src) output = doc["console"].value print('<completed in %s ms>' %(time.time()-t0)) # load a Python script def on_complete(req): editor.setValue(req.text) editor.scrollToRow(0) editor.gotoLine(0) def load(evt): _name=evt.target.value req = ajax() req.on_complete = on_complete req.open('GET',_name+'?foo=%s' % _rand,False) req.send() def show_js(): src = editor.getValue() doc["console"].value = dis.dis(src) def change_theme(evt): _theme=evt.target.value editor.setTheme(_theme) if storage: storage["ace_theme"]=_theme def reset_theme(): if storage: if storage["ace_theme"] is not None: if storage["ace_theme"].startswith("ace/theme/"): editor.setTheme(storage["ace_theme"]) doc["ace_theme"].value=storage["ace_theme"] reset_src() reset_theme()
bsd-3-clause
-4,744,852,278,866,266,000
19.660377
68
0.630594
false
3.254086
false
false
false
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/ratelimitbackend/backends.py
1
2730
import logging import warnings from datetime import datetime, timedelta from django.contrib.auth.backends import ModelBackend from django.core.cache import cache from .exceptions import RateLimitException logger = logging.getLogger('ratelimitbackend') class RateLimitMixin(object): """ A mixin to enable rate-limiting in an existing authentication backend. """ cache_prefix = 'ratelimitbackend-' minutes = 5 requests = 30 username_key = 'username' def authenticate(self, **kwargs): request = kwargs.pop('request', None) username = kwargs[self.username_key] if request is not None: counts = self.get_counters(request) if sum(counts.values()) >= self.requests: logger.warning( u"Login rate-limit reached: username '{0}', IP {1}".format( username, self.get_ip(request), ) ) raise RateLimitException('Rate-limit reached', counts) else: warnings.warn(u"No request passed to the backend, unable to " u"rate-limit. Username was '%s'" % username, stacklevel=2) user = super(RateLimitMixin, self).authenticate(**kwargs) if user is None and request is not None: logger.info( u"Login failed: username '{0}', IP {1}".format( username, self.get_ip(request), ) ) cache_key = self.get_cache_key(request) self.cache_incr(cache_key) return user def get_counters(self, request): return cache.get_many(self.keys_to_check(request)) def keys_to_check(self, request): now = datetime.now() return [ self.key( request, now - timedelta(minutes=minute), ) for minute in range(self.minutes + 1) ] def get_cache_key(self, request): return self.key(request, datetime.now()) def key(self, request, dt): return '%s%s-%s' % ( self.cache_prefix, self.get_ip(request), dt.strftime('%Y%m%d%H%M'), ) def get_ip(self, request): return request.META['REMOTE_ADDR'] def cache_incr(self, key): """ Non-atomic cache increment operation. Not optimal but consistent across different cache backends. """ cache.set(key, cache.get(key, 0) + 1, self.expire_after()) def expire_after(self): """Cache expiry delay""" return (self.minutes + 1) * 60 class RateLimitModelBackend(RateLimitMixin, ModelBackend): pass
agpl-3.0
6,461,471,023,397,237,000
29.674157
79
0.563736
false
4.299213
false
false
false
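RateLimitMixin above counts failed logins in per-minute cache keys of the form '<prefix><ip>-<YYYYMMDDHHMM>' and sums the buckets from the last `minutes` minutes before allowing another attempt. A self-contained sketch of that windowed-counter idea using a plain dict in place of Django's cache (function names here are hypothetical):

from datetime import datetime, timedelta

counters = {}  # stand-in for the cache backend

def bucket_key(ip, dt):
    return 'ratelimitbackend-%s-%s' % (ip, dt.strftime('%Y%m%d%H%M'))

def record_failure(ip, now=None):
    now = now or datetime.now()
    key = bucket_key(ip, now)
    counters[key] = counters.get(key, 0) + 1

def is_rate_limited(ip, minutes=5, requests=30, now=None):
    now = now or datetime.now()
    keys = [bucket_key(ip, now - timedelta(minutes=m)) for m in range(minutes + 1)]
    return sum(counters.get(k, 0) for k in keys) >= requests

for _ in range(30):
    record_failure('10.0.0.1')
assert is_rate_limited('10.0.0.1')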
joetsoi/moonstone
python/main.py
1
1239
from collections import namedtuple from struct import unpack, unpack_from Segment = namedtuple('Segment', 'offset length') ViewportDimension = namedtuple('ViewportDimension', 'right left') class MainExe(object): def __init__(self, file_path): data_segment = Segment(0x138a0, 0xf460) with open(file_path, 'rb') as f: f.seek(data_segment.offset) data_segment_data = f.read(data_segment.length) self.bold_f_char_lookup = unpack( '>96B', data_segment_data[0x8006:0x8006 + (128 - 32)] ) self.screen_dimensions = ViewportDimension(*unpack( '<2H', data_segment_data[0x8002:0x8006] )) self.strings = { 'created by': unpack( '<5H', data_segment_data[0x8DCC:0x8DCC + 10] #should back 10 ), 'Loading...': unpack( '<5H', data_segment_data[0x8de0:0x8de0 + 10] ), 'Rob Anderson': unpack( '<5H', data_segment_data[0x8dd6:0x8dd6 + 10] ), } self.palette = unpack( '<32H', data_segment_data[0x892:0x892 + 0x40] )
agpl-3.0
326,647,625,653,657,340
27.813953
69
0.51816
false
3.529915
false
false
false
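MainExe above slices the DOS executable's data segment and decodes it with struct format strings such as '<2H' (two little-endian unsigned shorts, used for the viewport dimensions) and '>96B'. A tiny round-trip illustration of the '<2H' format with invented sample values, not real game data:

from struct import pack, unpack

raw = pack('<2H', 320, 200)
right, left = unpack('<2H', raw)
assert (right, left) == (320, 200)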
teoliphant/scipy
scipy/ndimage/filters.py
2
40010
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numpy import _ni_support import _nd_image from scipy.misc import doccer __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', 'gaussian_gradient_magnitude', 'correlate', 'convolve', 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', 'maximum_filter1d', 'minimum_filter', 'maximum_filter', 'rank_filter', 'median_filter', 'percentile_filter', 'generic_filter1d', 'generic_filter'] _input_doc = \ """input : array-like input array to filter""" _axis_doc = \ """axis : integer, optional axis of ``input`` along which to calculate. Default is -1""" _output_doc = \ """output : array, optional The ``output`` parameter passes an array in which to store the filter output.""" _size_foot_doc = \ """size : scalar or tuple, optional See footprint, below footprint : array, optional Either ``size`` or ``footprint`` must be defined. ``size`` gives the shape that is taken from the input array, at every element position, to define the input to the filter function. ``footprint`` is a boolean array that specifies (implicitly) a shape, but also which of the elements within this shape will get passed to the filter function. Thus ``size=(n,m)`` is equivalent to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number of dimensions of the input array, so that, if the input array is shape (10,10,10), and ``size`` is 2, then the actual size used is (2,2,2). """ _mode_doc = \ """mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'reflect'""" _cval_doc = \ """cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0""" _origin_doc = \ """origin : scalar, optional The ``origin`` parameter controls the placement of the filter. 
Default 0""" _extra_arguments_doc = \ """extra_arguments : sequence, optional Sequence of extra positional arguments to pass to passed function""" _extra_keywords_doc = \ """extra_keywords : dict, optional dict of extra keyword arguments to pass to passed function""" docdict = { 'input':_input_doc, 'axis':_axis_doc, 'output':_output_doc, 'size_foot':_size_foot_doc, 'mode':_mode_doc, 'cval':_cval_doc, 'origin':_origin_doc, 'extra_arguments':_extra_arguments_doc, 'extra_keywords':_extra_keywords_doc, } docfiller = doccer.filldoc(docdict) @docfiller def correlate1d(input, weights, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional correlation along the given axis. The lines of the array along the given axis are correlated with the given weights. Parameters ---------- %(input)s weights : array one-dimensional sequence of numbers %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) weights = numpy.asarray(weights, dtype=numpy.float64) if weights.ndim != 1 or weights.shape[0] < 1: raise RuntimeError('no filter weights given') if not weights.flags.contiguous: weights = weights.copy() axis = _ni_support._check_axis(axis, input.ndim) if ((len(weights) // 2 + origin < 0) or (len(weights) // 2 + origin > len(weights))): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate1d(input, weights, axis, output, mode, cval, origin) return return_value @docfiller def convolve1d(input, weights, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional convolution along the given axis. The lines of the array along the given axis are convolved with the given weights. Parameters ---------- %(input)s weights : ndarray one-dimensional sequence of numbers %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ weights = weights[::-1] origin = -origin if not len(weights) & 1: origin -= 1 return correlate1d(input, weights, axis, output, mode, cval, origin) @docfiller def gaussian_filter1d(input, sigma, axis = -1, order = 0, output = None, mode = "reflect", cval = 0.0): """One-dimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar standard deviation for Gaussian kernel %(axis)s order : {0, 1, 2, 3}, optional An order of 0 corresponds to convolution with a Gaussian kernel. An order of 1, 2, or 3 corresponds to convolution with the first, second or third derivatives of a Gaussian. 
Higher order derivatives are not implemented %(output)s %(mode)s %(cval)s """ if order not in range(4): raise ValueError('Order outside 0..3 not implemented') sd = float(sigma) # make the length of the filter equal to 4 times the standard # deviations: lw = int(4.0 * sd + 0.5) weights = [0.0] * (2 * lw + 1) weights[lw] = 1.0 sum = 1.0 sd = sd * sd # calculate the kernel: for ii in range(1, lw + 1): tmp = math.exp(-0.5 * float(ii * ii) / sd) weights[lw + ii] = tmp weights[lw - ii] = tmp sum += 2.0 * tmp for ii in range(2 * lw + 1): weights[ii] /= sum # implement first, second and third order derivatives: if order == 1 : # first derivative weights[lw] = 0.0 for ii in range(1, lw + 1): x = float(ii) tmp = -x / sd * weights[lw + ii] weights[lw + ii] = -tmp weights[lw - ii] = tmp elif order == 2: # second derivative weights[lw] *= -1.0 / sd for ii in range(1, lw + 1): x = float(ii) tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd weights[lw + ii] = tmp weights[lw - ii] = tmp elif order == 3: # third derivative weights[lw] = 0.0 sd2 = sd * sd for ii in range(1, lw + 1): x = float(ii) tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2 weights[lw + ii] = -tmp weights[lw - ii] = tmp return correlate1d(input, weights, axis, output, mode, cval, 0) @docfiller def gaussian_filter(input, sigma, order = 0, output = None, mode = "reflect", cval = 0.0): """Multi-dimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar or sequence of scalars standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. order : {0, 1, 2, 3} or sequence from same set, optional The order of the filter along each axis is given as a sequence of integers, or as a single number. An order of 0 corresponds to convolution with a Gaussian kernel. An order of 1, 2, or 3 corresponds to convolution with the first, second or third derivatives of a Gaussian. Higher order derivatives are not implemented %(output)s %(mode)s %(cval)s Notes ----- The multi-dimensional filter is implemented as a sequence of one-dimensional convolution filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. """ input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) orders = _ni_support._normalize_sequence(order, input.ndim) if not set(orders).issubset(set(range(4))): raise ValueError('Order outside 0..4 not implemented') sigmas = _ni_support._normalize_sequence(sigma, input.ndim) axes = range(input.ndim) axes = [(axes[ii], sigmas[ii], orders[ii]) for ii in range(len(axes)) if sigmas[ii] > 1e-15] if len(axes) > 0: for axis, sigma, order in axes: gaussian_filter1d(input, sigma, axis, order, output, mode, cval) input = output else: output[...] = input[...] return return_value @docfiller def prewitt(input, axis = -1, output = None, mode = "reflect", cval = 0.0): """Calculate a Prewitt filter. 
Parameters ---------- %(input)s %(axis)s %(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output, return_value = _ni_support._get_output(output, input) correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,) return return_value @docfiller def sobel(input, axis = -1, output = None, mode = "reflect", cval = 0.0): """Calculate a Sobel filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output, return_value = _ni_support._get_output(output, input) correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0) return return_value @docfiller def generic_laplace(input, derivative2, output = None, mode = "reflect", cval = 0.0, extra_arguments = (), extra_keywords = None): """Calculate a multidimensional laplace filter using the provided second derivative function. Parameters ---------- %(input)s derivative2 : callable Callable with the following signature:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. %(output)s %(mode)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) axes = range(input.ndim) if len(axes) > 0: derivative2(input, axes[0], output, mode, cval, *extra_arguments, **extra_keywords) for ii in range(1, len(axes)): tmp = derivative2(input, axes[ii], output.dtype, mode, cval, *extra_arguments, **extra_keywords) output += tmp else: output[...] = input[...] return return_value @docfiller def laplace(input, output = None, mode = "reflect", cval = 0.0): """Calculate a multidimensional laplace filter using an estimation for the second derivative based on differences. Parameters ---------- %(input)s %(output)s %(mode)s %(cval)s """ def derivative2(input, axis, output, mode, cval): return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) return generic_laplace(input, derivative2, output, mode, cval) @docfiller def gaussian_laplace(input, sigma, output = None, mode = "reflect", cval = 0.0): """Calculate a multidimensional laplace filter using gaussian second derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.. %(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) def derivative2(input, axis, output, mode, cval, sigma): order = [0] * input.ndim order[axis] = 2 return gaussian_filter(input, sigma, order, output, mode, cval) return generic_laplace(input, derivative2, output, mode, cval, extra_arguments = (sigma,)) @docfiller def generic_gradient_magnitude(input, derivative, output = None, mode = "reflect", cval = 0.0, extra_arguments = (), extra_keywords = None): """Calculate a gradient magnitude using the provided function for the gradient. Parameters ---------- %(input)s derivative : callable Callable with the following signature:: derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. 
`derivative` can assume that `input` and `output` are ndarrays. Note that the output from `derivative` is modified inplace; be careful to copy important inputs before returning them. %(output)s %(mode)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) axes = range(input.ndim) if len(axes) > 0: derivative(input, axes[0], output, mode, cval, *extra_arguments, **extra_keywords) numpy.multiply(output, output, output) for ii in range(1, len(axes)): tmp = derivative(input, axes[ii], output.dtype, mode, cval, *extra_arguments, **extra_keywords) numpy.multiply(tmp, tmp, tmp) output += tmp # This allows the sqrt to work with a different default casting if numpy.version.short_version > '1.6.1': numpy.sqrt(output, output, casting='unsafe') else: numpy.sqrt(output, output) else: output[...] = input[...] return return_value @docfiller def gaussian_gradient_magnitude(input, sigma, output = None, mode = "reflect", cval = 0.0): """Calculate a multidimensional gradient magnitude using gaussian derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.. %(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) def derivative(input, axis, output, mode, cval, sigma): order = [0] * input.ndim order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments = (sigma,)) def _correlate_or_convolve(input, weights, output, mode, cval, origin, convolution): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) weights = numpy.asarray(weights, dtype=numpy.float64) wshape = [ii for ii in weights.shape if ii > 0] if len(wshape) != input.ndim: raise RuntimeError('filter weights array has incorrect shape.') if convolution: weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] for ii in range(len(origins)): origins[ii] = -origins[ii] if not weights.shape[ii] & 1: origins[ii] -= 1 for origin, lenw in zip(origins, wshape): if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw): raise ValueError('invalid origin') if not weights.flags.contiguous: weights = weights.copy() output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate(input, weights, output, mode, cval, origins) return return_value @docfiller def correlate(input, weights, output = None, mode = 'reflect', cval = 0.0, origin = 0): """ Multi-dimensional correlation. The array is correlated with the given kernel. Parameters ---------- input : array-like input array to filter weights : ndarray array of weights, same number of dimensions as input output : array, optional The ``output`` parameter passes an array in which to store the filter output. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0 origin : scalar, optional The ``origin`` parameter controls the placement of the filter. 
Default 0 See Also -------- convolve : Convolve an image with a kernel. """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, False) @docfiller def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0, origin = 0): """ Multi-dimensional convolution. The array is convolved with the given kernel. Parameters ---------- input : array_like Input array to filter. weights : array_like Array of weights, same number of dimensions as input output : ndarray, optional The `output` parameter passes an array in which to store the filter output. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional the `mode` parameter determines how the array borders are handled. For 'constant' mode, values beyond borders are set to be `cval`. Default is 'reflect'. cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 origin : array_like, optional The `origin` parameter controls the placement of the filter. Default is 0. Returns ------- result : ndarray The result of convolution of `input` with `weights`. See Also -------- correlate : Correlate an image with a kernel. Notes ----- Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where W is the `weights` kernel, j is the n-D spatial index over :math:`W`, I is the `input` and k is the coordinate of the center of W, specified by `origin` in the input parameters. Examples -------- Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, because in this case borders (i.e. where the `weights` kernel, centered on any one value, extends beyond an edge of `input`. >>> a = np.array([[1, 2, 0, 0], .... [5, 3, 0, 4], .... [0, 0, 0, 7], .... [9, 3, 0, 0]]) >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) >>> from scipy import ndimage >>> ndimage.convolve(a, k, mode='constant', cval=0.0) array([[11, 10, 7, 4], [10, 3, 11, 11], [15, 12, 14, 7], [12, 3, 7, 0]]) Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` with 1.0's (and then extracting only the original region of the result). >>> ndimage.convolve(a, k, mode='constant', cval=1.0) array([[13, 11, 8, 7], [11, 3, 11, 14], [16, 12, 14, 10], [15, 6, 10, 5]]) With ``mode='reflect'`` (the default), outer values are reflected at the edge of `input` to fill in missing values. >>> b = np.array([[2, 0, 0], [1, 0, 0], [0, 0, 0]]) >>> k = np.array([[0,1,0],[0,1,0],[0,1,0]]) >>> ndimage.convolve(b, k, mode='reflect') array([[5, 0, 0], [3, 0, 0], [1, 0, 0]]) This includes diagonally at the corners. >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) >>> ndimage.convolve(b, k) array([[4, 2, 0], [3, 2, 0], [1, 1, 0]]) With ``mode='nearest'``, the single nearest value in to an edge in `input` is repeated as many times as needed to match the overlapping `weights`. >>> c = np.array([[2, 0, 1], [1, 0, 0], [0, 0, 0]]) >>> k = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0]]) >>> ndimage.convolve(c, k, mode='nearest') array([[7, 0, 3], [5, 0, 2], [3, 0, 1]]) """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, True) @docfiller def uniform_filter1d(input, size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform filter of given size. 
Parameters ---------- %(input)s size : integer length of uniform filter %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin) return return_value @docfiller def uniform_filter(input, size = 3, output = None, mode = "reflect", cval = 0.0, origin = 0): """Multi-dimensional uniform filter. Parameters ---------- %(input)s size : int or sequence of ints The sizes of the uniform filter are given for each axis as a sequence, or as a single number, in which case the size is equal for all axes. %(output)s %(mode)s %(cval)s %(origin)s Notes ----- The multi-dimensional filter is implemented as a sequence of one-dimensional uniform filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. """ input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) sizes = _ni_support._normalize_sequence(size, input.ndim) origins = _ni_support._normalize_sequence(origin, input.ndim) axes = range(input.ndim) axes = [(axes[ii], sizes[ii], origins[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if len(axes) > 0: for axis, size, origin in axes: uniform_filter1d(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] return return_value @docfiller def minimum_filter1d(input, size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional minimum filter along the given axis. The lines of the array along the given axis are filtered with a minimum filter of given size. Parameters ---------- %(input)s size : int length along which to calculate 1D minimum %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 1) return return_value @docfiller def maximum_filter1d(input, size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. 
Parameters ---------- %(input)s size : int length along which to calculate 1D maximum %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0) return return_value def _min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, minimum): if structure is None: if footprint is None: if size is None: raise RuntimeError("no footprint provided") separable= True else: footprint = numpy.asarray(footprint) footprint = footprint.astype(bool) if numpy.alltrue(numpy.ravel(footprint),axis=0): size = footprint.shape footprint = None separable = True else: separable = False else: structure = numpy.asarray(structure, dtype=numpy.float64) separable = False if footprint is None: footprint = numpy.ones(structure.shape, bool) else: footprint = numpy.asarray(footprint) footprint = footprint.astype(bool) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) origins = _ni_support._normalize_sequence(origin, input.ndim) if separable: sizes = _ni_support._normalize_sequence(size, input.ndim) axes = range(input.ndim) axes = [(axes[ii], sizes[ii], origins[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if minimum: filter_ = minimum_filter1d else: filter_ = maximum_filter1d if len(axes) > 0: for axis, size, origin in axes: filter_(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] else: fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() if structure is not None: if len(structure.shape) != input.ndim: raise RuntimeError('structure array has incorrect shape') if not structure.flags.contiguous: structure = structure.copy() mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter(input, footprint, structure, output, mode, cval, origins, minimum) return return_value @docfiller def minimum_filter(input, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional minimum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 1) @docfiller def maximum_filter(input, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional maximum filter. 
Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 0) @docfiller def _rank_filter(input, rank, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0, operation = 'rank'): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() filter_size = numpy.where(footprint, 1, 0).sum() if operation == 'median': rank = filter_size // 2 elif operation == 'percentile': percentile = rank if percentile < 0.0: percentile += 100.0 if percentile < 0 or percentile > 100: raise RuntimeError('invalid percentile') if percentile == 100.0: rank = filter_size - 1 else: rank = int(float(filter_size) * percentile / 100.0) if rank < 0: rank += filter_size if rank < 0 or rank >= filter_size: raise RuntimeError('rank not within filter footprint size') if rank == 0: return minimum_filter(input, None, footprint, output, mode, cval, origin) elif rank == filter_size - 1: return maximum_filter(input, None, footprint, output, mode, cval, origin) else: output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins) return return_value @docfiller def rank_filter(input, rank, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional rank filter. Parameters ---------- %(input)s rank : integer The rank parameter may be less then zero, i.e., rank = -1 indicates the largest element. %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _rank_filter(input, rank, size, footprint, output, mode, cval, origin, 'rank') @docfiller def median_filter(input, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """ Calculates a multi-dimensional median filter. Parameters ---------- input : array-like input array to filter size : scalar or tuple, optional See footprint, below footprint : array, optional Either ``size`` or ``footprint`` must be defined. ``size`` gives the shape that is taken from the input array, at every element position, to define the input to the filter function. ``footprint`` is a boolean array that specifies (implicitly) a shape, but also which of the elements within this shape will get passed to the filter function. Thus ``size=(n,m)`` is equivalent to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number of dimensions of the input array, so that, if the input array is shape (10,10,10), and ``size`` is 2, then the actual size used is (2,2,2). output : array, optional The ``output`` parameter passes an array in which to store the filter output. 
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0 origin : scalar, optional The ``origin`` parameter controls the placement of the filter. Default 0 """ return _rank_filter(input, 0, size, footprint, output, mode, cval, origin, 'median') @docfiller def percentile_filter(input, percentile, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional percentile filter. Parameters ---------- %(input)s percentile : scalar The percentile parameter may be less then zero, i.e., percentile = -20 equals percentile = 80 %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _rank_filter(input, percentile, size, footprint, output, mode, cval, origin, 'percentile') @docfiller def generic_filter1d(input, function, filter_size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0, extra_arguments = (), extra_keywords = None): """Calculate a one-dimensional filter along the given axis. generic_filter1d iterates over the lines of the array, calling the given function at each line. The arguments of the line are the input line, and the output line. The input and output lines are 1D double arrays. The input line is extended appropriately according to the filter size and origin. The output line must be modified in-place with the result. Parameters ---------- %(input)s function : callable function to apply along given axis filter_size : scalar length of the filter %(axis)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) if filter_size < 1: raise RuntimeError('invalid filter size') axis = _ni_support._check_axis(axis, input.ndim) if ((filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= filter_size)): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter1d(input, function, filter_size, axis, output, mode, cval, origin, extra_arguments, extra_keywords) return return_value @docfiller def generic_filter(input, function, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0, extra_arguments = (), extra_keywords = None): """Calculates a multi-dimensional filter using the given function. At each element the provided function is called. The input values within the filter footprint at that element are passed to the function as a 1D array of double values. 
Parameters ---------- %(input)s function : callable function to apply at each element %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint) footprint = footprint.astype(bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter(input, function, footprint, output, mode, cval, origins, extra_arguments, extra_keywords) return return_value
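# Illustrative usage sketch (not part of the source file above): the preceding
# content field is a copy of scipy.ndimage's filter module, and the calls below
# show how its public routines are typically driven.  This assumes only that
# numpy and scipy are installed; the array and parameter values are made up.
import numpy as np
from scipy import ndimage

a = np.arange(25, dtype=float).reshape(5, 5)

# gaussian_filter / gaussian_filter1d: order=0 smooths, order=1 convolves with
# the first derivative of the Gaussian along the chosen axis.
smoothed = ndimage.gaussian_filter(a, sigma=1.5)
d_dx = ndimage.gaussian_filter1d(a, sigma=1.5, axis=1, order=1)

# median_filter and percentile_filter both go through the rank-filter
# machinery: the median is simply the rank at the middle of the footprint.
den = ndimage.median_filter(a, size=3)
p80 = ndimage.percentile_filter(a, percentile=80, size=3)

# generic_filter calls a Python callable on the footprint values at each element.
local_range = ndimage.generic_filter(a, lambda v: v.max() - v.min(), size=3)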
bsd-3-clause
-4,108,254,230,189,439,500
34.004374
79
0.597751
false
3.896192
false
false
false
vaniakosmos/memes-reposter
apps/imgur/migrations/0001_initial.py
1
1605
# Generated by Django 2.0.3 on 2018-06-30 17:27 import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='ImgurConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('score_limit', models.IntegerField(default=1000, validators=[django.core.validators.MinValueValidator(0)])), ('good_tags', models.TextField(blank=True)), ('bad_tags', models.TextField(blank=True)), ('exclude_mode', models.BooleanField(default=True, help_text='If true posts with bad tags will be filtered out. Otherwise only posts from with good tags will pass the filter.')), ('channel_username', models.CharField(max_length=200, null=True)), ('chat_id', models.BigIntegerField(null=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('imgur_id', models.CharField(max_length=200)), ('title', models.TextField()), ('is_album', models.BooleanField()), ('tags', models.TextField()), ('images_links', models.TextField()), ], ), ]
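# Hypothetical reconstruction, for illustration only, of model declarations
# that would produce the migration above.  Field names and options are taken
# from the CreateModel operations; the real apps/imgur/models.py may differ
# (the 'abstract': False option suggests ImgurConfig inherits from an abstract
# base class in the actual project).
from django.core.validators import MinValueValidator
from django.db import models


class ImgurConfig(models.Model):
    score_limit = models.IntegerField(default=1000,
                                      validators=[MinValueValidator(0)])
    good_tags = models.TextField(blank=True)
    bad_tags = models.TextField(blank=True)
    exclude_mode = models.BooleanField(
        default=True,
        help_text='If true posts with bad tags will be filtered out. Otherwise '
                  'only posts from with good tags will pass the filter.')
    channel_username = models.CharField(max_length=200, null=True)
    chat_id = models.BigIntegerField(null=True)


class Post(models.Model):
    imgur_id = models.CharField(max_length=200)
    title = models.TextField()
    is_album = models.BooleanField()
    tags = models.TextField()
    images_links = models.TextField()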
mit
-4,660,612,646,727,245,000
38.146341
194
0.560748
false
4.533898
false
false
false
dsweet04/rekall
rekall-core/rekall/plugins/windows/heap_analysis.py
1
16866
# Rekall Memory Forensics # Copyright 2014 Google Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # """The module implements user mode heap analysis. Recent versions of windows use the Low Fragmentation Heap (LFH). http://illmatics.com/Windows%208%20Heap%20Internals.pdf http://illmatics.com/Understanding_the_LFH.pdf http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/ """ from rekall import scan from rekall.plugins import core from rekall.plugins.windows import common from rekall_lib import utils class InspectHeap(common.WinProcessFilter): """Inspect the process heap. This prints a lot of interesting facts about the process heap. It is also the foundation to many other plugins which find things in the process heaps. NOTE: Currently we only support Windows 7 64 bit. """ name = "inspect_heap" __args = [ dict(name="free", type="Boolean", help="Also show freed chunks."), dict(name="heaps", type="ArrayIntParser", help="Only show these heaps (default show all)") ] mode = "mode_amd64" def __init__(self, *args, **kwargs): super(InspectHeap, self).__init__(*args, **kwargs) self.segments = utils.SortedCollection() def enumerate_lfh_heap_allocations(self, heap, skip_freed=False): """Dump the low fragmentation heap.""" seen_blocks = set() for lfh_block in heap.FrontEndHeap.SubSegmentZones.list_of_type( "_LFH_BLOCK_ZONE", "ListEntry"): block_length = lfh_block.FreePointer.v() - lfh_block.obj_end segments = heap.obj_profile.Array( target="_HEAP_SUBSEGMENT", offset=lfh_block.obj_end, size=block_length) for segment in segments: allocation_length = segment.BlockSize * 16 if segment.UserBlocks.v() in seen_blocks: break seen_blocks.add(segment.UserBlocks.v()) for entry in segment.UserBlocks.Entries: # http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/ # Skip freed blocks if requested. if skip_freed and entry.UnusedBytes & 0x38: continue UnusedBytes = entry.UnusedBytes & 0x3f - 0x8 # The actual length of user allocation is the difference # between the HEAP allocation bin size and the unused bytes # at the end of the allocation. data_len = allocation_length - UnusedBytes # The data length can not be larger than the allocation # minus the critical parts of _HEAP_ENTRY. Sometimes, # allocations overrun into the next element's _HEAP_ENTRY so # they can store data in the next entry's # entry.PreviousBlockPrivateData. In this case the # allocation length seems to be larger by 8 bytes. if data_len > allocation_length - 0x8: data_len -= 0x8 yield (heap.obj_profile.String(entry.obj_end, term=None, length=data_len), allocation_length) def enumerate_backend_heap_allocations(self, heap): """Enumerate all allocations for _EPROCESS instance.""" for seg in heap.Segments: seg_end = seg.LastValidEntry.v() # Ensure sanity. 
if seg.Heap.deref() != heap: continue # The segment is empty - often seg_end is zero here. if seg_end < seg.FirstEntry.v(): break for entry in seg.FirstEntry.walk_list("NextEntry", True): # If this is the last entry it goes until the end of the # segment. start = entry.obj_offset + 0x10 if start > seg_end: break allocation = entry.Allocation yield allocation def GenerateHeaps(self): task = self.session.GetParameter("process_context") resolver = self.session.address_resolver # Try to load the ntdll profile. ntdll_mod = resolver.GetModuleByName("ntdll") if not ntdll_mod: return ntdll_prof = ntdll_mod.profile # Set the ntdll profile on the _PEB member. peb = task.m("Peb").cast( "Pointer", target="_PEB", profile=ntdll_prof, vm=task.get_process_address_space()) for heap in peb.ProcessHeaps: yield heap def render(self, renderer): cc = self.session.plugins.cc() with cc: for task in self.filter_processes(): cc.SwitchProcessContext(task) renderer.section() renderer.format("{0:r}\n", task) for heap in self.GenerateHeaps(): self.render_process_heap_info(heap, renderer) def render_low_frag_info(self, heap, renderer): """Displays information about the low fragmentation front end.""" renderer.format("Low Fragmentation Front End Information:\n") renderer.table_header([ dict(name="Entry", style="address"), ("Alloc", "allocation_length", "4"), ("Length", "length", ">4"), dict(name="Data"), ]) # Render the LFH allocations in increasing allocation sizes. Collect # them first, then display by sorted allocation size, and offset. entries_by_size = {} for entry, allocation_length in self.enumerate_lfh_heap_allocations( heap): entries_by_size.setdefault(allocation_length, []).append(entry) for allocation_length, entries in sorted(entries_by_size.iteritems()): for entry in sorted(entries, key=lambda x: x.obj_offset): data = entry.v()[:64] renderer.table_row( entry, allocation_length, entry.length, utils.HexDumpedString(data), ) def render_process_heap_info(self, heap, renderer): if (self.plugin_args.heaps and heap.ProcessHeapsListIndex not in self.plugin_args.heaps): return if 1 <= heap.ProcessHeapsListIndex <= 64: renderer.format("Heap {0}: {1:#x} ({2})\nBackend Info:\n\n", heap.ProcessHeapsListIndex, heap.BaseAddress, heap.FrontEndHeapType) renderer.table_header([ dict(name="Segment", type="TreeNode", width=18, child=dict(style="address")), ("End", "segment_end", "[addr]"), ("Length", "length", "8"), dict(name="Data"), ]) for seg in heap.Segments: seg_start = seg.FirstEntry.obj_offset seg_end = seg.LastValidEntry.v() renderer.table_row( seg_start, seg_end, seg_end - seg_start, depth=1) for entry in seg.FirstEntry.walk_list("NextEntry", True): # If this is the last entry it goes until the end of the # segment. 
start = entry.obj_offset + 0x10 if start > seg_end: break if entry.Flags.LAST_ENTRY: end = seg.LastValidEntry.v() else: end = entry.obj_offset + entry.Size * 16 data = heap.obj_vm.read(start, min(16, end-start)) renderer.table_row( entry, end, end - start, utils.HexDumpedString(data), depth=2) if heap.FrontEndHeapType.LOW_FRAG: self.render_low_frag_info(heap, renderer) class ShowAllocation(common.WindowsCommandPlugin): """Show the allocation containing the address.""" name = "show_allocation" __args = [ dict(name="address", type="ArrayIntParser", positional=True, help="The address to display"), dict(name="preamble", type="IntParser", default=32, help="How many bytes prior to the address to display."), dict(name="length", type="IntParser", default=50 * 16, help="How many bytes after the address to display.") ] def BuildAllocationMap(self): """Build a map of all allocations for fast looksup.""" allocations = utils.RangedCollection() inspect_heap = self.session.plugins.inspect_heap() for heap in inspect_heap.GenerateHeaps(): # First do the backend allocations. for allocation in inspect_heap.enumerate_backend_heap_allocations( heap): # Include the header in the allocation. allocations.insert( allocation.obj_offset - 16, allocation.obj_offset + allocation.length + 16, (allocation.obj_offset, allocation.length, "B")) self.session.report_progress( "Enumerating backend allocation: %#x", lambda allocation=allocation: allocation.obj_offset) # Now do the LFH allocations (These will mask the subsegments in the # RangedCollection). for _ in inspect_heap.enumerate_lfh_heap_allocations( heap, skip_freed=False): allocation, allocation_length = _ self.session.report_progress( "Enumerating frontend allocation: %#x", lambda: allocation.obj_offset) # Front end allocations do not have their own headers. allocations.insert( allocation.obj_offset, allocation.obj_offset + allocation_length, (allocation.obj_offset, allocation_length, "F")) return allocations def __init__(self, *args, **kwargs): super(ShowAllocation, self).__init__(*args, **kwargs) self.offset = None # Get cached allocations for current process context. task = self.session.GetParameter("process_context") cache_key = "heap_allocations_%x" % task.obj_offset self.allocations = self.session.GetParameter(cache_key) if self.allocations == None: self.allocations = self.BuildAllocationMap() # Cache the allocations for next time. self.session.SetCache(cache_key, self.allocations) def GetAllocationForAddress(self, address): return self.allocations.get_containing_range(address) def CreateAllocationMap(self, start, length, alloc_start, alloc_type): address_map = core.AddressMap() # For backend allocs we highlight the heap entry before them. if alloc_type == "B": address_map.AddRange(alloc_start-16, alloc_start, "_HEAP_ENTRY") # Try to interpret pointers to other allocations and highlight them. count = length / 8 for pointer in self.profile.Array( offset=start, count=count, target="Pointer"): name = None alloc_start, alloc_length, alloc_type = ( self.allocations.get_containing_range(pointer.v())) if alloc_type is not None: # First check if the pointer points inside this allocation. if alloc_start == start + 16: name = "+%#x(%#x)" % (pointer.v() - start, pointer.v()) else: name = "%#x(%s@%#x)" % ( pointer.v(), alloc_length, alloc_start) else: # Maybe it is a resolvable address. 
name = ",".join(self.session.address_resolver.format_address( pointer.v(), max_distance=1024*1024)) if name: address_map.AddRange( pointer.obj_offset, pointer.obj_offset + 8, # Color it using a unique color related to the address. This # helps to visually relate the same address across different # dumps. "%s" % name, color_index=pointer.obj_offset) return address_map def render(self, renderer): for address in self.plugin_args.address: # If the user requested to view more than one address we do not # support plugin continuation (with v() plugin). if len(self.plugin_args.address) > 1: self.offset = None alloc_start, alloc_length, alloc_type = ( self.allocations.get_containing_range(address)) if not alloc_type: renderer.format("Allocation not found for address " "{0:style=address} in any heap.\n", address) alloc_start = address alloc_length = 50 * 16 alloc_type = None else: renderer.format( "Address {0:style=address} is {1} bytes into " "{2} allocation of size {3} " "({4:style=address} - {5:style=address})\n", address, address - alloc_start, alloc_type, alloc_length, alloc_start, alloc_start + alloc_length) # Start dumping preamble before the address if self.offset is not # specified. It will be specified when we run the plugin again using # v(). if self.offset is None: # Start dumping a little before the requested address, but do # not go before the start of the allocation. start = max(alloc_start, address - self.plugin_args.preamble) else: # Continue dumping from the last run. start = self.offset # Also show the _HEAP_ENTRY before backend allocations (Front end # allocations do not have a _HEAP_ENTRY). if alloc_type == "B": start -= 16 length = min(alloc_start + alloc_length - start, self.plugin_args.length) dump = self.session.plugins.dump( offset=start, length=length, address_map=self.CreateAllocationMap( start, length, alloc_start, alloc_type)) dump.render(renderer) self.offset = dump.offset class FindReferenceAlloc(common.WindowsCommandPlugin): """Show allocations that refer to an address.""" name = "show_referrer_alloc" __args = [ dict(name="address", type="IntParser", positional=True, required=True, help="The address to display") ] def get_referrers(self, address, maxlen=None): addr = self.profile.address() addr.write(address) pointer_scanner = scan.BaseScanner( address_space=self.session.GetParameter("default_address_space"), session=self.session, checks=[ ('StringCheck', dict(needle=addr.obj_vm.getvalue())) ]) # Just scan the entire userspace address space. This means we might find # hits outside the heap but this is usually useful as it would locate # static pointers in dlls. if maxlen is None: maxlen = self.session.GetParameter("highest_usermode_address") for hit in pointer_scanner.scan(maxlen=maxlen): yield hit def render(self, renderer): show_allocation = None for hit in self.get_referrers(self.address): show_allocation = self.session.plugins.show_allocation(hit) show_allocation.render(renderer) return show_allocation
gpl-2.0
5,344,717,608,450,970,000
37.594966
104
0.563441
false
4.506011
false
false
false
google/makani
gs/monitor2/apps/plugins/indicators/servo.py
1
17733
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """"Monitor indicators from the ground station.""" import collections import operator from makani.analysis.checks import avionics_util from makani.analysis.checks import check_range from makani.analysis.control import flap_limits from makani.avionics.common import pack_avionics_messages from makani.avionics.common import servo_types as servo_common from makani.avionics.firmware.monitors import servo_types from makani.avionics.network import aio_labels from makani.control import control_types from makani.gs.monitor2.apps.layout import indicator from makani.gs.monitor2.apps.layout import stoplights from makani.gs.monitor2.apps.plugins import common from makani.gs.monitor2.apps.plugins.indicators import avionics from makani.lib.python import c_helpers from makani.lib.python import struct_tree import numpy _SERVO_WARNING_HELPER = c_helpers.EnumHelper('ServoWarning', servo_common) _SERVO_ERROR_HELPER = c_helpers.EnumHelper('ServoError', servo_common) _SERVO_STATUS_HELPER = c_helpers.EnumHelper('ServoStatus', servo_common) _SERVO_LABELS_HELPER = c_helpers.EnumHelper('ServoLabel', aio_labels, prefix='kServo') _SERVO_ANALOG_VOLTAGE_HELPER = c_helpers.EnumHelper('ServoAnalogVoltage', servo_types) _SERVO_MON_WARNING_HELPER = c_helpers.EnumHelper('ServoMonitorWarning', servo_types) _SERVO_MON_ERROR_HELPER = c_helpers.EnumHelper('ServoMonitorError', servo_types) _ACTUATOR_STATE_HELPER = c_helpers.EnumHelper('ActuatorState', pack_avionics_messages, exclude='ActuatorStateCommand') class BaseServoIndicator(avionics.BaseActuatorIndicator): """Base class with utilities shared by servo indicators.""" def __init__(self, mode, label, precision, servo_labels=_SERVO_LABELS_HELPER.ShortNames(), show_label=True): super(BaseServoIndicator, self).__init__( mode, label, precision, servo_labels, 'Servo', _SERVO_LABELS_HELPER, common.MAX_NO_UPDATE_COUNT_SERVO_STATUS, full_comms_message_type='ServoStatus', tether_attribute='servo_statuses', show_label=show_label) class BaseArmedIndicator(BaseServoIndicator): """Base indicator for servos' armed status.""" def _GetSingleValue(self, arg_idx, *args): """Obtain a single value for one servo, invoked within _GetAvailableValues. Args: arg_idx: The index referring to the n-th servo. *args: The list of attributes to the indicator. The attributes vary in different modes. For FULL_COMMS_MODE, it is the list of ServoStatus messages for each servo, so args[arg_idx] refers to the servo's message struct. For SPARSE_COMMS_MODE, it is [TetherDown.servo_statuses, valid, timestamp_sec], so args[0][`EnumValue(A2)`] refers to the state of servo A2. Returns: The servo status of the n-th servo. 
""" if self._mode == common.FULL_COMMS_MODE: if struct_tree.IsValidElement(args[arg_idx]): return args[arg_idx].flags.status else: return None elif self._mode == common.SPARSE_COMMS_MODE: return self._GetTetherValue(args[0], self._node_labels[arg_idx], 'state') else: assert False @indicator.ReturnIfInputInvalid('--', stoplights.STOPLIGHT_UNAVAILABLE) def _Filter(self, *args): """Get the armed information of all servos. Args: *args: The list of attributes to the indicator. The attributes vary in different modes. For FULL_COMMS_MODE, it is the list of ServoStatus messages for each servo, so args[arg_idx] refers to the servo's message struct. For SPARSE_COMMS_MODE, it is [TetherDown.servo_statuses, valid, timestamp_sec], so args[0][`EnumValue(A2)`] refers to the state of servo A2. Returns: The text and stoplight to show. """ servo_status = self._GetAvailableValues(*args) if self._mode == common.FULL_COMMS_MODE: status_helper = _SERVO_STATUS_HELPER expecting = ['Armed'] elif self._mode == common.SPARSE_COMMS_MODE: status_helper = _ACTUATOR_STATE_HELPER expecting = ['Armed', 'Running'] else: assert False return self._CheckStatusFlags(servo_status, status_helper, expecting, stoplights.STOPLIGHT_ERROR) class BaseR22TemperatureIndicator(BaseServoIndicator): """Base indicator for servos' R22 temperatures.""" def __init__(self, *args, **kwargs): super(BaseR22TemperatureIndicator, self).__init__(*args, show_label=False, **kwargs) self._normal_ranges = check_range.BuildRanges([[None, 65]]) self._warning_ranges = check_range.BuildRanges([[None, 75]]) def _GetSingleValue(self, arg_idx, *args): if self._mode == common.FULL_COMMS_MODE: if struct_tree.IsValidElement(args[arg_idx]): return args[arg_idx].r22.temperature else: return None elif self._mode == common.SPARSE_COMMS_MODE: return self._GetTetherValue( args[0], self._node_labels[arg_idx], 'r22_temp') else: assert False @indicator.ReturnIfInputInvalid('', stoplights.STOPLIGHT_UNAVAILABLE) def _Filter(self, *args): temperatures, stoplight = self._GetFieldInfo( self._normal_ranges, self._warning_ranges, None, *args) return self._DictToString(temperatures), stoplight class BaseLvBusIndicator(indicator.BaseIndicator): """The base class for low voltage bus indicators.""" _voltage_names = ['LvA', 'LvB'] def __init__(self, servos, name): self._short_names = servos super(BaseLvBusIndicator, self).__init__(name) def _GatherVoltageData(self, messages): """Gather voltage data from the messages.""" voltages = collections.defaultdict(dict) any_value = False warning = False errors = [] for servo in self._short_names: if 'ServoStatus.Servo' + servo not in messages: continue any_value = True populated = messages[ 'ServoStatus.Servo%s.servo_mon.analog_populated' % servo] for voltage_name in self._voltage_names: # Guard against bad voltage names. 
if voltage_name not in _SERVO_ANALOG_VOLTAGE_HELPER: errors.append('Servo %s: Invalid voltage (%s)' % (servo, voltage_name)) continue index = _SERVO_ANALOG_VOLTAGE_HELPER.Value(voltage_name) if not avionics_util.TestMask(populated, index): continue voltages[voltage_name][servo] = messages[ 'ServoStatus.Servo%s.servo_mon.analog_data[%d]' % (servo, index)] warning |= avionics_util.CheckWarning( messages['ServoStatus.Servo%s.servo_mon.flags' % servo], _SERVO_MON_WARNING_HELPER.Value(voltage_name)) if errors: stoplight = stoplights.STOPLIGHT_ERROR elif not any_value: stoplight = stoplights.STOPLIGHT_UNAVAILABLE elif warning: stoplight = stoplights.STOPLIGHT_WARNING else: stoplight = stoplights.STOPLIGHT_NORMAL return voltages, stoplight, errors def Filter(self, messages): if not messages: return '--', stoplights.STOPLIGHT_UNAVAILABLE voltages, stoplight, errors = self._GatherVoltageData(messages) results = [' ' + ' '.join(v.rjust(4) for v in self._voltage_names)] for servo in self._short_names: servo_text = '%s:' % servo for voltage_name in self._voltage_names: if voltage_name in voltages and servo in voltages[voltage_name]: servo_text += ' %5.1f' % voltages[voltage_name][servo] else: servo_text += ' --'.rjust(6) results.append(servo_text) return '\n'.join(errors + results), stoplight class ArmedTailIndicator(BaseArmedIndicator): def __init__(self, mode): super(ArmedTailIndicator, self).__init__( mode, 'Tail Armed', 0, ['E1', 'E2', 'R1', 'R2']) class ArmedPortIndicator(BaseArmedIndicator): def __init__(self, mode): super(ArmedPortIndicator, self).__init__( mode, 'Port Armed', 0, ['A1', 'A2', 'A4']) class ArmedStarboardIndicator(BaseArmedIndicator): def __init__(self, mode): super(ArmedStarboardIndicator, self).__init__( mode, 'Starboard Armed', 0, ['A5', 'A7', 'A8']) class R22TemperatureTailIndicator(BaseR22TemperatureIndicator): def __init__(self, mode): super(R22TemperatureTailIndicator, self).__init__( mode, 'Tail R22 Temp', 0, ['E1', 'E2', 'R1', 'R2']) class R22TemperaturePortIndicator(BaseR22TemperatureIndicator): def __init__(self, mode): super(R22TemperaturePortIndicator, self).__init__( mode, 'Port R22 Temp', 0, ['A1', 'A2', 'A4']) class R22TemperatureStarboardIndicator(BaseR22TemperatureIndicator): def __init__(self, mode): super(R22TemperatureStarboardIndicator, self).__init__( mode, 'Star R22 Temp', 0, ['A5', 'A7', 'A8']) class LvBusTailIndicator(BaseLvBusIndicator): def __init__(self): super(LvBusTailIndicator, self).__init__( ['E1', 'E2', 'R1', 'R2'], 'Tail Bus [V]') class LvBusPortIndicator(BaseLvBusIndicator): def __init__(self): super(LvBusPortIndicator, self).__init__( ['A1', 'A2', 'A4'], 'Port Bus [V]') class LvBusStarboardIndicator(BaseLvBusIndicator): def __init__(self): super(LvBusStarboardIndicator, self).__init__( ['A5', 'A7', 'A8'], 'Starboard Bus [V]') class BasePosChart(avionics.ActuatorCmdDictChart): """The indicator to show servo position angles.""" def __init__(self, mode, name, servo_labels, show_cmd=True, **base_kwargs): super(BasePosChart, self).__init__( mode, name, servo_labels, 'Servo', _SERVO_LABELS_HELPER, common.MAX_NO_UPDATE_COUNT_SERVO_STATUS, show_cmd=show_cmd, full_comms_message_type='ServoStatus', tether_attribute='servo_statuses', precision=0, **base_kwargs) def _GetValuePerNode(self, arg_idx, *args): if self._mode == common.FULL_COMMS_MODE: return (numpy.rad2deg(args[arg_idx].angle_estimate) if struct_tree.IsValidElement(args[arg_idx]) else None) elif self._mode == common.SPARSE_COMMS_MODE: rad = self._GetTetherValue(args[0], 
self._node_labels[arg_idx], 'angle') return numpy.rad2deg(rad) if rad is not None else None else: assert False def _GetCmdValue(self, servo, controller_command): servo_idx = _SERVO_LABELS_HELPER.Value(servo) return numpy.rad2deg(controller_command.servo_angle[servo_idx]) class RudPosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): nodes = ['R1', 'R2'] super(RudPosChart, self).__init__( mode, 'Rud Pos [&deg;]', nodes, show_cmd=True, **widget_kwargs) limits = flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())['R1'] limits = numpy.rad2deg(limits).tolist() self._SetLimits({ self._ObservationLabel(n): ( check_range.Interval(limits, inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in nodes }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class ElePosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): nodes = ['E1', 'E2'] super(ElePosChart, self).__init__( mode, 'Ele Pos [&deg;]', nodes, show_cmd=True, **widget_kwargs) limits = flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())['E1'] limits = numpy.rad2deg(limits).tolist() self._SetLimits({ self._ObservationLabel(n): ( check_range.Interval(limits, inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in nodes }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class PortPosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): super(PortPosChart, self).__init__( mode, 'Port Ail Pos [&deg;]', ['A1', 'A2', 'A4'], show_cmd=True, **widget_kwargs) self._SetLimits({ self._ObservationLabel(n): ( check_range.Interval( numpy.rad2deg(flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())[n]).tolist(), inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in ['A1', 'A2'] }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class StarboardPosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): super(StarboardPosChart, self).__init__( mode, 'Star Ail Pos [&deg;]', ['A5', 'A7', 'A8'], show_cmd=True, **widget_kwargs) self._SetLimits({ self._ObservationLabel(n): ( check_range.Interval( numpy.rad2deg(flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())[n]).tolist(), inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in ['A7', 'A8'] }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class LvBusSummaryIndicator(BaseLvBusIndicator): """The summary class for low voltage bus indicators.""" _voltage_names = ['LvA', 'LvB'] def __init__(self): super(LvBusSummaryIndicator, self).__init__( _SERVO_LABELS_HELPER.ShortNames(), 'Servo LV Bus [V]') def Filter(self, messages): if not messages: return '--', stoplights.STOPLIGHT_UNAVAILABLE all_voltages, stoplight, errors = self._GatherVoltageData(messages) all_stats = {} for voltage_name in self._voltage_names: voltages = all_voltages[voltage_name] sorted_pairs = sorted(voltages.items(), key=operator.itemgetter(1)) num_units = len(voltages) all_stats[voltage_name] = { 'min': sorted_pairs[0] if voltages else None, 'max': sorted_pairs[-1] if voltages else None, 'median': sorted_pairs[num_units / 2] if voltages else None, } delimiter = ' ' results = [' '.rjust(7) + delimiter + delimiter.join(v.rjust(8) for v in self._voltage_names)] for metric in ['min', 'max', 'median']: text = metric.rjust(7) for voltage_name in self._voltage_names: stats = all_stats[voltage_name] text += delimiter 
if stats[metric] is not None: if isinstance(stats[metric], tuple): text += '{: 2.1f}({:2})'.format( stats[metric][1], stats[metric][0]) else: text += '{: 7.1f}'.format(stats[metric]) else: text += '--'.rjust(8) results.append(text) return '\n'.join(errors + results), stoplight class StatusIndicator(BaseServoIndicator): """Summary servo status.""" @indicator.RegisterModes(common.FULL_COMMS_MODE, common.SPARSE_COMMS_MODE) def __init__(self, mode, **format_kwargs): super(StatusIndicator, self).__init__(mode, 'Servo Status', 0) self._format_kwargs = format_kwargs def _GetSingleValue(self, arg_idx, *args): if self._mode == common.FULL_COMMS_MODE: if struct_tree.IsValidElement(args[arg_idx]): return [args[arg_idx].flags, args[arg_idx].servo_mon.flags] else: return None elif self._mode == common.SPARSE_COMMS_MODE: return self._GetTetherValue( args[0], self._node_labels[arg_idx], 'state') else: assert False @indicator.ReturnIfInputInvalid('', stoplights.STOPLIGHT_UNAVAILABLE) def _Filter(self, *attributes): any_warning_or_error = False warnings = collections.defaultdict(list) errors = collections.defaultdict(list) report_by_servo = collections.defaultdict(list) any_servo = False reports = self._GetAvailableValues(*attributes) for servo in _SERVO_LABELS_HELPER.ShortNames(): if servo not in reports or reports[servo] is None: continue if self._mode == common.FULL_COMMS_MODE: flags, mon_flags = reports[servo] any_servo = True if common.CheckFlags(servo, report_by_servo, warnings, errors, flags, _SERVO_WARNING_HELPER, _SERVO_ERROR_HELPER): any_warning_or_error = True if common.CheckFlags( servo, report_by_servo, warnings, errors, mon_flags, _SERVO_MON_WARNING_HELPER, _SERVO_MON_ERROR_HELPER): any_warning_or_error = True elif self._mode == common.SPARSE_COMMS_MODE: any_servo = True if reports[servo] & _ACTUATOR_STATE_HELPER.Value('Error'): any_warning_or_error = True report_by_servo[servo].append(('ERROR', 'status')) errors['status'].append(servo) return common.SummarizeWarningsAndErrors( any_servo, report_by_servo, warnings, errors, any_warning_or_error, **self._format_kwargs)
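# Minimal sketch of exercising one of the indicators defined above.  The
# monitor layer normally feeds Filter() a struct_tree-backed message index
# keyed like 'ServoStatus.ServoE1.servo_mon.analog_data[<i>]'; a plain empty
# dict is used here, so only the no-data path is demonstrated.
from makani.gs.monitor2.apps.layout import stoplights

lv_bus = LvBusTailIndicator()
text, light = lv_bus.Filter({})     # no ServoStatus messages available yet
assert text == '--'
assert light == stoplights.STOPLIGHT_UNAVAILABLE
# With live telemetry, Filter() returns a per-servo LvA/LvB voltage table and
# a stoplight reflecting any servo monitor warnings or errors.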
apache-2.0
5,210,630,127,275,359,000
35.189796
79
0.646366
false
3.526148
false
false
false