repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
jaseg/python-mpv | setup.py | 1 | 1155 | #!/usr/bin/env python3
from setuptools import setup
setup(
name = 'python-mpv',
version = '0.5.1',
py_modules = ['mpv'],
description = 'A python interface to the mpv media player',
url = 'https://github.com/jaseg/python-mpv',
author = 'jaseg',
author_email = '[email protected]',
license = 'AGPLv3+',
extras_require = {
'screenshot_raw': ['Pillow']
},
tests_require = ['xvfbwrapper'],
test_suite = 'tests',
keywords = ['mpv', 'library', 'video', 'audio', 'player', 'display',
'multimedia'],
python_requires='>=3.5',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.5',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Video :: Display']
)
| agpl-3.0 | 5,502,061,537,492,389,000 | 34 | 93 | 0.575758 | false | 3.737864 | false | true | false |
mahabuber/erpnext | erpnext/accounts/doctype/sales_invoice/sales_invoice.py | 2 | 26538 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import cint, flt
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.controllers.stock_controller import update_gl_entries_after
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
from erpnext.accounts.utils import get_account_currency
from erpnext.stock.doctype.delivery_note.delivery_note import update_billed_amount_based_on_so
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class SalesInvoice(SellingController):
def __init__(self, arg1, arg2=None):
super(SalesInvoice, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Sales Invoice Item',
'target_field': 'billed_amt',
'target_ref_field': 'amount',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_billed',
'source_field': 'amount',
'join_field': 'so_detail',
'percent_join_field': 'sales_order',
'status_field': 'billing_status',
'keyword': 'Billed',
'overflow_type': 'billing'
}]
def set_indicator(self):
"""Set indicator for portal"""
if self.outstanding_amount > 0:
self.indicator_color = "orange"
self.indicator_title = _("Unpaid")
else:
self.indicator_color = "green"
self.indicator_title = _("Paid")
def validate(self):
super(SalesInvoice, self).validate()
self.validate_posting_time()
self.so_dn_required()
self.validate_proj_cust()
self.validate_with_previous_doc()
self.validate_uom_is_integer("stock_uom", "qty")
self.check_stop_or_close_sales_order("sales_order")
self.validate_debit_to_acc()
self.validate_fixed_asset_account()
self.clear_unallocated_advances("Sales Invoice Advance", "advances")
self.validate_advance_jv("Sales Order")
self.add_remarks()
self.validate_write_off_account()
if cint(self.is_pos):
self.validate_pos()
if cint(self.update_stock):
self.validate_dropship_item()
self.validate_item_code()
self.validate_warehouse()
self.update_current_stock()
self.validate_delivery_note()
if not self.is_opening:
self.is_opening = 'No'
self.set_against_income_account()
self.validate_c_form()
self.validate_time_logs_are_submitted()
self.validate_multiple_billing("Delivery Note", "dn_detail", "amount", "items")
self.update_packing_list()
def on_submit(self):
super(SalesInvoice, self).on_submit()
if cint(self.update_stock) == 1:
self.update_stock_ledger()
else:
# Check for Approving Authority
if not self.recurring_id:
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total, self)
self.check_prev_docstatus()
if self.is_return:
# NOTE status updating bypassed for is_return
self.status_updater = []
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_in_dn()
# this ordering matters because the outstanding amount may become negative
self.make_gl_entries()
if not self.is_return:
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
self.check_credit_limit()
if not cint(self.is_pos) == 1 and not self.is_return:
self.update_against_document_in_jv()
self.update_time_log_batch(self.name)
def before_cancel(self):
self.update_time_log_batch(None)
def on_cancel(self):
if cint(self.update_stock) == 1:
self.update_stock_ledger()
self.check_stop_or_close_sales_order("sales_order")
from erpnext.accounts.utils import remove_against_link_from_jv
remove_against_link_from_jv(self.doctype, self.name)
if self.is_return:
# NOTE status updating bypassed for is_return
self.status_updater = []
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_in_dn()
if not self.is_return:
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
self.validate_c_form_on_cancel()
self.make_gl_entries_on_cancel()
def update_status_updater_args(self):
if cint(self.update_stock):
self.status_updater.extend([{
'source_dt':'Sales Invoice Item',
'target_dt':'Sales Order Item',
'target_parent_dt':'Sales Order',
'target_parent_field':'per_delivered',
'target_field':'delivered_qty',
'target_ref_field':'qty',
'source_field':'qty',
'join_field':'so_detail',
'percent_join_field':'sales_order',
'status_field':'delivery_status',
'keyword':'Delivered',
'second_source_dt': 'Delivery Note Item',
'second_source_field': 'qty',
'second_join_field': 'so_detail',
'overflow_type': 'delivery',
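# the extra condition below restricts this aggregation to invoice items whose parent Sales Invoice actually updated stock (update_stock = 1)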
'extra_cond': """ and exists(select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and update_stock = 1)"""
},
{
'source_dt': 'Sales Invoice Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'returned_qty',
'target_parent_dt': 'Sales Order',
# 'target_parent_field': 'per_delivered',
# 'target_ref_field': 'qty',
'source_field': '-1 * qty',
# 'percent_join_field': 'sales_order',
# 'overflow_type': 'delivery',
'extra_cond': """ and exists (select name from `tabSales Invoice` where name=`tabSales Invoice Item`.parent and update_stock=1 and is_return=1)"""
}
])
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
validate_against_credit_limit = False
for d in self.get("items"):
if not (d.sales_order or d.delivery_note):
validate_against_credit_limit = True
break
if validate_against_credit_limit:
check_credit_limit(self.customer, self.company)
def set_missing_values(self, for_validate=False):
pos = self.set_pos_fields(for_validate)
if not self.debit_to:
self.debit_to = get_party_account("Customer", self.customer, self.company)
if not self.due_date and self.customer:
self.due_date = get_due_date(self.posting_date, "Customer", self.customer, self.company)
super(SalesInvoice, self).set_missing_values(for_validate)
if pos:
return {"print_format": pos.get("print_format") }
def update_time_log_batch(self, sales_invoice):
for d in self.get("items"):
if d.time_log_batch:
tlb = frappe.get_doc("Time Log Batch", d.time_log_batch)
tlb.sales_invoice = sales_invoice
tlb.flags.ignore_validate_update_after_submit = True
tlb.save()
def validate_time_logs_are_submitted(self):
for d in self.get("items"):
if d.time_log_batch:
docstatus = frappe.db.get_value("Time Log Batch", d.time_log_batch, "docstatus")
if docstatus!=1:
frappe.throw(_("Time Log Batch {0} must be 'Submitted'").format(d.time_log_batch))
def set_pos_fields(self, for_validate=False):
"""Set retail related fields from POS Profiles"""
if cint(self.is_pos) != 1:
return
from erpnext.stock.get_item_details import get_pos_profile_item_details, get_pos_profile
pos = get_pos_profile(self.company)
if pos:
if not for_validate and not self.customer:
self.customer = pos.customer
self.mode_of_payment = pos.mode_of_payment
# self.set_customer_defaults()
for fieldname in ('territory', 'naming_series', 'currency', 'taxes_and_charges', 'letter_head', 'tc_name',
'selling_price_list', 'company', 'select_print_heading', 'cash_bank_account',
'write_off_account', 'write_off_cost_center'):
if (not for_validate) or (for_validate and not self.get(fieldname)):
self.set(fieldname, pos.get(fieldname))
if not for_validate:
self.update_stock = cint(pos.get("update_stock"))
# set pos values in items
for item in self.get("items"):
if item.get('item_code'):
for fname, val in get_pos_profile_item_details(pos,
frappe._dict(item.as_dict()), pos).items():
if (not for_validate) or (for_validate and not item.get(fname)):
item.set(fname, val)
# fetch terms
if self.tc_name and not self.terms:
self.terms = frappe.db.get_value("Terms and Conditions", self.tc_name, "terms")
# fetch charges
if self.taxes_and_charges and not len(self.get("taxes")):
self.set_taxes()
return pos
def get_advances(self):
if not self.is_return:
super(SalesInvoice, self).get_advances(self.debit_to, "Customer", self.customer,
"Sales Invoice Advance", "advances", "credit_in_account_currency", "sales_order")
def get_company_abbr(self):
return frappe.db.sql("select abbr from tabCompany where name=%s", self.company)[0][0]
def update_against_document_in_jv(self):
"""
Links invoice and advance voucher:
1. cancel advance voucher
2. split into multiple rows if partially adjusted, assign against voucher
3. submit advance voucher
"""
lst = []
for d in self.get('advances'):
if flt(d.allocated_amount) > 0:
args = {
'voucher_no' : d.journal_entry,
'voucher_detail_no' : d.jv_detail_no,
'against_voucher_type' : 'Sales Invoice',
'against_voucher' : self.name,
'account' : self.debit_to,
'party_type': 'Customer',
'party': self.customer,
'is_advance' : 'Yes',
'dr_or_cr' : 'credit_in_account_currency',
'unadjusted_amt' : flt(d.advance_amount),
'allocated_amt' : flt(d.allocated_amount)
}
lst.append(args)
if lst:
from erpnext.accounts.utils import reconcile_against_document
reconcile_against_document(lst)
def validate_debit_to_acc(self):
account = frappe.db.get_value("Account", self.debit_to,
["account_type", "report_type", "account_currency"], as_dict=True)
if not account:
frappe.throw(_("Debit To is required"))
if account.report_type != "Balance Sheet":
frappe.throw(_("Debit To account must be a Balance Sheet account"))
if self.customer and account.account_type != "Receivable":
frappe.throw(_("Debit To account must be a Receivable account"))
self.party_account_currency = account.account_currency
def validate_fixed_asset_account(self):
"""Validate Fixed Asset and whether Income Account Entered Exists"""
for d in self.get('items'):
is_asset_item = frappe.db.get_value("Item", d.item_code, "is_asset_item")
account_type = frappe.db.get_value("Account", d.income_account, "account_type")
if is_asset_item == 1 and account_type != 'Fixed Asset':
msgprint(_("Account {0} must be of type 'Fixed Asset' as Item {1} is an Asset Item").format(d.income_account, d.item_code), raise_exception=True)
def validate_with_previous_doc(self):
super(SalesInvoice, self).validate_with_previous_doc({
"Sales Order": {
"ref_dn_field": "sales_order",
"compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
["currency", "="]],
},
"Delivery Note": {
"ref_dn_field": "delivery_note",
"compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
["currency", "="]],
},
})
if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([
["Sales Order", "sales_order", "so_detail"],
["Delivery Note", "delivery_note", "dn_detail"]
])
def set_against_income_account(self):
"""Set against account for debit to account"""
against_acc = []
for d in self.get('items'):
if d.income_account not in against_acc:
against_acc.append(d.income_account)
self.against_income_account = ','.join(against_acc)
def add_remarks(self):
if not self.remarks: self.remarks = 'No Remarks'
def so_dn_required(self):
"""check in manage account if sales order / delivery note required or not."""
dic = {'Sales Order':'so_required','Delivery Note':'dn_required'}
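# keys are the doctype labels, values are the Selling Settings flags; the item-level link field is derived below via key.lower().replace(' ', '_')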
for i in dic:
if frappe.db.get_value('Selling Settings', None, dic[i]) == 'Yes':
for d in self.get('items'):
if frappe.db.get_value('Item', d.item_code, 'is_stock_item') == 1 \
and not d.get(i.lower().replace(' ','_')):
msgprint(_("{0} is mandatory for Item {1}").format(i,d.item_code), raise_exception=1)
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.project_name and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or customer is null or customer = '')""",
(self.project_name, self.customer))
if not res:
throw(_("Customer {0} does not belong to project {1}").format(self.customer,self.project_name))
def validate_pos(self):
if not self.cash_bank_account and flt(self.paid_amount):
frappe.throw(_("Cash or Bank Account is mandatory for making payment entry"))
if flt(self.paid_amount) + flt(self.write_off_amount) \
- flt(self.base_grand_total) > 1/(10**(self.precision("base_grand_total") + 1)):
frappe.throw(_("""Paid amount + Write Off Amount can not be greater than Grand Total"""))
def validate_item_code(self):
for d in self.get('items'):
if not d.item_code:
msgprint(_("Item Code required at Row No {0}").format(d.idx), raise_exception=True)
def validate_warehouse(self):
super(SalesInvoice, self).validate_warehouse()
for d in self.get('items'):
if not d.warehouse:
frappe.throw(_("Warehouse required at Row No {0}").format(d.idx))
def validate_delivery_note(self):
for d in self.get("items"):
if d.delivery_note:
msgprint(_("Stock cannot be updated against Delivery Note {0}").format(d.delivery_note), raise_exception=1)
def validate_write_off_account(self):
if flt(self.write_off_amount) and not self.write_off_account:
msgprint(_("Please enter Write Off Account"), raise_exception=1)
def validate_c_form(self):
""" Blank C-form no if C-form applicable marked as 'No'"""
if self.amended_from and self.c_form_applicable == 'No' and self.c_form_no:
frappe.db.sql("""delete from `tabC-Form Invoice Detail` where invoice_no = %s
and parent = %s""", (self.amended_from, self.c_form_no))
frappe.db.set(self, 'c_form_no', '')
def validate_c_form_on_cancel(self):
""" Display message if C-Form no exists on cancellation of Sales Invoice"""
if self.c_form_applicable == 'Yes' and self.c_form_no:
msgprint(_("Please remove this Invoice {0} from C-Form {1}")
.format(self.name, self.c_form_no), raise_exception = 1)
def validate_dropship_item(self):
for item in self.items:
if item.sales_order:
if frappe.db.get_value("Sales Order Item", item.so_detail, "delivered_by_supplier"):
frappe.throw(_("Could not update stock, invoice contains drop shipping item."))
def update_current_stock(self):
for d in self.get('items'):
if d.item_code and d.warehouse:
bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
for d in self.get('packed_items'):
bin = frappe.db.sql("select actual_qty, projected_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
d.projected_qty = bin and flt(bin[0]['projected_qty']) or 0
def update_packing_list(self):
if cint(self.update_stock) == 1:
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
else:
self.set('packed_items', [])
def get_warehouse(self):
user_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
where ifnull(user,'') = %s and company = %s""", (frappe.session['user'], self.company))
warehouse = user_pos_profile[0][1] if user_pos_profile else None
if not warehouse:
global_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
where (user is null or user = '') and company = %s""", self.company)
if global_pos_profile:
warehouse = global_pos_profile[0][1]
elif not user_pos_profile:
msgprint(_("POS Profile required to make POS Entry"), raise_exception=True)
return warehouse
def on_update(self):
if cint(self.is_pos) == 1:
if flt(self.paid_amount) == 0:
if self.cash_bank_account:
frappe.db.set(self, 'paid_amount',
flt(flt(self.grand_total) - flt(self.write_off_amount), self.precision("paid_amount")))
else:
# show message that the amount is not paid
frappe.db.set(self,'paid_amount',0)
frappe.msgprint(_("Note: Payment Entry will not be created since 'Cash or Bank Account' was not specified"))
else:
frappe.db.set(self,'paid_amount',0)
frappe.db.set(self, 'base_paid_amount',
flt(self.paid_amount*self.conversion_rate, self.precision("base_paid_amount")))
def check_prev_docstatus(self):
for d in self.get('items'):
if d.sales_order and frappe.db.get_value("Sales Order", d.sales_order, "docstatus") != 1:
frappe.throw(_("Sales Order {0} is not submitted").format(d.sales_order))
if d.delivery_note and frappe.db.get_value("Delivery Note", d.delivery_note, "docstatus") != 1:
throw(_("Delivery Note {0} is not submitted").format(d.delivery_note))
def make_gl_entries(self, repost_future_gle=True):
gl_entries = self.get_gl_entries()
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
# if POS or an amount is written off, update the outstanding amount only after posting all GL entries
update_outstanding = "No" if (cint(self.is_pos) or self.write_off_account) else "Yes"
make_gl_entries(gl_entries, cancel=(self.docstatus == 2),
update_outstanding=update_outstanding, merge_entries=False)
if update_outstanding == "No":
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
update_outstanding_amt(self.debit_to, "Customer", self.customer,
self.doctype, self.return_against if cint(self.is_return) else self.name)
if repost_future_gle and cint(self.update_stock) \
and cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
items, warehouses = self.get_items_and_warehouses()
update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items)
elif self.docstatus == 2 and cint(self.update_stock) \
and cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
from erpnext.accounts.general_ledger import delete_gl_entries
delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
def get_gl_entries(self, warehouse_account=None):
from erpnext.accounts.general_ledger import merge_similar_entries
gl_entries = []
self.make_customer_gl_entry(gl_entries)
self.make_tax_gl_entries(gl_entries)
self.make_item_gl_entries(gl_entries)
# merge gl entries before adding pos entries
gl_entries = merge_similar_entries(gl_entries)
self.make_pos_gl_entries(gl_entries)
self.make_write_off_gl_entry(gl_entries)
return gl_entries
def make_customer_gl_entry(self, gl_entries):
if self.grand_total:
# grand_total * conversion_rate is used instead of base_grand_total so that the rounding loss is booked in this GL entry
grand_total_in_company_currency = flt(self.grand_total * self.conversion_rate,
self.precision("grand_total"))
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.against_income_account,
"debit": grand_total_in_company_currency,
"debit_in_account_currency": grand_total_in_company_currency \
if self.party_account_currency==self.company_currency else self.grand_total,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
def make_tax_gl_entries(self, gl_entries):
for tax in self.get("taxes"):
if flt(tax.base_tax_amount_after_discount_amount):
account_currency = get_account_currency(tax.account_head)
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"against": self.customer,
"credit": flt(tax.base_tax_amount_after_discount_amount),
"credit_in_account_currency": flt(tax.base_tax_amount_after_discount_amount) \
if account_currency==self.company_currency else flt(tax.tax_amount_after_discount_amount),
"cost_center": tax.cost_center
}, account_currency)
)
def make_item_gl_entries(self, gl_entries):
# income account gl entries
for item in self.get("items"):
if flt(item.base_net_amount):
account_currency = get_account_currency(item.income_account)
gl_entries.append(
self.get_gl_dict({
"account": item.income_account,
"against": self.customer,
"credit": item.base_net_amount,
"credit_in_account_currency": item.base_net_amount \
if account_currency==self.company_currency else item.net_amount,
"cost_center": item.cost_center
}, account_currency)
)
# expense account gl entries
if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) \
and cint(self.update_stock):
gl_entries += super(SalesInvoice, self).get_gl_entries()
def make_pos_gl_entries(self, gl_entries):
if cint(self.is_pos) and self.cash_bank_account and self.paid_amount:
bank_account_currency = get_account_currency(self.cash_bank_account)
# POS, make payment entries
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.cash_bank_account,
"credit": self.base_paid_amount,
"credit_in_account_currency": self.base_paid_amount \
if self.party_account_currency==self.company_currency else self.paid_amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.cash_bank_account,
"against": self.customer,
"debit": self.base_paid_amount,
"debit_in_account_currency": self.base_paid_amount \
if bank_account_currency==self.company_currency else self.paid_amount
}, bank_account_currency)
)
def make_write_off_gl_entry(self, gl_entries):
# write off entries, applicable only if POS
if self.write_off_account and self.write_off_amount:
write_off_account_currency = get_account_currency(self.write_off_account)
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.write_off_account,
"credit": self.base_write_off_amount,
"credit_in_account_currency": self.base_write_off_amount \
if self.party_account_currency==self.company_currency else self.write_off_amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.write_off_account,
"against": self.customer,
"debit": self.base_write_off_amount,
"debit_in_account_currency": self.base_write_off_amount \
if write_off_account_currency==self.company_currency else self.write_off_amount,
"cost_center": self.write_off_cost_center
}, write_off_account_currency)
)
def update_billing_status_in_dn(self, update_modified=True):
updated_delivery_notes = []
for d in self.get("items"):
if d.dn_detail:
billed_amt = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
where dn_detail=%s and docstatus=1""", d.dn_detail)
billed_amt = billed_amt and billed_amt[0][0] or 0
frappe.db.set_value("Delivery Note Item", d.dn_detail, "billed_amt", billed_amt, update_modified=update_modified)
updated_delivery_notes.append(d.delivery_note)
elif d.so_detail:
updated_delivery_notes += update_billed_amount_based_on_so(d.so_detail, update_modified)
for dn in set(updated_delivery_notes):
frappe.get_doc("Delivery Note", dn).update_billing_percentage(update_modified=update_modified)
def on_recurring(self, reference_doc):
for fieldname in ("c_form_applicable", "c_form_no", "write_off_amount"):
self.set(fieldname, reference_doc.get(fieldname))
self.due_date = None
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["title"] = _("My Invoices")
return list_context
@frappe.whitelist()
def get_bank_cash_account(mode_of_payment, company):
account = frappe.db.get_value("Mode of Payment Account",
{"parent": mode_of_payment, "company": company}, "default_account")
if not account:
frappe.msgprint(_("Please set default Cash or Bank account in Mode of Payment {0}").format(mode_of_payment))
return {
"account": account
}
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.base_amount = (flt(source_doc.qty) - flt(source_doc.delivered_qty)) * \
flt(source_doc.base_rate)
target_doc.amount = (flt(source_doc.qty) - flt(source_doc.delivered_qty)) * \
flt(source_doc.rate)
target_doc.qty = flt(source_doc.qty) - flt(source_doc.delivered_qty)
doclist = get_mapped_doc("Sales Invoice", source_name, {
"Sales Invoice": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Invoice Item": {
"doctype": "Delivery Note Item",
"field_map": {
"name": "si_detail",
"parent": "against_sales_invoice",
"serial_no": "serial_no",
"sales_order": "against_sales_order",
"so_detail": "so_detail"
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_by_supplier!=1
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"field_map": {
"incentives": "incentives"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Sales Invoice", source_name, target_doc)
| agpl-3.0 | 6,422,347,278,987,420,000 | 35.204638 | 153 | 0.685771 | false | 3.065496 | false | false | false |
ahwillia/PyNeuron-Toolbox | PyNeuronToolbox/neuromorpho.py | 1 | 5790 | """
Scraper for querying NeuroMorpho.Org from Python.
For more on NeuroMorpho.Org, see:
Ascoli GA, Donohue DE, Halavi M. (2007) NeuroMorpho.Org: a central
resource for neuronal morphologies. J Neurosci., 27(35):9247-51
Run this file as a stand-alone script for a demo. The demo queries NeuroMorpho.Org
in general, and provides extra information about the cell mb100318-a which is
associated with the publication:
Bagnall, M. W., Hull, C., Bushong, E. A., Ellisman, M. H., & Scanziani, M. (2011).
Multiple clusters of release sites formed by individual thalamic afferents onto
cortical interneurons ensure reliable transmission. Neuron, 71(1), 180-194.
As a web-scraper, this module may break if the website structure changes, but it
was known to work as of April 2, 2016.
To get a list of all cell types, species, or regions, call cell_types(), species(),
or regions(), respectively.
Given a type from one of these lists, get the matching cell names via cell_names.
e.g. cell_names('blowfly')
To get the metadata for a given cell name, use metadata.
e.g. metadata('mb100318-a')
To get the morphology for a given cell name, use morphology.
e.g. morphology('mb100318-a')
An optional format keyword argument allows selecting between the original and the
standardized versions.
"""
import urllib2
import re
import json
import base64
_cache = {}
def _read_neuromorpho_table(bywhat):
"""Helper function, reads data from NeuroMorpho.Org, stores in cache."""
html = urllib2.urlopen('http://neuromorpho.org/by%s.jsp' % bywhat).read()
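# the by*.jsp pages register each entry via a JS call like maketable('Aspiny'); scrape those argument strings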
result = [m.strip() for m in re.findall("maketable\('(.*?)'\)", html)]
_cache[bywhat] = set(result)
return result
def cell_types():
"""Return a list of all cell types."""
return _read_neuromorpho_table('cell')
def species():
"""Return a list of all species."""
return _read_neuromorpho_table('species')
def regions():
"""Return a list of all the brain regions."""
return _read_neuromorpho_table('region')
def cell_names(category):
"""Return a list of all the names of cells of a given cell type, species, or region.
Examples:
cell_names('Aspiny')
cell_names('blowfly')
cell_names('amygdala')
"""
# figure out if category is a cell type, species, or region
# check the cached sets first
for bywhat, items in _cache.iteritems():
if category in items:
return _get_data_for_by(bywhat, category)
# no luck: try all three options
for bywhat in ['cell', 'region', 'species']:
result = _get_data_for_by(bywhat, category)
if result:
return result
return []
def _get_data_for_by(bywhat, category):
"""Helper function for cell_names."""
query_code = bywhat if bywhat != 'cell' else 'class'
html = urllib2.urlopen('http://neuromorpho.org/getdataforby%s.jsp?%s=%s' % (bywhat, query_code, category.replace(' ', '%20'))).read()
return [m for m in re.findall("neuron_name=(.*?)'", html)]
def metadata(neuron_name):
"""Return a dict of the metadata for the specified neuron.
Example:
metadata('mb100318-a')
"""
html = urllib2.urlopen('http://neuromorpho.org/neuron_info.jsp?neuron_name=%s' % neuron_name).read()
# remove non-breaking spaces
html = html.replace(' ', ' ')
# remove units
html = html.replace('μm<sup>2</sup>', ' ')
html = html.replace('μm', ' ')
html = html.replace('°', ' ')
html = html.replace('<b>x</b>', ' ')
html = html.replace('<sup>3</sup>', '')
html2 = html.replace('\n', '')
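# the metadata page is a two-column table: right-aligned cells hold field names, left-aligned cells hold values; pair them positionally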
keys = [i[1][:-3].strip() for i in re.findall('<td align="right" width="50%"(.*?)>(.*?)</td>', html2)]
values = [i[1].strip() for i in re.findall('<td align="left"(.*?)>(.*?)</td>', html2)[2:]]
return dict(zip(keys, values))
def morphology(neuron_name, format='swc'):
"""Return the morphology associated with a given name.
Format options:
swc -- always "stanadardized" file format (default)
original -- original
Example:
morphology('mb100318-a', format='swc')
morphology('mb100318-a', format='original')
"""
url_paths_from_format = {'swc': 'CNG%20Version', 'original': 'Source-Version'}
assert(format in url_paths_from_format)
# locate the path to the downloads
html = urllib2.urlopen('http://neuromorpho.org/neuron_info.jsp?neuron_name=%s' % neuron_name).read()
if format == 'swc':
url = re.findall("<a href=dableFiles/(.*?)>Morphology File \(Standardized", html)[0]
else:
url = re.findall("<a href=dableFiles/(.*?)>Morphology File \(Original", html)[0]
return urllib2.urlopen('http://NeuroMorpho.org/dableFiles/%s' % url).read()
def download(neuron_name, filename=None):
format = 'swc'
if filename is not None and len(filename.split('.')) == 1:  # no extension supplied
filename = base64.urlsafe_b64encode(filename+'.'+format)
if filename is None:
filename = base64.urlsafe_b64encode(neuron_name+'.'+format)
with open(filename, 'w') as f:
f.write(morphology(neuron_name, format=format))
if __name__ == '__main__':
print 'Demo of reading data from NeuroMorpho.Org'
print
for string, fn in zip(['cell types', 'brain regions', 'species'], [cell_types, regions, species]):
print 'All %s:' % string
print ', '.join(fn())
print
for category in ['amygdala', 'blowfly', 'Aspiny']:
print 'All %s:' % category
print ', '.join(cell_names(category))
print
print 'Metadata for mb100318-a:'
print json.dumps(metadata('mb100318-a'), indent=4)
print
print 'Morphology (standardized) for mb100318-a (first 10 lines):'
print '\n'.join(morphology('mb100318-a', format='swc').split('\n')[:10])
| mit | -963,203,165,447,386,800 | 35.19375 | 137 | 0.647323 | false | 3.306682 | false | false | false |
cloudify-cosmo/cloudify-cli | cloudify_cli/tests/commands/test_deployments.py | 1 | 48285 | ########
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
from __future__ import unicode_literals
import json
import inspect
import datetime
import warnings
from uuid import UUID
from mock import patch, MagicMock, PropertyMock, Mock
from cloudify_rest_client import (
deployments,
executions,
blueprints,
deployment_updates,
execution_schedules
)
from cloudify.exceptions import NonRecoverableError
from cloudify_rest_client.exceptions import (
CloudifyClientError,
UnknownDeploymentInputError,
MissingRequiredDeploymentInputError
)
from cloudify_rest_client.deployment_modifications import (
DeploymentModification
)
from cloudify_rest_client.responses import ListResponse, Metadata
from cloudify_cli.constants import DEFAULT_TENANT_NAME
from cloudify_cli.exceptions import CloudifyCliError, CloudifyValidationError
from ... import exceptions
from .mocks import MockListResponse
from .test_base import CliCommandTest
from .constants import (BLUEPRINTS_DIR,
SAMPLE_BLUEPRINT_PATH,
SAMPLE_ARCHIVE_PATH,
SAMPLE_INPUTS_PATH)
class DeploymentUpdatesTest(CliCommandTest):
def _mock_wait_for_executions(self, value):
patcher = patch(
'cloudify_cli.execution_events_fetcher.wait_for_execution',
MagicMock(return_value=PropertyMock(error=value))
)
self.addCleanup(patcher.stop)
patcher.start()
def _mock_wait_for_blueprint_upload(self, value):
patcher = patch(
'cloudify_cli.utils.wait_for_blueprint_upload',
MagicMock(return_value=PropertyMock(error=value))
)
self.addCleanup(patcher.stop)
patcher.start()
def setUp(self):
super(DeploymentUpdatesTest, self).setUp()
self.client.license.check = Mock()
self.use_manager()
self.client.deployment_updates.update = MagicMock()
self.client.blueprints.upload = MagicMock()
self.client.executions = MagicMock()
self.client.deployment_updates.update_with_existing_blueprint = \
MagicMock()
self._mock_wait_for_executions(False)
self._mock_wait_for_blueprint_upload(False)
patcher = patch('cloudify_cli.inputs.inputs_to_dict', MagicMock())
self.addCleanup(patcher.stop)
patcher.start()
def test_deployment_update_get(self):
old_value = 'old value 1'
new_value = 'new value 1'
steps = [{'entity_id': 'step1'}, {'entity_id': 'step2'}]
self.client.deployment_updates.get = Mock(return_value={
'id': 'update-id-1',
'old_inputs': {'inp1': old_value},
'new_inputs': {'inp1': new_value},
'steps': steps,
'recursive_dependencies': {}
})
outcome = self.invoke('deployments get-update update-id-1')
self.assertIn(old_value, outcome.output)
self.assertIn(new_value, outcome.output)
for d in steps:
for k, v in d.items():
self.assertIn(str(k), outcome.output)
self.assertIn(str(v), outcome.output)
def test_deployment_update_preview(self):
old_value = 'old value 1'
new_value = 'new value 1'
steps = [
{'entity_id': 'nodes:step1', 'action': 'add'},
{'entity_id': 'nodes:step2', 'action': 'remove'},
]
self.client.deployment_updates.update_with_existing_blueprint = Mock(
return_value={
'id': 'update-id-1',
'old_inputs': {'inp1': old_value},
'new_inputs': {'inp1': new_value},
'steps': steps,
'recursive_dependencies': {'deployment': 'dependent_dep'}
})
outcome = self.invoke(
'deployments update dep-1 -b b2 --preview --json')
output = json.loads(outcome.output)
self.assertEqual(output['installed_nodes'], ['step1'])
self.assertEqual(output['uninstalled_nodes'], ['step2'])
self.assertEqual(output['recursive_dependencies'],
{'deployment': 'dependent_dep'})
# find out if the preview=True argument has been set. It might have
# been passed positionally or by name into the rest-client method,
# so let's use inspect to find out which argument value was actually
# the preview arg
calls = self.client.deployment_updates\
.update_with_existing_blueprint.mock_calls
self.assertEqual(len(calls), 1)
_, args, kwargs = calls[0]
call_args = inspect.getcallargs(
deployment_updates.DeploymentUpdatesClient(None)
.update_with_existing_blueprint,
*args, **kwargs)
self.assertTrue(call_args['preview'])
def test_deployment_update_update_plugins_is_false(self):
update_client_mock = Mock()
self.client.deployment_updates.update_with_existing_blueprint = \
update_client_mock
self.invoke('deployments update dep-1 -b b2 --dont-update-plugins')
calls = self.client.deployment_updates\
.update_with_existing_blueprint.mock_calls
self.assertEqual(len(calls), 1)
_, args, kwargs = calls[0]
call_args = inspect.getcallargs(
deployment_updates.DeploymentUpdatesClient(None)
.update_with_existing_blueprint,
*args, **kwargs)
self.assertIn('update_plugins', call_args)
self.assertFalse(call_args['update_plugins'])
def test_deployment_update_update_plugins_is_true(self):
update_client_mock = Mock()
self.client.deployment_updates.update_with_existing_blueprint = \
update_client_mock
self.invoke('deployments update dep-1 -b b2')
calls = self.client.deployment_updates\
.update_with_existing_blueprint.mock_calls
self.assertEqual(len(calls), 1)
_, args, kwargs = calls[0]
call_args = inspect.getcallargs(
deployment_updates.DeploymentUpdatesClient(None)
.update_with_existing_blueprint,
*args, **kwargs)
self.assertIn('update_plugins', call_args)
self.assertTrue(call_args['update_plugins'])
def test_deployment_update_get_json(self):
old_value = 'old value 1'
new_value = 'new value 1'
steps = [{'entity_id': 'step1'}, {'entity_id': 'step2'}]
self.client.deployment_updates.get = Mock(return_value={
'id': 'update-id-1',
'old_inputs': {'inp1': old_value},
'new_inputs': {'inp1': new_value},
'steps': steps
})
outcome = self.invoke('deployments get-update update-id-1 --json')
parsed = json.loads(outcome.output)
self.assertEqual(parsed['old_inputs'], {'inp1': old_value})
self.assertEqual(parsed['new_inputs'], {'inp1': new_value})
def test_deployment_update_successful(self):
outcome = self.invoke(
'cfy deployments update -p {0} '
'my_deployment'.format(SAMPLE_BLUEPRINT_PATH))
self.assertIn('Updating deployment my_deployment', outcome.logs)
self.assertIn('Finished executing workflow', outcome.logs)
self.assertIn(
'Successfully updated deployment my_deployment', outcome.logs)
def test_deployment_update_failure(self):
self._mock_wait_for_executions(True)
outcome = self.invoke(
'cfy deployments update -p {0} my_deployment'
.format(SAMPLE_BLUEPRINT_PATH),
err_str_segment='',
exception=exceptions.SuppressedCloudifyCliError)
logs = outcome.logs.split('\n')
self.assertIn('Updating deployment my_deployment', logs[-3])
self.assertIn('Execution of workflow', logs[-2])
self.assertIn('failed', logs[-2])
self.assertIn(
'Failed updating deployment my_deployment', logs[-1])
def test_deployment_update_json_parameter(self):
with warnings.catch_warnings(record=True) as warns:
self.invoke(
'cfy deployments update -p '
'{0} my_deployment --json-output'
.format(SAMPLE_BLUEPRINT_PATH))
# catch_warnings sometimes gets the same thing more than once,
# depending on how the tests are run. I don't know why.
self.assertTrue(warns)
self.assertIn('use the global', str(warns[0]))
def test_deployment_update_include_logs_parameter(self):
self.invoke(
'cfy deployments update -p '
'{0} my_deployment --include-logs'
.format(SAMPLE_BLUEPRINT_PATH))
def test_deployment_update_skip_install_flag(self):
self.invoke(
'cfy deployments update -p '
'{0} my_deployment --skip-install'
.format(SAMPLE_BLUEPRINT_PATH))
def test_deployment_update_skip_uninstall_flag(self):
self.invoke(
'cfy deployments update -p '
'{0} my_deployment --skip-uninstall'
.format(SAMPLE_BLUEPRINT_PATH))
def test_deployment_update_force_flag(self):
self.invoke(
'cfy deployments update -p '
'{0} my_deployment --force'
.format(SAMPLE_BLUEPRINT_PATH))
def test_deployment_update_override_workflow_parameter(self):
self.invoke(
'cfy deployments update -p '
'{0} my_deployment -w override-wf'
.format(SAMPLE_BLUEPRINT_PATH))
def test_deployment_update_archive_location_parameter(self):
self.invoke(
'cfy deployments update -p {0} my_deployment'
.format(SAMPLE_ARCHIVE_PATH))
def test_dep_update_archive_loc_and_bp_path_parameters_exclusion(self):
self.invoke(
'cfy deployments update -p '
'{0} -n {1}/helloworld/'
'blueprint2.yaml my_deployment'
.format(SAMPLE_BLUEPRINT_PATH, BLUEPRINTS_DIR),
err_str_segment='param should be passed only when updating'
' from an archive'
)
def test_deployment_update_blueprint_filename_parameter(self):
self.invoke(
'cfy deployments update -p '
'{0} -n blueprint.yaml my_deployment'
.format(SAMPLE_ARCHIVE_PATH))
def test_deployment_update_inputs_parameter(self):
self.invoke(
'cfy deployments update -p '
'{0} -i {1} my_deployment'
.format(SAMPLE_ARCHIVE_PATH, SAMPLE_INPUTS_PATH))
def test_deployment_update_multiple_inputs_parameter(self):
self.invoke(
'cfy deployments update -p '
'{0} -i {1} -i {1} my_deployment'
.format(SAMPLE_ARCHIVE_PATH, SAMPLE_INPUTS_PATH))
def test_deployment_update_no_deployment_id_parameter(self):
outcome = self.invoke(
'cfy deployments update -p '
'{0}'.format(SAMPLE_ARCHIVE_PATH),
err_str_segment='2', # Exit code
exception=SystemExit)
self.assertIn('missing argument', outcome.output.lower())
self.assertIn('DEPLOYMENT_ID', outcome.output)
def test_deployment_update_no_bp_path_nor_archive_loc_parameters(self):
self.invoke(
'cfy deployments update my_deployment',
err_str_segment='Must supply either a blueprint '
'(by id of an existing blueprint, or a path to a '
'new blueprint), or new inputs',
exception=CloudifyCliError)
def test_deployment_update_inputs_correct(self):
self.invoke(
'cfy deployments update -p '
'{0} -i {1} my_deployment --auto-correct-types'
.format(SAMPLE_ARCHIVE_PATH, SAMPLE_INPUTS_PATH))
class DeploymentsTest(CliCommandTest):
def setUp(self):
super(DeploymentsTest, self).setUp()
self.use_manager()
def test_deployment_create(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id'
})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke(
'cfy deployments create deployment -b a-blueprint-id')
def test_deployment_create_with_skip_plugins_validation_flag(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id'
})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke(
'cfy deployments create deployment -b a --skip-plugins-validation')
call_args = list(self.client.deployments.create.call_args)
self.assertIn('skip_plugins_validation', call_args[1])
self.assertEqual(call_args[1]['skip_plugins_validation'], True)
def test_deployment_create_without_skip_plugins_validation_flag(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id'
})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke(
'cfy deployments create deployment -b aa')
call_args = list(self.client.deployments.create.call_args)
self.assertIn('skip_plugins_validation', call_args[1])
self.assertEqual(call_args[1]['skip_plugins_validation'], False)
def test_deployment_create_with_site_name(self):
deployment = deployments.Deployment({'deployment_id': 'deployment_id'})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke('cfy deployments create deployment -b a --site-name site')
call_args = list(self.client.deployments.create.call_args)
self.assertEqual(call_args[1]['site_name'], 'site')
def test_deployment_create_invalid_site_name(self):
error_msg = 'The `site_name` argument contains illegal characters'
self.invoke('cfy deployments create deployment -b a --site-name :site',
err_str_segment=error_msg,
exception=CloudifyValidationError)
def test_deployment_create_without_site_name(self):
deployment = deployments.Deployment({'deployment_id': 'deployment_id'})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke('cfy deployments create deployment -b a')
call_args = list(self.client.deployments.create.call_args)
self.assertIsNone(call_args[1]['site_name'])
def test_deployments_delete(self):
self.client.deployments.delete = MagicMock()
self.client.executions.list = MagicMock(
side_effect=CloudifyClientError(
'`Deployment` with ID `my-dep` was not found')
)
self.invoke('cfy deployments delete my-dep')
def test_deployments_execute(self):
execute_response = executions.Execution({'status': 'started'})
get_execution_response = executions.Execution({
'status': 'terminated',
'workflow_id': 'mock_wf',
'deployment_id': 'deployment-id',
'blueprint_id': 'blueprint-id',
'error': '',
'id': 'id',
'created_at': datetime.datetime.now(),
'parameters': {}
})
success_event = {
'event_type': 'workflow_succeeded',
'type': 'foo',
'timestamp': '12345678',
'message': 'workflow execution succeeded',
'error_causes': '<error_causes>',
'deployment_id': 'deployment-id',
'execution_id': '<execution_id>',
'source_id': None,
'target_id': None,
'node_name': '<node_name>',
'operation': '<operation>',
'workflow_id': '<workflow_id>',
'node_instance_id': '<node_instance_id>',
}
get_events_response = MockListResponse([success_event], 1)
self.client.executions.start = MagicMock(
return_value=execute_response)
self.client.executions.get = MagicMock(
return_value=get_execution_response)
self.client.events.list = MagicMock(return_value=get_events_response)
self.invoke('cfy executions start install -d a-deployment-id')
def test_deployments_list_all(self):
self.client.deployments.list = MagicMock(
return_value=MockListResponse()
)
self.invoke('cfy deployments list')
self.invoke('cfy deployments list -t dummy_tenant')
self.invoke('cfy deployments list -a')
def test_deployments_list_of_blueprint(self):
deps = [
{
'blueprint_id': 'b1_blueprint',
'created_at': 'now',
'created_by': 'admin',
'updated_at': 'now',
'id': 'id',
'visibility': 'private',
'tenant_name': DEFAULT_TENANT_NAME
},
{
'blueprint_id': 'b1_blueprint',
'created_at': 'now',
'created_by': 'admin',
'updated_at': 'now',
'id': 'id',
'visibility': 'private',
'tenant_name': DEFAULT_TENANT_NAME
},
{
'blueprint_id': 'b2_blueprint',
'created_at': 'now',
'created_by': 'admin',
'updated_at': 'now',
'id': 'id',
'visibility': 'private',
'tenant_name': DEFAULT_TENANT_NAME
}
]
self.client.deployments.list = MagicMock(
return_value=MockListResponse(items=deps)
)
outcome = self.invoke('cfy deployments list -b b1_blueprint -v')
self.assertNotIn('b2_blueprint', outcome.logs)
self.assertIn('b1_blueprint', outcome.logs)
def test_deployments_execute_nonexistent_operation(self):
# Verifying that the CLI allows for arbitrary operation names,
# while also ensuring correct error-handling of nonexistent
# operations
expected_error = "operation nonexistent-operation doesn't exist"
self.client.executions.start = MagicMock(
side_effect=CloudifyClientError(expected_error))
command = \
'cfy executions start nonexistent-operation -d a-deployment-id'
self.invoke(
command,
err_str_segment=expected_error,
exception=CloudifyClientError)
def test_deployments_outputs(self):
outputs = deployments.DeploymentOutputs({
'deployment_id': 'dep1',
'outputs': {
'port': 8080
}
})
deployment = deployments.Deployment({
'outputs': {
'port': {
'description': 'Webserver port.',
'value': '...'
}
}
})
self.client.deployments.get = MagicMock(return_value=deployment)
self.client.deployments.outputs.get = MagicMock(return_value=outputs)
self.invoke('cfy deployments outputs dep1')
def test_deployments_outputs_json(self):
outputs = deployments.DeploymentOutputs({
'deployment_id': 'dep1',
'outputs': {
'port': 8080
}
})
deployment = deployments.Deployment({
'outputs': {
'port': {
'description': 'Webserver port.',
'value': '...'
}
}
})
self.client.deployments.get = MagicMock(return_value=deployment)
self.client.deployments.outputs.get = MagicMock(return_value=outputs)
outcome = self.invoke('cfy deployments outputs dep1 --json')
parsed = json.loads(outcome.output)
self.assertEqual(parsed, {
'port': {
'value': 8080,
'description': 'Webserver port.'
}
})
def test_deployments_inputs(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id',
'inputs': {'key1': 'val1', 'key2': 'val2'}
})
expected_outputs = [
'Retrieving inputs for deployment deployment_id...',
'- "key1":',
'Value: val1',
'- "key2":',
'Value: val2',
]
self.client.deployments.get = MagicMock(return_value=deployment)
outcome = self.invoke('cfy deployments inputs deployment_id')
outcome = [o.strip() for o in outcome.logs.split('\n')]
for output in expected_outputs:
self.assertIn(output, outcome)
def test_deployments_inputs_json(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id',
'inputs': {'key1': 'val1', 'key2': 'val2'}
})
self.client.deployments.get = MagicMock(return_value=deployment)
outcome = self.invoke('cfy deployments inputs deployment_id --json')
parsed = json.loads(outcome.output)
self.assertEqual(parsed, {'key1': 'val1', 'key2': 'val2'})
def test_missing_required_inputs(self):
self._test_deployment_inputs(
MissingRequiredDeploymentInputError,
{'input1': 'value1'},
['Unable to create deployment']
)
def test_invalid_input(self):
self._test_deployment_inputs(
UnknownDeploymentInputError,
{'input1': 'value1',
'input2': 'value2',
'input3': 'value3'},
['Unable to create deployment']
)
def test_deployments_set_visibility(self):
self.client.deployments.set_visibility = MagicMock()
self.invoke('cfy deployments set-visibility a-deployment-id -l '
'tenant')
self.invoke('cfy deployments set-visibility a-deployment-id -l '
'global')
def test_deployments_set_visibility_invalid_argument(self):
self.invoke(
'cfy deployments set-visibility a-deployment-id -l private',
err_str_segment='Invalid visibility: `private`',
exception=CloudifyCliError
)
self.invoke(
'cfy deployments set-visibility a-deployment-id -l bla',
err_str_segment='Invalid visibility: `bla`',
exception=CloudifyCliError
)
def test_deployments_set_visibility_missing_argument(self):
outcome = self.invoke(
'cfy deployments set-visibility a-deployment-id',
err_str_segment='2',
exception=SystemExit
)
self.assertIn('missing option', outcome.output.lower())
self.assertIn('--visibility', outcome.output)
def test_deployments_set_visibility_wrong_argument(self):
outcome = self.invoke(
'cfy deployments set-visibility a-deployment-id -g',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn('Error: no such option: -g', outcome.output)
def test_deployments_create_mutually_exclusive_arguments(self):
outcome = self.invoke(
'cfy deployments create deployment -b a-blueprint-id -l tenant '
'--private-resource',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn('mutually exclusive with arguments:', outcome.output)
def test_deployments_create_invalid_argument(self):
self.invoke(
'cfy deployments create deployment -b a-blueprint-id -l bla',
err_str_segment='Invalid visibility: `bla`',
exception=CloudifyCliError
)
def test_deployments_create_with_visibility(self):
self.client.deployments.create = MagicMock()
self.invoke('cfy deployments create deployment -b a-blueprint-id '
'-l private')
def test_deployments_set_site_with_site_name(self):
self.client.deployments.set_site = MagicMock()
self.invoke('cfy deployments set-site deployment_1 --site-name site')
call_args = list(self.client.deployments.set_site.call_args)
self.assertEqual(call_args[0][0], 'deployment_1')
self.assertEqual(call_args[1]['site_name'], 'site')
self.assertFalse(call_args[1]['detach_site'])
def test_deployments_set_site_without_options(self):
error_msg = 'Must provide either a `--site-name` of a valid site ' \
'or `--detach-site`'
self.invoke('cfy deployments set-site deployment_1',
err_str_segment=error_msg,
exception=CloudifyCliError)
def test_deployments_set_site_with_detach(self):
self.client.deployments.set_site = MagicMock()
self.invoke('cfy deployments set-site deployment_1 --detach-site')
call_args = list(self.client.deployments.set_site.call_args)
self.assertEqual(call_args[0][0], 'deployment_1')
self.assertIsNone(call_args[1]['site_name'])
self.assertTrue(call_args[1]['detach_site'])
def test_deployments_set_site_mutually_exclusive(self):
outcome = self.invoke(
'cfy deployments set-site deployment_1 -s site --detach-site',
err_str_segment='2', # Exit code
exception=SystemExit
)
error_msg = 'Error: Illegal usage: `detach_site` is ' \
'mutually exclusive with arguments: [site_name]'
self.assertIn(error_msg, outcome.output)
def test_deployment_set_site_no_deployment_id(self):
outcome = self.invoke('cfy deployments set-site',
err_str_segment='2', # Exit code
exception=SystemExit)
self.assertIn('missing argument', outcome.output.lower())
self.assertIn('DEPLOYMENT_ID', outcome.output)
def test_deployment_set_site_invalid_site_name(self):
error_msg = 'The `site_name` argument contains illegal characters'
self.invoke('cfy deployments set-site deployment_1 --site-name :site',
err_str_segment=error_msg,
exception=CloudifyValidationError)
def _test_deployment_inputs(self, exception_type,
inputs, expected_outputs=None):
def raise_error(*args, **kwargs):
raise exception_type('no inputs')
blueprint = blueprints.Blueprint({
'plan': {
'inputs': {
'input1': {'description': 'val1'},
'input2': {'description': 'val2'}
}
}
})
self.client.blueprints.get = MagicMock(return_value=blueprint)
self.client.deployments.create = raise_error
inputs_line = ' '.join(
['-i {0}={1}'.format(key, value) for
key, value in inputs.items()])
outcome = self.invoke(
'cfy deployments create deployment -b a-blueprint-id {0}'.format(
inputs_line),
exception=exceptions.SuppressedCloudifyCliError,
err_str_segment='no inputs'
)
outcome = [o.strip() for o in outcome.logs.split('\n')]
if not expected_outputs:
expected_outputs = []
for output in expected_outputs:
found = False
for outcome_line in outcome:
if output in outcome_line:
found = True
break
self.assertTrue(found, 'String ''{0}'' not found in outcome {1}'
.format(output, outcome))
def test_create_deployment_with_display_name(self):
dep_display_name = 'Depl\xf3yment'
self.client.deployments.create = Mock()
self.invoke('cfy deployments create -b bp1 -n {0} '
'dep1'.format(dep_display_name))
call_args = list(self.client.deployments.create.call_args)
self.assertEqual(call_args[1]['display_name'], dep_display_name)
def test_create_deployment_display_name_defaults_to_id(self):
dep_id = 'dep1'
self.client.deployments.create = Mock()
self.invoke('cfy deployments create -b bp1 {0}'.format(dep_id))
call_args = list(self.client.deployments.create.call_args)
self.assertEqual(call_args[1]['display_name'], dep_id)
def test_create_deployment_with_generated_id(self):
self.client.deployments.create = Mock()
self.invoke('cfy deployments create -b bp1 --generate-id')
call_args = list(self.client.deployments.create.call_args)
try:
UUID(call_args[0][1], version=4)
except ValueError:
raise Exception('The deployment was not created with a valid UUID')
def test_create_deployment_with_id_and_generate_id_fails(self):
self.invoke('cfy deployments create -b bp1 --generate-id dep1',
err_str_segment='cannot be provided',
exception=CloudifyCliError)
def test_list_deployments_with_search_name(self):
search_name_pattern = 'De#pl\xf3yment 1'
self.client.deployments.list = Mock(return_value=MockListResponse())
self.invoke('cfy deployments list --search-name '
'"{0}"'.format(search_name_pattern))
call_args = list(self.client.deployments.list.call_args)
self.assertEqual(call_args[1].get('_search_name'), search_name_pattern)
class DeploymentModificationsTest(CliCommandTest):
def _mock_wait_for_executions(self, value):
patcher = patch(
'cloudify_cli.execution_events_fetcher.wait_for_execution',
MagicMock(return_value=PropertyMock(error=value))
)
self.addCleanup(patcher.stop)
patcher.start()
def setUp(self):
super(DeploymentModificationsTest, self).setUp()
self.use_manager()
self._deployment_modifications = [
DeploymentModification({
'id': '0229a7d4-0bef-4d95-910d-a341663172e1',
'deployment_id': 'dep1',
'context': {
'workflow_id': 'scale',
'execution_id': '842686d6-e960-48a6-95b5-250fc26a7ed4',
},
'status': 'finished',
'tenant_name': 'default_tenant',
'created_at': datetime.datetime(2019, 8, 27, 16, 5, 24),
'visibility': 'tenant'
}),
DeploymentModification({
'id': 'e8962cbd-6645-4c60-9d6d-ee3215b39808',
'deployment_id': 'dep1',
'context': {
'workflow_id': 'scale',
'execution_id': 'c6bfc3de-ca19-4335-be77-b12edccba582',
},
'status': 'started',
'tenant_name': 'default_tenant',
'created_at': datetime.datetime(2019, 8, 27, 16, 35, 24),
'visibility': 'tenant'
}),
]
def test_deployment_modifications_list(self):
self.client.deployment_modifications.list = Mock(
return_value=ListResponse(
items=self._deployment_modifications,
metadata=Metadata({'pagination': {'total': 2}})
)
)
dps = self.invoke('cfy deployments modifications list dep1')
assert dps.logs == """Listing modifications of the deployment dep1...
Showing 2 of 2 deployment modifications"""
output_lines = dps.output.split('\n')
deployment_modification_found = 0
for line in output_lines:
if '0229a7d4-0bef-4d95-910d-a341663172e1' in line:
deployment_modification_found += 1
assert 'scale' in line
assert '842686d6-e960-48a6-95b5-250fc26a7ed4' in line
assert 'finished' in line
assert 'default_tenant' in line
assert '2019-08-27 16:05:24' in line
if 'e8962cbd-6645-4c60-9d6d-ee3215b39808' in line:
deployment_modification_found += 1
assert 'scale' in line
assert 'c6bfc3de-ca19-4335-be77-b12edccba582' in line
assert 'started' in line
assert 'default_tenant' in line
assert '2019-08-27 16:35:24' in line
assert deployment_modification_found == 2
def test_deployment_modifications_no_context(self):
deployment_modification = self._deployment_modifications[0]
deployment_modification.pop('context')
self.client.deployment_modifications.list = Mock(
return_value=ListResponse(
items=[deployment_modification],
metadata=Metadata({'pagination': {'total': 1}})
)
)
dps = self.invoke('cfy deployments modifications list dep1')
assert dps.logs == """Listing modifications of the deployment dep1...
Showing 1 of 1 deployment modifications"""
output_lines = dps.output.split('\n')
deployment_modification_found = 0
for line in output_lines:
if '0229a7d4-0bef-4d95-910d-a341663172e1' in line:
deployment_modification_found += 1
assert 'N/A' in line
assert 'finished' in line
assert 'default_tenant' in line
assert '2019-08-27 16:05:24' in line
assert deployment_modification_found == 1
def test_deployment_modifications_get(self):
deployment_modification = self._deployment_modifications[0]
deployment_modification.update(
{
'modified_nodes': {
'node1': []
},
'node_instances': {
'before_modification': [
{'id': 'node1_18fda8', 'node_id': 'node1'},
{'id': 'node2_z3t4uc', 'node_id': 'node2'},
],
'added_and_related': [
{'id': 'node2_z3t4uc', 'node_id': 'node2'},
{'id': 'node1_olbbe0', 'node_id': 'node1',
'modification': 'added'},
]
},
}
)
self.client.deployment_modifications.get = Mock(
return_value=deployment_modification
)
dps = self.invoke('cfy deployments modifications get '
'0229a7d4-0bef-4d95-910d-a341663172e1')
assert dps.logs == 'Retrieving deployment modification ' \
'0229a7d4-0bef-4d95-910d-a341663172e1...'
output_lines = dps.output.split('\n')
assert 'Modified nodes:' in output_lines
assert 'Node instances before modifications:' in output_lines
assert 'Added node instances:' in output_lines
assert 'Node instances before rollback:' not in output_lines
assert 'Removed node instances:' not in output_lines
added_title_idx = output_lines.index('Added node instances:')
assert 'node1_olbbe0 (node1)' in output_lines[added_title_idx + 1]
class DeploymentScheduleTest(CliCommandTest):
def setUp(self):
super(DeploymentScheduleTest, self).setUp()
self.use_manager()
def test_deployment_schedule_create(self):
self.client.execution_schedules.create = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule create dep1 backup '
'-s "12:00" -u "+1w +1d" -r 2d --tz EST')
now = datetime.datetime.utcnow()
expected_since = now.replace(
hour=17, minute=0, second=0, microsecond=0)
expected_until = now.replace(second=0, microsecond=0) + \
datetime.timedelta(days=8)
call_args = list(self.client.execution_schedules.create.call_args)
assert call_args[0][0] == 'backup'
assert call_args[1]['since'] == expected_since
assert call_args[1]['until'] == expected_until
assert call_args[1]['recurrence'] == '2d'
def test_deployment_schedule_create_with_schedule_name(self):
self.client.execution_schedules.create = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule create dep1 backup '
'-n back_me_up -s "1905-6-13 12:00" --tz GMT')
expected_since = \
datetime.datetime.strptime('1905-6-13 12:00', '%Y-%m-%d %H:%M')
call_args = list(self.client.execution_schedules.create.call_args)
assert call_args[0][0] == 'back_me_up'
assert call_args[1]['since'] == expected_since
assert not call_args[1]['recurrence']
assert not call_args[1]['until']
def test_deployment_schedule_create_missing_since(self):
outcome = self.invoke(
'cfy deployments schedule create dep1 backup',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn("Missing option '-s' / '--since'", outcome.output)
def test_deployment_schedule_create_missing_workflow_id(self):
outcome = self.invoke(
'cfy deployments schedule create dep1 -s "12:33"',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn("Missing argument 'WORKFLOW_ID'", outcome.output)
def test_deployment_schedule_create_bad_time_expressions(self):
self.client.execution_schedules.create = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
command = 'cfy deployments schedule create dep1 install -s "{}"'
error_msg = '{} is not a legal time format. accepted formats are ' \
'YYYY-MM-DD HH:MM | HH:MM'
illegal_time_formats = ['blah', '15:33:18', '99:99',
'2000/1/1 09:17', '-1 min']
for time_format in illegal_time_formats:
self.invoke(
command.format(time_format),
err_str_segment=error_msg.format(time_format),
exception=NonRecoverableError)
illegal_time_deltas = ['+10 dobosh', '+rez']
for delta in illegal_time_deltas:
self.invoke(
command.format(delta),
err_str_segment='{} is not a legal time delta'.format(
delta.strip('+')),
exception=NonRecoverableError)
def test_deployment_schedule_create_bad_timezone(self):
self.invoke('cfy deployments schedule create dep1 install '
'-s "7:15" --tz Mars/SpaceX',
err_str_segment='Mars/SpaceX is not a recognized timezone',
exception=NonRecoverableError)
def test_deployment_schedule_create_months_delta(self):
self.client.execution_schedules.create = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule create dep backup -s "+13mo"')
call_args = list(self.client.execution_schedules.create.call_args)
now = datetime.datetime.utcnow()
current_month = now.month
current_year = now.year
current_day = now.day
expected_month = 1 if current_month == 12 else current_month + 1
expected_year = current_year + (2 if current_month == 12 else 1)
expected_since = now.replace(
second=0, microsecond=0,
year=expected_year, month=expected_month, day=1)
expected_since += datetime.timedelta(days=current_day - 1)
assert call_args[1]['since'] == expected_since
def test_deployment_schedule_create_years_delta(self):
self.client.execution_schedules.create = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule create dep backup -s "+2y"')
call_args = list(self.client.execution_schedules.create.call_args)
now = datetime.datetime.utcnow()
expected_since = now.replace(second=0, microsecond=0, year=now.year+2)
assert call_args[1]['since'] == expected_since
def test_deployment_schedule_create_hours_minutes_delta(self):
self.client.execution_schedules.create = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule create dep backup '
'-s "+25 hours+119min"')
call_args = list(self.client.execution_schedules.create.call_args)
expected_since = \
(datetime.datetime.utcnow().replace(second=0, microsecond=0) +
datetime.timedelta(days=1, hours=2, minutes=59))
assert call_args[1]['since'] == expected_since
def test_deployment_schedule_update(self):
self.client.execution_schedules.update = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule update dep sched-1 -r "3 weeks" '
'-u "22:00" --tz "Asia/Shanghai"')
expected_until = datetime.datetime.utcnow().replace(
hour=14, minute=0, second=0, microsecond=0)
call_args = list(self.client.execution_schedules.update.call_args)
assert call_args[0][0] == 'sched-1'
assert call_args[1]['recurrence'] == '3 weeks'
assert call_args[1]['until'] == expected_until
def test_deployment_schedule_enable(self):
mock_schedule = MagicMock()
mock_schedule.enabled = False
self.client.execution_schedules.get = MagicMock(
return_value=mock_schedule)
self.client.execution_schedules.update = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule enable dep sched-1')
call_args = list(self.client.execution_schedules.update.call_args)
assert call_args[1]['enabled']
def test_deployment_schedule_enable_already_enabled(self):
mock_schedule = MagicMock()
mock_schedule.enabled = True
self.client.execution_schedules.get = MagicMock(
return_value=mock_schedule)
self.invoke(
'cfy deployments schedule enable dep sched-1',
err_str_segment='Schedule `sched-1` on deployment `dep` is '
'already enabled',
exception=CloudifyCliError)
def test_deployment_schedule_disable(self):
mock_schedule = MagicMock()
mock_schedule.enabled = True
self.client.execution_schedules.get = MagicMock(
return_value=mock_schedule)
self.client.execution_schedules.update = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule disable dep sched-1')
call_args = list(self.client.execution_schedules.update.call_args)
assert not call_args[1]['enabled']
def test_deployment_schedule_disable_already_disabled(self):
mock_schedule = MagicMock()
mock_schedule.enabled = False
self.client.execution_schedules.get = MagicMock(
return_value=mock_schedule)
self.invoke(
'cfy deployments schedule disable dep sched-1',
err_str_segment='Schedule `sched-1` on deployment `dep` is '
'already disabled',
exception=CloudifyCliError)
def test_deployment_schedule_delete(self):
self.client.execution_schedules.delete = MagicMock(
return_value=execution_schedules.ExecutionSchedule({}))
self.invoke('cfy deployments schedule delete dep sched-1')
def test_deployment_schedule_list(self):
self.client.execution_schedules.list = \
self._get_deployment_schedules_list()
output = json.loads(
self.invoke('cfy deployments schedule list --json').output)
assert len(output) == 3
def test_deployment_schedule_list_filter_since(self):
self.client.execution_schedules.list = \
self._get_deployment_schedules_list()
# jan1 will be excluded: has no occurrences at/after Jan 2nd
output = json.loads(
self.invoke('cfy deployments schedule list -s "1900-1-2 0:00" '
'--tz GMT --json').output)
assert len(output) == 2
def test_deployment_schedule_list_filter_until(self):
self.client.execution_schedules.list = \
self._get_deployment_schedules_list()
# jan2_jan3 will be excluded: has no occurrences before Jan 2nd
output = json.loads(
self.invoke('cfy deployments schedule list -u "1900-1-2 0:00" '
'--tz GMT --json').output)
assert len(output) == 2
@staticmethod
def _get_deployment_schedules_list():
schedules = [
{
'id': 'jan1_jan2',
'deployment_id': 'dep1',
'all_next_occurrences': ['1900-1-1 12:00:00',
'1900-1-2 12:00:00'],
},
{
'id': 'jan2_jan3',
'deployment_id': 'dep1',
'all_next_occurrences': ['1900-1-2 12:00:00',
'1900-1-3 12:00:00'],
},
{
'id': 'jan1',
'deployment_id': 'dep2',
'all_next_occurrences': ['1900-1-1 12:00:00'],
}
]
return MagicMock(return_value=MockListResponse(items=schedules))
@staticmethod
def _get_deployment_schedule_detailed(enabled=True):
return MagicMock(
return_value=execution_schedules.ExecutionSchedule({
'id': 'sched_get',
'deployment_id': 'dep3',
'rule': {},
'execution_arguments': {},
'parameters': {},
'enabled': enabled,
'all_next_occurrences': ['1900-1-1 12:00:00',
'1900-1-2 12:00:00',
'1900-1-3 12:00:00']
}))
def test_deployment_schedule_get(self):
self.client.execution_schedules.get = \
self._get_deployment_schedule_detailed()
output = self.invoke('cfy deployments schedule get dep sched_get '
'--preview 2')
self.assertIn('Computed 3 upcoming occurrences. Listing first 2:',
output.output)
self.assertIn('| sched_get | dep3 |', output.output)
self.assertIn('1 1900-1-1 12:00:00', output.output)
self.assertIn('2 1900-1-2 12:00:00', output.output)
def test_deployment_schedule_get_no_preview(self):
self.client.execution_schedules.get = \
self._get_deployment_schedule_detailed()
output = self.invoke('cfy deployments schedule get dep sched_get')
self.assertIn('| sched_get | dep3 |', output.output)
self.assertNotIn('Computed 3 upcoming occurrences', output.output)
def test_deployment_schedule_get_no_preview_because_disabled(self):
self.client.execution_schedules.get = \
self._get_deployment_schedule_detailed(enabled=False)
output = self.invoke(
'cfy deployments schedule get dep sched_get --preview 1',
err_str_segment='Deployment schedule sched_get is disabled, '
'no upcoming occurrences',
exception=CloudifyCliError)
self.assertIn('| sched_get | dep3 |', output.output)
| apache-2.0 | -2,392,071,210,047,672,000 | 40.339897 | 79 | 0.589334 | false | 4.071248 | true | false | false |
Scille/parsec-cloud | parsec/backend/vlob.py | 1 | 13190 | # Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
from typing import List, Tuple, Dict, Optional
from uuid import UUID
import pendulum
from parsec.utils import timestamps_in_the_ballpark
from parsec.api.protocol import (
DeviceID,
OrganizationID,
vlob_create_serializer,
vlob_read_serializer,
vlob_update_serializer,
vlob_poll_changes_serializer,
vlob_list_versions_serializer,
vlob_maintenance_get_reencryption_batch_serializer,
vlob_maintenance_save_reencryption_batch_serializer,
)
from parsec.backend.utils import catch_protocol_errors, api
class VlobError(Exception):
pass
class VlobAccessError(VlobError):
pass
class VlobVersionError(VlobError):
pass
class VlobTimestampError(VlobError):
pass
class VlobNotFoundError(VlobError):
pass
class VlobAlreadyExistsError(VlobError):
pass
class VlobEncryptionRevisionError(VlobError):
pass
class VlobInMaintenanceError(VlobError):
pass
class VlobNotInMaintenanceError(VlobError):
pass
class VlobMaintenanceError(VlobError):
pass
class BaseVlobComponent:
@api("vlob_create")
@catch_protocol_errors
async def api_vlob_create(self, client_ctx, msg):
msg = vlob_create_serializer.req_load(msg)
now = pendulum.now()
if not timestamps_in_the_ballpark(msg["timestamp"], now):
return {"status": "bad_timestamp", "reason": f"Timestamp is out of date."}
try:
await self.create(client_ctx.organization_id, client_ctx.device_id, **msg)
except VlobAlreadyExistsError as exc:
return vlob_create_serializer.rep_dump({"status": "already_exists", "reason": str(exc)})
except VlobAccessError:
return vlob_create_serializer.rep_dump({"status": "not_allowed"})
except VlobEncryptionRevisionError:
return vlob_create_serializer.rep_dump({"status": "bad_encryption_revision"})
except VlobInMaintenanceError:
return vlob_create_serializer.rep_dump({"status": "in_maintenance"})
return vlob_create_serializer.rep_dump({"status": "ok"})
@api("vlob_read")
@catch_protocol_errors
async def api_vlob_read(self, client_ctx, msg):
msg = vlob_read_serializer.req_load(msg)
try:
version, blob, author, created_on = await self.read(
client_ctx.organization_id, client_ctx.device_id, **msg
)
except VlobNotFoundError as exc:
return vlob_read_serializer.rep_dump({"status": "not_found", "reason": str(exc)})
except VlobAccessError:
return vlob_read_serializer.rep_dump({"status": "not_allowed"})
except VlobVersionError:
return vlob_read_serializer.rep_dump({"status": "bad_version"})
except VlobTimestampError:
return vlob_read_serializer.rep_dump({"status": "bad_timestamp"})
except VlobEncryptionRevisionError:
            return vlob_read_serializer.rep_dump({"status": "bad_encryption_revision"})
except VlobInMaintenanceError:
return vlob_read_serializer.rep_dump({"status": "in_maintenance"})
return vlob_read_serializer.rep_dump(
{
"status": "ok",
"blob": blob,
"version": version,
"author": author,
"timestamp": created_on,
}
)
@api("vlob_update")
@catch_protocol_errors
async def api_vlob_update(self, client_ctx, msg):
msg = vlob_update_serializer.req_load(msg)
now = pendulum.now()
if not timestamps_in_the_ballpark(msg["timestamp"], now):
return {"status": "bad_timestamp", "reason": f"Timestamp is out of date."}
try:
await self.update(client_ctx.organization_id, client_ctx.device_id, **msg)
except VlobNotFoundError as exc:
return vlob_update_serializer.rep_dump({"status": "not_found", "reason": str(exc)})
except VlobAccessError:
return vlob_update_serializer.rep_dump({"status": "not_allowed"})
except VlobVersionError:
return vlob_update_serializer.rep_dump({"status": "bad_version"})
except VlobTimestampError:
return vlob_update_serializer.rep_dump({"status": "bad_timestamp"})
except VlobEncryptionRevisionError:
            return vlob_update_serializer.rep_dump({"status": "bad_encryption_revision"})
except VlobInMaintenanceError:
return vlob_update_serializer.rep_dump({"status": "in_maintenance"})
return vlob_update_serializer.rep_dump({"status": "ok"})
@api("vlob_poll_changes")
@catch_protocol_errors
async def api_vlob_poll_changes(self, client_ctx, msg):
msg = vlob_poll_changes_serializer.req_load(msg)
# TODO: raise error if too many events since offset ?
try:
checkpoint, changes = await self.poll_changes(
client_ctx.organization_id,
client_ctx.device_id,
msg["realm_id"],
msg["last_checkpoint"],
)
except VlobAccessError:
return vlob_poll_changes_serializer.rep_dump({"status": "not_allowed"})
except VlobNotFoundError as exc:
return vlob_poll_changes_serializer.rep_dump(
{"status": "not_found", "reason": str(exc)}
)
except VlobInMaintenanceError:
return vlob_poll_changes_serializer.rep_dump({"status": "in_maintenance"})
return vlob_poll_changes_serializer.rep_dump(
{"status": "ok", "current_checkpoint": checkpoint, "changes": changes}
)
@api("vlob_list_versions")
@catch_protocol_errors
async def api_vlob_list_versions(self, client_ctx, msg):
msg = vlob_list_versions_serializer.req_load(msg)
try:
versions_dict = await self.list_versions(
client_ctx.organization_id, client_ctx.device_id, msg["vlob_id"]
)
except VlobAccessError:
return vlob_list_versions_serializer.rep_dump({"status": "not_allowed"})
except VlobNotFoundError as exc:
return vlob_list_versions_serializer.rep_dump(
{"status": "not_found", "reason": str(exc)}
)
except VlobInMaintenanceError:
return vlob_list_versions_serializer.rep_dump({"status": "in_maintenance"})
return vlob_list_versions_serializer.rep_dump({"status": "ok", "versions": versions_dict})
@api("vlob_maintenance_get_reencryption_batch")
@catch_protocol_errors
async def api_vlob_maintenance_get_reencryption_batch(self, client_ctx, msg):
msg = vlob_maintenance_get_reencryption_batch_serializer.req_load(msg)
try:
batch = await self.maintenance_get_reencryption_batch(
client_ctx.organization_id, client_ctx.device_id, **msg
)
except VlobAccessError:
return vlob_maintenance_get_reencryption_batch_serializer.rep_dump(
{"status": "not_allowed"}
)
except VlobNotFoundError as exc:
return vlob_maintenance_get_reencryption_batch_serializer.rep_dump(
{"status": "not_found", "reason": str(exc)}
)
except VlobNotInMaintenanceError as exc:
return vlob_maintenance_get_reencryption_batch_serializer.rep_dump(
{"status": "not_in_maintenance", "reason": str(exc)}
)
except VlobEncryptionRevisionError:
            return vlob_maintenance_get_reencryption_batch_serializer.rep_dump(
                {"status": "bad_encryption_revision"}
            )
except VlobMaintenanceError as exc:
return vlob_maintenance_get_reencryption_batch_serializer.rep_dump(
{"status": "maintenance_error", "reason": str(exc)}
)
return vlob_maintenance_get_reencryption_batch_serializer.rep_dump(
{
"status": "ok",
"batch": [
{"vlob_id": vlob_id, "version": version, "blob": blob}
for vlob_id, version, blob in batch
],
}
)
@api("vlob_maintenance_save_reencryption_batch")
@catch_protocol_errors
async def api_vlob_maintenance_save_reencryption_batch(self, client_ctx, msg):
msg = vlob_maintenance_save_reencryption_batch_serializer.req_load(msg)
try:
total, done = await self.maintenance_save_reencryption_batch(
client_ctx.organization_id,
client_ctx.device_id,
realm_id=msg["realm_id"],
encryption_revision=msg["encryption_revision"],
batch=[(x["vlob_id"], x["version"], x["blob"]) for x in msg["batch"]],
)
except VlobAccessError:
return vlob_maintenance_save_reencryption_batch_serializer.rep_dump(
{"status": "not_allowed"}
)
except VlobNotFoundError as exc:
return vlob_maintenance_save_reencryption_batch_serializer.rep_dump(
{"status": "not_found", "reason": str(exc)}
)
except VlobNotInMaintenanceError as exc:
            return vlob_maintenance_save_reencryption_batch_serializer.rep_dump(
{"status": "not_in_maintenance", "reason": str(exc)}
)
except VlobEncryptionRevisionError:
            return vlob_maintenance_save_reencryption_batch_serializer.rep_dump(
                {"status": "bad_encryption_revision"}
            )
except VlobMaintenanceError as exc:
return vlob_maintenance_save_reencryption_batch_serializer.rep_dump(
{"status": "maintenance_error", "reason": str(exc)}
)
return vlob_maintenance_save_reencryption_batch_serializer.rep_dump(
{"status": "ok", "total": total, "done": done}
)
async def create(
self,
organization_id: OrganizationID,
author: DeviceID,
realm_id: UUID,
encryption_revision: int,
vlob_id: UUID,
timestamp: pendulum.DateTime,
blob: bytes,
) -> None:
"""
Raises:
VlobAlreadyExistsError
VlobEncryptionRevisionError: if encryption_revision mismatch
VlobInMaintenanceError
"""
raise NotImplementedError()
async def read(
self,
organization_id: OrganizationID,
author: DeviceID,
encryption_revision: int,
vlob_id: UUID,
version: Optional[int] = None,
timestamp: Optional[pendulum.DateTime] = None,
) -> Tuple[int, bytes, DeviceID, pendulum.DateTime]:
"""
Raises:
VlobAccessError
VlobVersionError
VlobNotFoundError
VlobEncryptionRevisionError: if encryption_revision mismatch
VlobInMaintenanceError
"""
raise NotImplementedError()
async def update(
self,
organization_id: OrganizationID,
author: DeviceID,
encryption_revision: int,
vlob_id: UUID,
version: int,
timestamp: pendulum.DateTime,
blob: bytes,
) -> None:
"""
Raises:
VlobAccessError
VlobVersionError
VlobTimestampError
VlobNotFoundError
VlobEncryptionRevisionError: if encryption_revision mismatch
VlobInMaintenanceError
"""
raise NotImplementedError()
async def poll_changes(
self, organization_id: OrganizationID, author: DeviceID, realm_id: UUID, checkpoint: int
) -> Tuple[int, Dict[UUID, int]]:
"""
Raises:
VlobInMaintenanceError
VlobNotFoundError
VlobAccessError
"""
raise NotImplementedError()
async def list_versions(
self, organization_id: OrganizationID, author: DeviceID, vlob_id: UUID
) -> Dict[int, Tuple[pendulum.DateTime, DeviceID]]:
"""
Raises:
VlobInMaintenanceError
VlobNotFoundError
VlobAccessError
"""
raise NotImplementedError()
async def maintenance_get_reencryption_batch(
self,
organization_id: OrganizationID,
author: DeviceID,
realm_id: UUID,
encryption_revision: int,
size: int,
) -> List[Tuple[UUID, int, bytes]]:
"""
Raises:
VlobNotFoundError
VlobAccessError
VlobEncryptionRevisionError
VlobMaintenanceError: not in maintenance
"""
raise NotImplementedError()
async def maintenance_save_reencryption_batch(
self,
organization_id: OrganizationID,
author: DeviceID,
realm_id: UUID,
encryption_revision: int,
batch: List[Tuple[UUID, int, bytes]],
) -> Tuple[int, int]:
"""
Raises:
VlobNotFoundError
VlobAccessError
VlobEncryptionRevisionError
VlobMaintenanceError: not in maintenance
"""
raise NotImplementedError()
| agpl-3.0 | -1,780,366,703,321,259,500 | 31.407862 | 100 | 0.605762 | false | 4.125743 | false | false | false |
nakagami/reportlab | src/reportlab/pdfbase/ttfonts.py | 1 | 45587 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__ = '$Id: ttfonts.py 3959 2012-09-27 14:39:39Z robin $'
__doc__="""TrueType font support
This defines classes to represent TrueType fonts. They know how to calculate
their own width and how to write themselves into PDF files. They support
subsetting and embedding and can represent all 16-bit Unicode characters.
Note on dynamic fonts
---------------------
Usually a Font in ReportLab corresponds to a fixed set of PDF objects (Font,
FontDescriptor, Encoding). But with dynamic font subsetting a single TTFont
will result in a number of Font/FontDescriptor/Encoding object sets, and the
contents of those will depend on the actual characters used for printing.
To support dynamic font subsetting a concept of "dynamic font" was introduced.
Dynamic Fonts have a _dynamicFont attribute set to 1.
Dynamic fonts have the following additional functions::
def splitString(self, text, doc):
'''Splits text into a number of chunks, each of which belongs to a
single subset. Returns a list of tuples (subset, string). Use
subset numbers with getSubsetInternalName. Doc is used to identify
a document so that different documents may have different dynamically
constructed subsets.'''
def getSubsetInternalName(self, subset, doc):
'''Returns the name of a PDF Font object corresponding to a given
subset of this dynamic font. Use this function instead of
PDFDocument.getInternalFontName.'''
You must never call PDFDocument.getInternalFontName for dynamic fonts.
If you have a traditional static font, mapping to PDF text output operators
is simple::
'%s 14 Tf (%s) Tj' % (getInternalFontName(psfontname), text)
If you have a dynamic font, use this instead::
for subset, chunk in font.splitString(text, doc):
'%s 14 Tf (%s) Tj' % (font.getSubsetInternalName(subset, doc), chunk)
(Tf is a font setting operator and Tj is a text output operator. You should
also escape invalid characters in Tj argument, see TextObject._formatText.
Oh, and that 14 up there is font size.)
Canvas and TextObject have special support for dynamic fonts.
"""
import sys
import string
from struct import pack, unpack, error as structError
from reportlab.lib.utils import getBytesIO, isUnicodeType
from reportlab.pdfbase import pdfmetrics, pdfdoc
from reportlab import rl_config
class TTFError(pdfdoc.PDFError):
"TrueType font exception"
pass
if sys.version_info[0] == 3:
def SUBSETN(n,table=bytes.maketrans(b'0123456789',b'ABCDEFGHIJ')):
return ('%6.6d'%n).translate(table)
else:
def SUBSETN(n,table=string.maketrans('0123456789','ABCDEFGHIJ')):
return ('%6.6d'%n).translate(table)
#
# Helpers
#
from codecs import utf_8_encode, latin_1_decode
def latin1_to_utf8(text):
"helper to convert when needed from latin input"
return utf_8_encode(latin_1_decode(text)[0])[0]
def makeToUnicodeCMap(fontname, subset):
"""Creates a ToUnicode CMap for a given subset. See Adobe
_PDF_Reference (ISBN 0-201-75839-3) for more information."""
cmap = [
"/CIDInit /ProcSet findresource begin",
"12 dict begin",
"begincmap",
"/CIDSystemInfo",
"<< /Registry (%s)" % fontname,
"/Ordering (%s)" % fontname,
"/Supplement 0",
">> def",
"/CMapName /%s def" % fontname,
"/CMapType 2 def",
"1 begincodespacerange",
"<00> <%02X>" % (len(subset) - 1),
"endcodespacerange",
"%d beginbfchar" % len(subset)
] + ["<%02X> <%04X>" % (i,v) for i,v in enumerate(subset)] + [
"endbfchar",
"endcmap",
"CMapName currentdict /CMap defineresource pop",
"end",
"end"
]
return '\n'.join(cmap)
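# A quick illustration of the mapping produced above (hypothetical values): for
# makeToUnicodeCMap('AAAAAA+MyFont', [0x0020, 0x0041]) the beginbfchar section
# contains "<00> <0020>" and "<01> <0041>", i.e. subset code -> Unicode code
# point, which is what PDF viewers rely on for text extraction and copy/paste.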
def splice(stream, offset, value):
"""Splices the given value into stream at the given offset and
returns the resulting stream (the original is unchanged)"""
return stream[:offset] + value + stream[offset + len(value):]
def _set_ushort(stream, offset, value):
"""Writes the given unsigned short value into stream at the given
offset and returns the resulting stream (the original is unchanged)"""
return splice(stream, offset, pack(">H", value))
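# For example, assuming a 4-byte buffer:
#   _set_ushort(b'\x00\x00\x00\x00', 2, 0x1234) == b'\x00\x00\x12\x34'
# splice() never mutates the input stream; it always returns a new bytes object.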
try:
import _rl_accel
except ImportError:
try:
from reportlab.lib import _rl_accel
except ImportError:
_rl_accel = None
try:
hex32 = _rl_accel.hex32
except:
def hex32(i):
return '0X%8.8X' % (i&0xFFFFFFFF)
try:
add32 = _rl_accel.add32L
calcChecksum = _rl_accel.calcChecksumL
except:
def add32(x, y):
"Calculate (x + y) modulo 2**32"
return (x+y) & 0xFFFFFFFF
def calcChecksum(data):
"""Calculates TTF-style checksums"""
if len(data)&3: data = data + (4-(len(data)&3))*b"\0"
return sum(unpack(">%dl" % (len(data)>>2), data)) & 0xFFFFFFFF
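# For reference, with the pure-Python fallback above:
#   calcChecksum(b'\x00\x01\x00\x00') == 0x00010000
# i.e. the data is zero-padded to a multiple of 4, read as big-endian 32-bit
# words and summed modulo 2**32, which is the TrueType table checksum convention.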
del _rl_accel
#
# TrueType font handling
#
GF_ARG_1_AND_2_ARE_WORDS = 1 << 0
GF_ARGS_ARE_XY_VALUES = 1 << 1
GF_ROUND_XY_TO_GRID = 1 << 2
GF_WE_HAVE_A_SCALE = 1 << 3
GF_RESERVED = 1 << 4
GF_MORE_COMPONENTS = 1 << 5
GF_WE_HAVE_AN_X_AND_Y_SCALE = 1 << 6
GF_WE_HAVE_A_TWO_BY_TWO = 1 << 7
GF_WE_HAVE_INSTRUCTIONS = 1 << 8
GF_USE_MY_METRICS = 1 << 9
GF_OVERLAP_COMPOUND = 1 << 10
GF_SCALED_COMPONENT_OFFSET = 1 << 11
GF_UNSCALED_COMPONENT_OFFSET = 1 << 12
def TTFOpenFile(fn):
'''Opens a TTF file possibly after searching TTFSearchPath
returns (filename,file)
'''
from reportlab.lib.utils import rl_isfile, open_for_read
try:
f = open_for_read(fn,'rb')
return fn, f
except IOError:
import os
if not os.path.isabs(fn):
for D in rl_config.TTFSearchPath:
tfn = os.path.join(D,fn)
if rl_isfile(tfn):
f = open_for_read(tfn,'rb')
return tfn, f
raise TTFError('Can\'t open file "%s"' % fn)
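# Typical use (with a hypothetical font name): TTFOpenFile('myfont.ttf') returns
# the resolved filename and an open binary file object; rl_config.TTFSearchPath
# is only searched when the plain name cannot be opened directly and is not an
# absolute path.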
class TTFontParser:
"Basic TTF file parser"
ttfVersions = (0x00010000,0x74727565,0x74746366)
ttcVersions = (0x00010000,0x00020000)
fileKind='TTF'
def __init__(self, file, validate=0,subfontIndex=0):
"""Loads and parses a TrueType font file. file can be a filename or a
        file object. If validate is set to a false value, skips checksum
validation. This can save time, especially if the font is large.
"""
self.validate = validate
self.readFile(file)
isCollection = self.readHeader()
if isCollection:
self.readTTCHeader()
self.getSubfont(subfontIndex)
else:
if self.validate: self.checksumFile()
self.readTableDirectory()
self.subfontNameX = ''
def readTTCHeader(self):
self.ttcVersion = self.read_ulong()
self.fileKind = 'TTC'
self.ttfVersions = self.ttfVersions[:-1]
if self.ttcVersion not in self.ttcVersions:
raise TTFError('"%s" is not a %s file: can\'t read version 0x%8.8x' %(self.filename,self.fileKind,self.ttcVersion))
self.numSubfonts = self.read_ulong()
self.subfontOffsets = []
a = self.subfontOffsets.append
for i in range(self.numSubfonts):
a(self.read_ulong())
def getSubfont(self,subfontIndex):
if self.fileKind!='TTC':
            raise TTFError('"%s" is a %s file, not a TTC: cannot use getSubfont' % (self.filename,self.fileKind))
try:
pos = self.subfontOffsets[subfontIndex]
except IndexError:
raise TTFError('TTC file "%s": bad subfontIndex %s not in [0,%d]' % (self.filename,subfontIndex,self.numSubfonts-1))
self.seek(pos)
self.readHeader()
self.readTableDirectory()
self.subfontNameX = '-'+str(subfontIndex)
def readTableDirectory(self):
try:
self.numTables = self.read_ushort()
self.searchRange = self.read_ushort()
self.entrySelector = self.read_ushort()
self.rangeShift = self.read_ushort()
# Read table directory
self.table = {}
self.tables = []
for n in range(self.numTables):
record = {}
record['tag'] = self.read_tag()
record['checksum'] = self.read_ulong()
record['offset'] = self.read_ulong()
record['length'] = self.read_ulong()
self.tables.append(record)
self.table[record['tag']] = record
except:
raise TTFError('Corrupt %s file "%s" cannot read Table Directory' % (self.fileKind, self.filename))
if self.validate: self.checksumTables()
def readHeader(self):
'''read the sfnt header at the current position'''
try:
self.version = version = self.read_ulong()
except:
raise TTFError('"%s" is not a %s file: can\'t read version' %(self.filename,self.fileKind))
if version==0x4F54544F:
raise TTFError('%s file "%s": postscript outlines are not supported'%(self.fileKind,self.filename))
if version not in self.ttfVersions:
raise TTFError('Not a TrueType font: version=0x%8.8X' % version)
return version==self.ttfVersions[-1]
def readFile(self,f):
if hasattr(f,'read'):
self.filename = '(ttf)'
else:
self.filename, f = TTFOpenFile(f)
self._ttf_data = f.read()
self._pos = 0
def checksumTables(self):
# Check the checksums for all tables
for t in self.tables:
table = self.get_chunk(t['offset'], t['length'])
checksum = calcChecksum(table)
if t['tag'] == 'head':
adjustment = unpack('>l', table[8:8+4])[0]
checksum = add32(checksum, -adjustment)
xchecksum = t['checksum']
if xchecksum != checksum:
raise TTFError('TTF file "%s": invalid checksum %s table: %s (expected %s)' % (self.filename,hex32(checksum),t['tag'],hex32(xchecksum)))
def checksumFile(self):
# Check the checksums for the whole file
checksum = calcChecksum(self._ttf_data)
if 0xB1B0AFBA!=checksum:
raise TTFError('TTF file "%s": invalid checksum %s (expected 0xB1B0AFBA) len: %d &3: %d' % (self.filename,hex32(checksum),len(self._ttf_data),(len(self._ttf_data)&3)))
def get_table_pos(self, tag):
"Returns the offset and size of a given TTF table."
offset = self.table[tag]['offset']
length = self.table[tag]['length']
return (offset, length)
def seek(self, pos):
"Moves read pointer to a given offset in file."
self._pos = pos
def skip(self, delta):
"Skip the given number of bytes."
self._pos = self._pos + delta
def seek_table(self, tag, offset_in_table = 0):
"""Moves read pointer to the given offset within a given table and
returns absolute offset of that position in the file."""
self._pos = self.get_table_pos(tag)[0] + offset_in_table
return self._pos
def read_tag(self):
"Read a 4-character tag"
self._pos += 4
s = self._ttf_data[self._pos - 4:self._pos]
        if sys.version_info[0]==3 and not isinstance(s, str):
s = s.decode('utf-8')
return s
def read_ushort(self):
"Reads an unsigned short"
self._pos += 2
return unpack('>H',self._ttf_data[self._pos-2:self._pos])[0]
def read_ulong(self):
"Reads an unsigned long"
self._pos += 4
return unpack('>L',self._ttf_data[self._pos - 4:self._pos])[0]
def read_short(self):
"Reads a signed short"
self._pos += 2
try:
return unpack('>h',self._ttf_data[self._pos-2:self._pos])[0]
except structError as error:
raise TTFError(error)
def get_ushort(self, pos):
"Return an unsigned short at given position"
return unpack('>H',self._ttf_data[pos:pos+2])[0]
def get_ulong(self, pos):
"Return an unsigned long at given position"
return unpack('>L',self._ttf_data[pos:pos+4])[0]
def get_chunk(self, pos, length):
"Return a chunk of raw data at given position"
return self._ttf_data[pos:pos+length]
def get_table(self, tag):
"Return the given TTF table"
pos, length = self.get_table_pos(tag)
return self._ttf_data[pos:pos+length]
class TTFontMaker:
"Basic TTF file generator"
def __init__(self):
"Initializes the generator."
self.tables = {}
def add(self, tag, data):
"Adds a table to the TTF file."
if tag == 'head':
data = splice(data, 8, b'\0\0\0\0')
self.tables[tag] = data
def makeStream(self):
"Finishes the generation and returns the TTF file as a string"
stm = getBytesIO()
write = stm.write
numTables = len(self.tables)
searchRange = 1
entrySelector = 0
while searchRange * 2 <= numTables:
searchRange = searchRange * 2
entrySelector = entrySelector + 1
searchRange = searchRange * 16
rangeShift = numTables * 16 - searchRange
# Header
write(pack(">lHHHH", 0x00010000, numTables, searchRange,
entrySelector, rangeShift))
# Table directory
tables = list(self.tables.items())
tables.sort() # XXX is this the correct order?
offset = 12 + numTables * 16
for tag, data in tables:
if tag == 'head':
head_start = offset
checksum = calcChecksum(data)
if isUnicodeType(tag):
tag = tag.encode('utf-8')
write(tag)
write(pack(">LLL", checksum, offset, len(data)))
paddedLength = (len(data)+3)&~3
offset = offset + paddedLength
# Table data
for tag, data in tables:
data += b"\0\0\0"
write(data[:len(data)&~3])
checksum = calcChecksum(stm.getvalue())
checksum = add32(0xB1B0AFBA, -checksum)
stm.seek(head_start + 8)
write(pack('>L', checksum))
return stm.getvalue()
class TTFontFile(TTFontParser):
"TTF file parser and generator"
def __init__(self, file, charInfo=1, validate=0,subfontIndex=0):
"""Loads and parses a TrueType font file.
file can be a filename or a file object. If validate is set to a false
        value, skips checksum validation. This can save time, especially if
the font is large. See TTFontFile.extractInfo for more information.
"""
TTFontParser.__init__(self, file, validate=validate,subfontIndex=subfontIndex)
self.extractInfo(charInfo)
def extractInfo(self, charInfo=1):
"""
Extract typographic information from the loaded font file.
The following attributes will be set::
name PostScript font name
flags Font flags
ascent Typographic ascender in 1/1000ths of a point
descent Typographic descender in 1/1000ths of a point
capHeight Cap height in 1/1000ths of a point (0 if not available)
bbox Glyph bounding box [l,t,r,b] in 1/1000ths of a point
_bbox Glyph bounding box [l,t,r,b] in unitsPerEm
unitsPerEm Glyph units per em
italicAngle Italic angle in degrees ccw
stemV stem weight in 1/1000ths of a point (approximate)
If charInfo is true, the following will also be set::
defaultWidth default glyph width in 1/1000ths of a point
charWidths dictionary of character widths for every supported UCS character
code
This will only work if the font has a Unicode cmap (platform 3,
encoding 1, format 4 or platform 0 any encoding format 4). Setting
charInfo to false avoids this requirement
"""
# name - Naming table
name_offset = self.seek_table("name")
format = self.read_ushort()
if format != 0:
raise TTFError("Unknown name table format (%d)" % format)
numRecords = self.read_ushort()
string_data_offset = name_offset + self.read_ushort()
names = {1:None,2:None,3:None,4:None,6:None}
K = names.keys()
nameCount = len(names)
for i in range(numRecords):
platformId = self.read_ushort()
encodingId = self.read_ushort()
languageId = self.read_ushort()
nameId = self.read_ushort()
length = self.read_ushort()
offset = self.read_ushort()
if nameId not in K: continue
N = None
if platformId == 3 and encodingId == 1 and languageId == 0x409: # Microsoft, Unicode, US English, PS Name
opos = self._pos
try:
self.seek(string_data_offset + offset)
if length % 2 != 0:
raise TTFError("PostScript name is UTF-16BE string of odd length")
                    length //= 2
N = []
A = N.append
while length > 0:
char = self.read_ushort()
A(chr(char))
length -= 1
N = ''.join(N)
finally:
self._pos = opos
elif platformId == 1 and encodingId == 0 and languageId == 0: # Macintosh, Roman, English, PS Name
# According to OpenType spec, if PS name exists, it must exist
# both in MS Unicode and Macintosh Roman formats. Apparently,
# you can find live TTF fonts which only have Macintosh format.
N = self.get_chunk(string_data_offset + offset, length)
if N and names[nameId]==None:
                if sys.version_info[0]==3 and not isinstance(N, str):
N = N.decode('utf-8')
names[nameId] = N
nameCount -= 1
if nameCount==0: break
if names[6] is not None:
psName = names[6].replace(" ", "-") #Dinu Gherman's fix for font names with spaces
elif names[4] is not None:
psName = names[4].replace(" ", "-")
# Fine, one last try before we bail.
elif names[1] is not None:
psName = names[1].replace(" ", "-")
else:
psName = None
# Don't just assume, check for None since some shoddy fonts cause crashes here...
if not psName:
raise TTFError("Could not find PostScript font name")
for c in psName:
oc = ord(c)
if oc>126 or c in ' [](){}<>/%':
raise TTFError("psName=%r contains invalid character '%s' ie U+%04X" % (psName,c,ord(c)))
self.name = psName
self.familyName = names[1] or psName
self.styleName = names[2] or 'Regular'
self.fullName = names[4] or psName
self.uniqueFontID = names[3] or psName
# head - Font header table
self.seek_table("head")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj != 1:
raise TTFError('Unknown head table version %d.%04x' % (ver_maj, ver_min))
self.fontRevision = self.read_ushort(), self.read_ushort()
self.skip(4)
magic = self.read_ulong()
if magic != 0x5F0F3CF5:
raise TTFError('Invalid head table magic %04x' % magic)
self.skip(2)
self.unitsPerEm = unitsPerEm = self.read_ushort()
scale = lambda x, unitsPerEm=unitsPerEm: x * 1000. / unitsPerEm
self.skip(16)
xMin = self.read_short()
yMin = self.read_short()
xMax = self.read_short()
yMax = self.read_short()
self.bbox = [scale(i) for i in [xMin, yMin, xMax, yMax]]
self.skip(3*2)
indexToLocFormat = self.read_ushort()
glyphDataFormat = self.read_ushort()
# OS/2 - OS/2 and Windows metrics table
# (needs data from head table)
if "OS/2" in self.table:
self.seek_table("OS/2")
version = self.read_ushort()
self.skip(2)
usWeightClass = self.read_ushort()
self.skip(2)
fsType = self.read_ushort()
if fsType == 0x0002 or (fsType & 0x0300) != 0:
raise TTFError('Font does not allow subsetting/embedding (%04X)' % fsType)
self.skip(58) #11*2 + 10 + 4*4 + 4 + 3*2
sTypoAscender = self.read_short()
sTypoDescender = self.read_short()
self.ascent = scale(sTypoAscender) # XXX: for some reason it needs to be multiplied by 1.24--1.28
self.descent = scale(sTypoDescender)
if version > 1:
self.skip(16) #3*2 + 2*4 + 2
sCapHeight = self.read_short()
self.capHeight = scale(sCapHeight)
else:
self.capHeight = self.ascent
else:
# Microsoft TTFs require an OS/2 table; Apple ones do not. Try to
# cope. The data is not very important anyway.
usWeightClass = 500
self.ascent = scale(yMax)
self.descent = scale(yMin)
self.capHeight = self.ascent
# There's no way to get stemV from a TTF file short of analyzing actual outline data
# This fuzzy formula is taken from pdflib sources, but we could just use 0 here
self.stemV = 50 + int((usWeightClass / 65.0) ** 2)
# post - PostScript table
# (needs data from OS/2 table)
self.seek_table("post")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj not in (1, 2, 3, 4):
# Adobe/MS documents 1, 2, 2.5, 3; Apple also has 4.
# From Apple docs it seems that we do not need to care
# about the exact version, so if you get this error, you can
# try to remove this check altogether.
raise TTFError('Unknown post table version %d.%04x' % (ver_maj, ver_min))
self.italicAngle = self.read_short() + self.read_ushort() / 65536.0
self.underlinePosition = self.read_short()
self.underlineThickness = self.read_short()
isFixedPitch = self.read_ulong()
self.flags = FF_SYMBOLIC # All fonts that contain characters
# outside the original Adobe character
# set are considered "symbolic".
if self.italicAngle!= 0:
self.flags = self.flags | FF_ITALIC
if usWeightClass >= 600: # FW_REGULAR == 500, FW_SEMIBOLD == 600
self.flags = self.flags | FF_FORCEBOLD
if isFixedPitch:
self.flags = self.flags | FF_FIXED
# XXX: FF_SERIF? FF_SCRIPT? FF_ALLCAP? FF_SMALLCAP?
# hhea - Horizontal header table
self.seek_table("hhea")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj != 1:
raise TTFError('Unknown hhea table version %d.%04x' % (ver_maj, ver_min))
self.skip(28)
metricDataFormat = self.read_ushort()
if metricDataFormat != 0:
raise TTFError('Unknown horizontal metric data format (%d)' % metricDataFormat)
numberOfHMetrics = self.read_ushort()
if numberOfHMetrics == 0:
raise TTFError('Number of horizontal metrics is 0')
# maxp - Maximum profile table
self.seek_table("maxp")
ver_maj, ver_min = self.read_ushort(), self.read_ushort()
if ver_maj != 1:
raise TTFError('Unknown maxp table version %d.%04x' % (ver_maj, ver_min))
numGlyphs = self.read_ushort()
if not charInfo:
self.charToGlyph = None
self.defaultWidth = None
self.charWidths = None
return
if glyphDataFormat != 0:
raise TTFError('Unknown glyph data format (%d)' % glyphDataFormat)
# cmap - Character to glyph index mapping table
cmap_offset = self.seek_table("cmap")
self.skip(2)
cmapTableCount = self.read_ushort()
unicode_cmap_offset = None
for n in range(cmapTableCount):
platformID = self.read_ushort()
encodingID = self.read_ushort()
offset = self.read_ulong()
if platformID == 3 and encodingID == 1: # Microsoft, Unicode
format = self.get_ushort(cmap_offset + offset)
if format == 4:
unicode_cmap_offset = cmap_offset + offset
break
elif platformID == 0: # Unicode -- assume all encodings are compatible
format = self.get_ushort(cmap_offset + offset)
if format == 4:
unicode_cmap_offset = cmap_offset + offset
break
if unicode_cmap_offset is None:
raise TTFError('Font does not have cmap for Unicode (platform 3, encoding 1, format 4 or platform 0 any encoding format 4)')
self.seek(unicode_cmap_offset + 2)
length = self.read_ushort()
limit = unicode_cmap_offset + length
self.skip(2)
segCount = int(self.read_ushort() / 2.0)
self.skip(6)
endCount = list(map(lambda x, self=self: self.read_ushort(), range(segCount)))
self.skip(2)
startCount = list(map(lambda x, self=self: self.read_ushort(), range(segCount)))
idDelta =list(map(lambda x, self=self: self.read_short(), range(segCount)))
idRangeOffset_start = self._pos
idRangeOffset = list(map(lambda x, self=self: self.read_ushort(), range(segCount)))
# Now it gets tricky.
glyphToChar = {}
charToGlyph = {}
for n in range(segCount):
for unichar in range(startCount[n], endCount[n] + 1):
if idRangeOffset[n] == 0:
glyph = (unichar + idDelta[n]) & 0xFFFF
else:
offset = (unichar - startCount[n]) * 2 + idRangeOffset[n]
offset = idRangeOffset_start + 2 * n + offset
if offset >= limit:
# workaround for broken fonts (like Thryomanes)
glyph = 0
else:
glyph = self.get_ushort(offset)
if glyph != 0:
glyph = (glyph + idDelta[n]) & 0xFFFF
charToGlyph[unichar] = glyph
if glyph in glyphToChar:
glyphToChar[glyph].append(unichar)
else:
glyphToChar[glyph] = [unichar]
self.charToGlyph = charToGlyph
# hmtx - Horizontal metrics table
# (needs data from hhea, maxp, and cmap tables)
self.seek_table("hmtx")
aw = None
self.charWidths = {}
self.hmetrics = []
for glyph in range(numberOfHMetrics):
# advance width and left side bearing. lsb is actually signed
# short, but we don't need it anyway (except for subsetting)
aw, lsb = self.read_ushort(), self.read_ushort()
self.hmetrics.append((aw, lsb))
aw = scale(aw)
if glyph == 0:
self.defaultWidth = aw
if glyph in glyphToChar:
for char in glyphToChar[glyph]:
self.charWidths[char] = aw
for glyph in range(numberOfHMetrics, numGlyphs):
# the rest of the table only lists advance left side bearings.
# so we reuse aw set by the last iteration of the previous loop
lsb = self.read_ushort()
self.hmetrics.append((aw, lsb))
if glyph in glyphToChar:
for char in glyphToChar[glyph]:
self.charWidths[char] = aw
# loca - Index to location
self.seek_table('loca')
self.glyphPos = []
if indexToLocFormat == 0:
for n in range(numGlyphs + 1):
self.glyphPos.append(self.read_ushort() << 1)
elif indexToLocFormat == 1:
for n in range(numGlyphs + 1):
self.glyphPos.append(self.read_ulong())
else:
raise TTFError('Unknown location table format (%d)' % indexToLocFormat)
# Subsetting
def makeSubset(self, subset):
"""Create a subset of a TrueType font"""
output = TTFontMaker()
# Build a mapping of glyphs in the subset to glyph numbers in
# the original font. Also build a mapping of UCS codes to
# glyph values in the new font.
# Start with 0 -> 0: "missing character"
glyphMap = [0] # new glyph index -> old glyph index
glyphSet = {0:0} # old glyph index -> new glyph index
codeToGlyph = {} # unicode -> new glyph index
for code in subset:
if code in self.charToGlyph:
originalGlyphIdx = self.charToGlyph[code]
else:
originalGlyphIdx = 0
if originalGlyphIdx not in glyphSet:
glyphSet[originalGlyphIdx] = len(glyphMap)
glyphMap.append(originalGlyphIdx)
codeToGlyph[code] = glyphSet[originalGlyphIdx]
# Also include glyphs that are parts of composite glyphs
start = self.get_table_pos('glyf')[0]
n = 0
while n < len(glyphMap):
originalGlyphIdx = glyphMap[n]
glyphPos = self.glyphPos[originalGlyphIdx]
glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
n += 1
if not glyphLen: continue
self.seek(start + glyphPos)
numberOfContours = self.read_short()
if numberOfContours < 0:
# composite glyph
self.skip(8)
flags = GF_MORE_COMPONENTS
while flags & GF_MORE_COMPONENTS:
flags = self.read_ushort()
glyphIdx = self.read_ushort()
if glyphIdx not in glyphSet:
glyphSet[glyphIdx] = len(glyphMap)
glyphMap.append(glyphIdx)
if flags & GF_ARG_1_AND_2_ARE_WORDS:
self.skip(4)
else:
self.skip(2)
if flags & GF_WE_HAVE_A_SCALE:
self.skip(2)
elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
self.skip(4)
elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
self.skip(8)
numGlyphs = n = len(glyphMap)
while n > 1 and self.hmetrics[n][0] == self.hmetrics[n - 1][0]:
n -= 1
numberOfHMetrics = n
# The following tables are simply copied from the original
for tag in ('name', 'OS/2', 'cvt ', 'fpgm', 'prep'):
try:
output.add(tag, self.get_table(tag))
except KeyError:
# Apparently some of the tables are optional (cvt, fpgm, prep).
# The lack of the required ones (name, OS/2) would have already
# been caught before.
pass
# post - PostScript
post = b"\x00\x03\x00\x00" + self.get_table('post')[4:16] + b"\x00" * 16
output.add('post', post)
# hhea - Horizontal Header
hhea = self.get_table('hhea')
hhea = _set_ushort(hhea, 34, numberOfHMetrics)
output.add('hhea', hhea)
# maxp - Maximum Profile
maxp = self.get_table('maxp')
maxp = _set_ushort(maxp, 4, numGlyphs)
output.add('maxp', maxp)
# cmap - Character to glyph mapping
# XXX maybe use format 0 if possible, not 6?
entryCount = len(subset)
length = 10 + entryCount * 2
cmap = [0, 1, # version, number of tables
1, 0, 0,12, # platform, encoding, offset (hi,lo)
6, length, 0, # format, length, language
0,
entryCount] + \
[codeToGlyph.get(code) for code in subset]
cmap = pack(*([">%dH" % len(cmap)] + cmap))
output.add('cmap', cmap)
# hmtx - Horizontal Metrics
hmtx = []
for n in range(numGlyphs):
originalGlyphIdx = glyphMap[n]
aw, lsb = self.hmetrics[originalGlyphIdx]
if n < numberOfHMetrics:
hmtx.append(int(aw))
hmtx.append(int(lsb))
hmtx = pack(*([">%dH" % len(hmtx)] + hmtx))
output.add('hmtx', hmtx)
# glyf - Glyph data
glyphData = self.get_table('glyf')
offsets = []
glyf = []
pos = 0
for n in range(numGlyphs):
offsets.append(pos)
originalGlyphIdx = glyphMap[n]
glyphPos = self.glyphPos[originalGlyphIdx]
glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
data = glyphData[glyphPos:glyphPos+glyphLen]
# Fix references in composite glyphs
if glyphLen > 2 and unpack(">h", data[:2])[0] < 0:
# composite glyph
pos_in_glyph = 10
flags = GF_MORE_COMPONENTS
while flags & GF_MORE_COMPONENTS:
flags = unpack(">H", data[pos_in_glyph:pos_in_glyph+2])[0]
glyphIdx = unpack(">H", data[pos_in_glyph+2:pos_in_glyph+4])[0]
data = _set_ushort(data, pos_in_glyph + 2, glyphSet[glyphIdx])
pos_in_glyph = pos_in_glyph + 4
if flags & GF_ARG_1_AND_2_ARE_WORDS:
pos_in_glyph = pos_in_glyph + 4
else:
pos_in_glyph = pos_in_glyph + 2
if flags & GF_WE_HAVE_A_SCALE:
pos_in_glyph = pos_in_glyph + 2
elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
pos_in_glyph = pos_in_glyph + 4
elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
pos_in_glyph = pos_in_glyph + 8
glyf.append(data)
pos = pos + glyphLen
if pos % 4 != 0:
padding = 4 - pos % 4
glyf.append(b'\0' * padding)
pos = pos + padding
offsets.append(pos)
output.add('glyf', b"".join(glyf))
# loca - Index to location
loca = []
if (pos + 1) >> 1 > 0xFFFF:
indexToLocFormat = 1 # long format
for offset in offsets:
loca.append(offset)
loca = pack(*([">%dL" % len(loca)] + loca))
else:
indexToLocFormat = 0 # short format
for offset in offsets:
loca.append(offset >> 1)
loca = pack(*([">%dH" % len(loca)] + loca))
output.add('loca', loca)
# head - Font header
head = self.get_table('head')
head = _set_ushort(head, 50, indexToLocFormat)
output.add('head', head)
return output.makeStream()
#
# TrueType font embedding
#
# PDF font flags (see PDF Reference Guide table 5.19)
FF_FIXED = 1 << 1-1
FF_SERIF = 1 << 2-1
FF_SYMBOLIC = 1 << 3-1
FF_SCRIPT = 1 << 4-1
FF_NONSYMBOLIC = 1 << 6-1
FF_ITALIC = 1 << 7-1
FF_ALLCAP = 1 << 17-1
FF_SMALLCAP = 1 << 18-1
FF_FORCEBOLD = 1 << 19-1
class TTFontFace(TTFontFile, pdfmetrics.TypeFace):
"""TrueType typeface.
Conceptually similar to a single byte typeface, but the glyphs are
identified by UCS character codes instead of glyph names."""
def __init__(self, filename, validate=0, subfontIndex=0):
"Loads a TrueType font from filename."
pdfmetrics.TypeFace.__init__(self, None)
TTFontFile.__init__(self, filename, validate=validate, subfontIndex=subfontIndex)
def getCharWidth(self, code):
"Returns the width of character U+<code>"
return self.charWidths.get(code, self.defaultWidth)
def addSubsetObjects(self, doc, fontname, subset):
"""Generate a TrueType font subset and add it to the PDF document.
Returns a PDFReference to the new FontDescriptor object."""
fontFile = pdfdoc.PDFStream()
fontFile.content = self.makeSubset(subset)
fontFile.dictionary['Length1'] = len(fontFile.content)
if doc.compression:
fontFile.filters = [pdfdoc.PDFZCompress]
fontFileRef = doc.Reference(fontFile, 'fontFile:%s(%s)' % (self.filename, fontname))
flags = self.flags & ~ FF_NONSYMBOLIC
flags = flags | FF_SYMBOLIC
fontDescriptor = pdfdoc.PDFDictionary({
'Type': '/FontDescriptor',
'Ascent': self.ascent,
'CapHeight': self.capHeight,
'Descent': self.descent,
'Flags': flags,
'FontBBox': pdfdoc.PDFArray(self.bbox),
'FontName': pdfdoc.PDFName(fontname),
'ItalicAngle': self.italicAngle,
'StemV': self.stemV,
'FontFile2': fontFileRef,
})
return doc.Reference(fontDescriptor, 'fontDescriptor:' + fontname)
class TTEncoding:
"""Encoding for TrueType fonts (always UTF-8).
TTEncoding does not directly participate in PDF object creation, since
we need a number of different 8-bit encodings for every generated font
subset. TTFont itself cares about that."""
def __init__(self):
self.name = "UTF-8"
class TTFont:
"""Represents a TrueType font.
Its encoding is always UTF-8.
Note: you cannot use the same TTFont object for different documents
at the same time.
Example of usage:
font = ttfonts.TTFont('PostScriptFontName', '/path/to/font.ttf')
pdfmetrics.registerFont(font)
canvas.setFont('PostScriptFontName', size)
canvas.drawString(x, y, "Some text encoded in UTF-8")
"""
class State:
namePrefix = 'F'
def __init__(self,asciiReadable=None):
self.assignments = {}
self.nextCode = 0
self.internalName = None
self.frozen = 0
if asciiReadable is None:
asciiReadable = rl_config.ttfAsciiReadable
if asciiReadable:
# Let's add the first 128 unicodes to the 0th subset, so ' '
# always has code 32 (for word spacing to work) and the ASCII
# output is readable
subset0 = list(range(128))
self.subsets = [subset0]
for n in subset0:
self.assignments[n] = n
self.nextCode = 128
else:
self.subsets = [[32]*33]
self.assignments[32] = 32
_multiByte = 1 # We want our own stringwidth
_dynamicFont = 1 # We want dynamic subsetting
def __init__(self, name, filename, validate=0, subfontIndex=0,asciiReadable=None):
"""Loads a TrueType font from filename.
        If validate is set to a false value, skips checksum validation. This
can save time, especially if the font is large.
"""
self.fontName = name
self.face = TTFontFace(filename, validate=validate, subfontIndex=subfontIndex)
self.encoding = TTEncoding()
from weakref import WeakKeyDictionary
self.state = WeakKeyDictionary()
if asciiReadable is None:
asciiReadable = rl_config.ttfAsciiReadable
self._asciiReadable = asciiReadable
def _py_stringWidth(self, text, size, encoding='utf-8'):
"Calculate text width"
if not isUnicodeType(text):
text = text.decode(encoding or 'utf-8') # encoding defaults to utf-8
g = self.face.charWidths.get
dw = self.face.defaultWidth
return 0.001*size*sum([g(ord(u),dw) for u in text])
stringWidth = _py_stringWidth
def _assignState(self,doc,asciiReadable=None,namePrefix=None):
'''convenience function for those wishing to roll their own state properties'''
if asciiReadable is None:
asciiReadable = self._asciiReadable
try:
state = self.state[doc]
except KeyError:
state = self.state[doc] = TTFont.State(asciiReadable)
if namePrefix is not None:
state.namePrefix = namePrefix
return state
def splitString(self, text, doc, encoding='utf-8'):
"""Splits text into a number of chunks, each of which belongs to a
single subset. Returns a list of tuples (subset, string). Use subset
numbers with getSubsetInternalName. Doc is needed for distinguishing
subsets when building different documents at the same time."""
asciiReadable = self._asciiReadable
try: state = self.state[doc]
except KeyError: state = self.state[doc] = TTFont.State(asciiReadable)
curSet = -1
cur = []
results = []
if not isUnicodeType(text):
text = text.decode(encoding or 'utf-8') # encoding defaults to utf-8
assignments = state.assignments
subsets = state.subsets
for code in map(ord,text):
if code in assignments:
n = assignments[code]
else:
if state.frozen:
raise pdfdoc.PDFError("Font %s is already frozen, cannot add new character U+%04X" % (self.fontName, code))
n = state.nextCode
if n&0xFF==32:
# make code 32 always be a space character
if n!=32: subsets[n >> 8].append(32)
state.nextCode += 1
n = state.nextCode
state.nextCode += 1
assignments[code] = n
if n>32:
if not(n&0xFF): subsets.append([])
subsets[n >> 8].append(code)
else:
subsets[0][n] = code
if (n >> 8) != curSet:
if cur:
results.append((curSet, ''.join(map(chr,cur))))
curSet = (n >> 8)
cur = []
cur.append(n & 0xFF)
if cur:
results.append((curSet,''.join(map(chr,cur))))
return results
def getSubsetInternalName(self, subset, doc):
"""Returns the name of a PDF Font object corresponding to a given
subset of this dynamic font. Use this function instead of
PDFDocument.getInternalFontName."""
try: state = self.state[doc]
except KeyError: state = self.state[doc] = TTFont.State(self._asciiReadable)
if subset < 0 or subset >= len(state.subsets):
raise IndexError('Subset %d does not exist in font %s' % (subset, self.fontName))
if state.internalName is None:
state.internalName = state.namePrefix +repr(len(doc.fontMapping) + 1)
doc.fontMapping[self.fontName] = '/' + state.internalName
doc.delayedFonts.append(self)
return '/%s+%d' % (state.internalName, subset)
def addObjects(self, doc):
"""Makes one or more PDF objects to be added to the document. The
caller supplies the internal name to be used (typically F1, F2, ... in
sequence).
This method creates a number of Font and FontDescriptor objects. Every
FontDescriptor is a (no more than) 256 character subset of the original
TrueType font."""
try: state = self.state[doc]
except KeyError: state = self.state[doc] = TTFont.State(self._asciiReadable)
state.frozen = 1
for n,subset in enumerate(state.subsets):
internalName = self.getSubsetInternalName(n, doc)[1:]
baseFontName = "%s+%s%s" % (SUBSETN(n),self.face.name,self.face.subfontNameX)
pdfFont = pdfdoc.PDFTrueTypeFont()
pdfFont.__Comment__ = 'Font %s subset %d' % (self.fontName, n)
pdfFont.Name = internalName
pdfFont.BaseFont = baseFontName
pdfFont.FirstChar = 0
pdfFont.LastChar = len(subset) - 1
widths = map(self.face.getCharWidth, subset)
pdfFont.Widths = pdfdoc.PDFArray(widths)
cmapStream = pdfdoc.PDFStream()
cmapStream.content = makeToUnicodeCMap(baseFontName, subset)
if doc.compression:
cmapStream.filters = [pdfdoc.PDFZCompress]
pdfFont.ToUnicode = doc.Reference(cmapStream, 'toUnicodeCMap:' + baseFontName)
pdfFont.FontDescriptor = self.face.addSubsetObjects(doc, baseFontName, subset)
# link it in
ref = doc.Reference(pdfFont, internalName)
fontDict = doc.idToObject['BasicFonts'].dict
fontDict[internalName] = pdfFont
del self.state[doc]
try:
from _rl_accel import _instanceStringWidthTTF
import new
TTFont.stringWidth = new.instancemethod(_instanceStringWidthTTF,None,TTFont)
except ImportError:
pass
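# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  It assumes a
# TrueType file called 'Vera.ttf' is reachable on reportlab's font search
# path; the face name 'Vera' and the sample text are arbitrary demo choices.
if __name__ == '__main__':
    from reportlab.pdfbase import pdfmetrics
    try:
        pdfmetrics.registerFont(TTFont('Vera', 'Vera.ttf'))
        print('Width of "Hello World" at 12pt: %s'
              % pdfmetrics.stringWidth('Hello World', 'Vera', 12))
    except Exception as err:  # e.g. the font file is not installed
        print('TTFont demo skipped: %s' % err)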
| bsd-3-clause | 468,004,839,853,736,800 | 38.67537 | 179 | 0.566653 | false | 3.866254 | false | false | false |
lightalchemist/ML-algorithms | dim_reduction/spectral_embedding.py | 1 | 1410 | # -*- coding: utf-8 -*-
import numpy as np
def compute_pairwise_distance_matrix(X, k):
"""Compute pairwise distances between each point in X
and its k-nearest neighbors."""
from scipy.spatial import KDTree
kdtree = KDTree(X)
    A = np.zeros((X.shape[0], X.shape[0]), dtype=float)
for i, x in enumerate(X):
distances, idxs = kdtree.query(x, k+1) # k+1 as one pt is the pt itself.
for d, j in zip(distances, idxs):
A[i, j] = d**2 # Store squared euclidean distance
return A
def transform(X, k=2, n_neighbors=5, sigma=1, eps=0.0001):
"""Perform dimension reduction using the eigenvectors
of the graph laplacian computed from a given set of data.
Project data X from original dimension n to k,
where X is a numpy array of dimension [m x n] and k <= n.
"""
W = compute_pairwise_distance_matrix(X, n_neighbors)
W = np.maximum(W, W.T) # Ensure W symmetric.
W[W > 0] = np.exp(- W[W > 0] / (2 * sigma**2)) # Apply gaussian kernel
D = np.diag(np.sum(W, axis=1)) # Row sum of W
L = D - W
# L = L + eps * np.eye(len(X)) # Improve the condition of the graph laplacian
Dinvsqrt = np.sqrt(np.linalg.pinv(D))
L = Dinvsqrt.dot(L).dot(Dinvsqrt) # Normalized graph laplacian
V, U = np.linalg.eigh(L)
idx = np.argsort(V) # Sort in ascending order
V, U = V[idx], U[:, idx]
return U[:, :k]
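# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module).  The synthetic data
# and parameter values are illustrative assumptions only: two well-separated
# Gaussian blobs in 5-D should map to two tight groups in the 2-D embedding.
if __name__ == '__main__':
    np.random.seed(0)
    blob_a = np.random.randn(20, 5)
    blob_b = np.random.randn(20, 5) + 10.0
    X_demo = np.vstack([blob_a, blob_b])
    Y_demo = transform(X_demo, k=2, n_neighbors=5, sigma=1.0)
    print(Y_demo.shape)  # -> (40, 2)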
| mit | 6,210,406,793,321,975,000 | 33.390244 | 82 | 0.614184 | false | 3.019272 | false | false | false |
mick-d/nipype | nipype/interfaces/nipy/model.py | 7 | 13161 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import range, str, bytes
import os
import nibabel as nb
import numpy as np
from ...utils.misc import package_check
from ...utils import NUMPY_MMAP
from ..base import (BaseInterface, TraitedSpec, traits, File, OutputMultiPath,
BaseInterfaceInputSpec, isdefined)
have_nipy = True
try:
package_check('nipy')
except Exception as e:
have_nipy = False
else:
import nipy.modalities.fmri.design_matrix as dm
import nipy.modalities.fmri.glm as GLM
if have_nipy:
try:
BlockParadigm = dm.BlockParadigm
except AttributeError:
from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
class FitGLMInputSpec(BaseInterfaceInputSpec):
session_info = traits.List(minlen=1, maxlen=1, mandatory=True,
desc=('Session specific information generated by'
' ``modelgen.SpecifyModel``, FitGLM does '
'not support multiple runs uless they are '
'concatenated (see SpecifyModel options)'))
hrf_model = traits.Enum('Canonical', 'Canonical With Derivative', 'FIR',
desc=("that specifies the hemodynamic reponse "
"function it can be 'Canonical', 'Canonical "
"With Derivative' or 'FIR'"), usedefault=True)
drift_model = traits.Enum("Cosine", "Polynomial", "Blank",
desc=("string that specifies the desired drift "
"model, to be chosen among 'Polynomial', "
"'Cosine', 'Blank'"), usedefault=True)
TR = traits.Float(mandatory=True)
model = traits.Enum("ar1", "spherical",
desc=("autoregressive mode is available only for the "
"kalman method"), usedefault=True)
method = traits.Enum("kalman", "ols",
desc=("method to fit the model, ols or kalma; kalman "
"is more time consuming but it supports "
"autoregressive model"), usedefault=True)
mask = traits.File(exists=True,
desc=("restrict the fitting only to the region defined "
"by this mask"))
normalize_design_matrix = traits.Bool(False,
desc=("normalize (zscore) the "
"regressors before fitting"),
usedefault=True)
save_residuals = traits.Bool(False, usedefault=True)
plot_design_matrix = traits.Bool(False, usedefault=True)
class FitGLMOutputSpec(TraitedSpec):
beta = File(exists=True)
nvbeta = traits.Any()
s2 = File(exists=True)
dof = traits.Any()
constants = traits.Any()
axis = traits.Any()
reg_names = traits.List()
residuals = traits.File()
a = File(exists=True)
class FitGLM(BaseInterface):
'''
Fit GLM model based on the specified design. Supports only single or concatenated runs.
'''
input_spec = FitGLMInputSpec
output_spec = FitGLMOutputSpec
def _run_interface(self, runtime):
session_info = self.inputs.session_info
functional_runs = self.inputs.session_info[0]['scans']
if isinstance(functional_runs, (str, bytes)):
functional_runs = [functional_runs]
nii = nb.load(functional_runs[0])
data = nii.get_data()
if isdefined(self.inputs.mask):
mask = nb.load(self.inputs.mask).get_data() > 0
else:
mask = np.ones(nii.shape[:3]) == 1
timeseries = data.copy()[mask, :]
del data
for functional_run in functional_runs[1:]:
nii = nb.load(functional_run, mmap=NUMPY_MMAP)
data = nii.get_data()
npdata = data.copy()
del data
timeseries = np.concatenate((timeseries, npdata[mask, :]), axis=1)
del npdata
nscans = timeseries.shape[1]
if 'hpf' in list(session_info[0].keys()):
hpf = session_info[0]['hpf']
drift_model = self.inputs.drift_model
else:
hpf = 0
drift_model = "Blank"
reg_names = []
for reg in session_info[0]['regress']:
reg_names.append(reg['name'])
reg_vals = np.zeros((nscans, len(reg_names)))
for i in range(len(reg_names)):
reg_vals[:, i] = np.array(session_info[0]['regress'][i]['val']).reshape(1, -1)
frametimes = np.linspace(0, (nscans - 1) * self.inputs.TR, nscans)
conditions = []
onsets = []
duration = []
for i, cond in enumerate(session_info[0]['cond']):
onsets += cond['onset']
conditions += [cond['name']] * len(cond['onset'])
if len(cond['duration']) == 1:
duration += cond['duration'] * len(cond['onset'])
else:
duration += cond['duration']
if conditions:
paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration)
else:
paradigm = None
design_matrix, self._reg_names = dm.dmtx_light(frametimes, paradigm, drift_model=drift_model, hfcut=hpf,
hrf_model=self.inputs.hrf_model,
add_regs=reg_vals,
add_reg_names=reg_names
)
if self.inputs.normalize_design_matrix:
for i in range(len(self._reg_names) - 1):
design_matrix[:, i] = ((design_matrix[:, i] -
design_matrix[:, i].mean()) /
design_matrix[:, i].std())
if self.inputs.plot_design_matrix:
import pylab
pylab.pcolor(design_matrix)
pylab.savefig("design_matrix.pdf")
pylab.close()
pylab.clf()
glm = GLM.GeneralLinearModel()
glm.fit(timeseries.T, design_matrix, method=self.inputs.method, model=self.inputs.model)
self._beta_file = os.path.abspath("beta.nii")
beta = np.zeros(mask.shape + (glm.beta.shape[0],))
beta[mask, :] = glm.beta.T
nb.save(nb.Nifti1Image(beta, nii.affine), self._beta_file)
self._s2_file = os.path.abspath("s2.nii")
s2 = np.zeros(mask.shape)
s2[mask] = glm.s2
nb.save(nb.Nifti1Image(s2, nii.affine), self._s2_file)
if self.inputs.save_residuals:
explained = np.dot(design_matrix, glm.beta)
residuals = np.zeros(mask.shape + (nscans,))
residuals[mask, :] = timeseries - explained.T
self._residuals_file = os.path.abspath("residuals.nii")
nb.save(nb.Nifti1Image(residuals, nii.affine), self._residuals_file)
self._nvbeta = glm.nvbeta
self._dof = glm.dof
self._constants = glm._constants
self._axis = glm._axis
if self.inputs.model == "ar1":
self._a_file = os.path.abspath("a.nii")
a = np.zeros(mask.shape)
a[mask] = glm.a.squeeze()
nb.save(nb.Nifti1Image(a, nii.affine), self._a_file)
self._model = glm.model
self._method = glm.method
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["beta"] = self._beta_file
outputs["nvbeta"] = self._nvbeta
outputs["s2"] = self._s2_file
outputs["dof"] = self._dof
outputs["constants"] = self._constants
outputs["axis"] = self._axis
outputs["reg_names"] = self._reg_names
if self.inputs.model == "ar1":
outputs["a"] = self._a_file
if self.inputs.save_residuals:
outputs["residuals"] = self._residuals_file
return outputs
class EstimateContrastInputSpec(BaseInterfaceInputSpec):
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)))))),
desc="""List of contrasts with each contrast being a list of the form:
[('name', 'stat', [condition list], [weight list], [session list])]. if
session list is None or not provided, all sessions are used. For F
contrasts, the condition list should contain previously defined
T-contrasts.""", mandatory=True)
beta = File(exists=True, desc="beta coefficients of the fitted model", mandatory=True)
nvbeta = traits.Any(mandatory=True)
s2 = File(exists=True, desc="squared variance of the residuals", mandatory=True)
dof = traits.Any(desc="degrees of freedom", mandatory=True)
constants = traits.Any(mandatory=True)
axis = traits.Any(mandatory=True)
reg_names = traits.List(mandatory=True)
mask = traits.File(exists=True)
class EstimateContrastOutputSpec(TraitedSpec):
stat_maps = OutputMultiPath(File(exists=True))
z_maps = OutputMultiPath(File(exists=True))
p_maps = OutputMultiPath(File(exists=True))
class EstimateContrast(BaseInterface):
'''
Estimate contrast of a fitted model.
'''
input_spec = EstimateContrastInputSpec
output_spec = EstimateContrastOutputSpec
def _run_interface(self, runtime):
beta_nii = nb.load(self.inputs.beta)
if isdefined(self.inputs.mask):
mask = nb.load(self.inputs.mask).get_data() > 0
else:
mask = np.ones(beta_nii.shape[:3]) == 1
glm = GLM.GeneralLinearModel()
nii = nb.load(self.inputs.beta)
glm.beta = beta_nii.get_data().copy()[mask, :].T
glm.nvbeta = self.inputs.nvbeta
glm.s2 = nb.load(self.inputs.s2).get_data().copy()[mask]
glm.dof = self.inputs.dof
glm._axis = self.inputs.axis
glm._constants = self.inputs.constants
reg_names = self.inputs.reg_names
self._stat_maps = []
self._p_maps = []
self._z_maps = []
for contrast_def in self.inputs.contrasts:
name = contrast_def[0]
_ = contrast_def[1]
contrast = np.zeros(len(reg_names))
for i, reg_name in enumerate(reg_names):
if reg_name in contrast_def[2]:
idx = contrast_def[2].index(reg_name)
contrast[i] = contrast_def[3][idx]
est_contrast = glm.contrast(contrast)
stat_map = np.zeros(mask.shape)
stat_map[mask] = est_contrast.stat().T
stat_map_file = os.path.abspath(name + "_stat_map.nii")
nb.save(nb.Nifti1Image(stat_map, nii.affine), stat_map_file)
self._stat_maps.append(stat_map_file)
p_map = np.zeros(mask.shape)
p_map[mask] = est_contrast.pvalue().T
p_map_file = os.path.abspath(name + "_p_map.nii")
nb.save(nb.Nifti1Image(p_map, nii.affine), p_map_file)
self._p_maps.append(p_map_file)
z_map = np.zeros(mask.shape)
z_map[mask] = est_contrast.zscore().T
z_map_file = os.path.abspath(name + "_z_map.nii")
nb.save(nb.Nifti1Image(z_map, nii.affine), z_map_file)
self._z_maps.append(z_map_file)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["stat_maps"] = self._stat_maps
outputs["p_maps"] = self._p_maps
outputs["z_maps"] = self._z_maps
return outputs
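# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the contrast
# specification format accepted by ``EstimateContrast.inputs.contrasts``.
# The condition names and weights below are made-up assumptions.
def _example_contrast_spec():
    """Build a single T contrast in the documented tuple format."""
    task_gt_rest = ('Task>Rest', 'T', ['Task', 'Rest'], [1.0, -1.0])
    est = EstimateContrast()
    est.inputs.contrasts = [task_gt_rest]
    return est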
| bsd-3-clause | 6,562,482,735,676,853,000 | 39.74613 | 112 | 0.52458 | false | 3.957005 | false | false | false |
X-dark/Flexget | flexget/__init__.py | 1 | 1135 | #!/usr/bin/python
from __future__ import unicode_literals, division, absolute_import
import os
import logging
from flexget import logger
from flexget.options import get_parser
from flexget import plugin
from flexget.manager import Manager
__version__ = '{git}'
log = logging.getLogger('main')
def main(args=None):
"""Main entry point for Command Line Interface"""
logger.initialize()
plugin.load_plugins()
options = get_parser().parse_args(args)
manager = Manager(options)
log_level = logging.getLevelName(options.loglevel.upper())
log_file = os.path.expanduser(manager.options.logfile)
# If an absolute path is not specified, use the config directory.
if not os.path.isabs(log_file):
log_file = os.path.join(manager.config_base, log_file)
logger.start(log_file, log_level)
if options.profile:
try:
import cProfile as profile
except ImportError:
import profile
profile.runctx('manager.start()', globals(), locals(),
os.path.join(manager.config_base, options.profile))
else:
manager.start()
| mit | -4,021,792,851,769,180,700 | 26.682927 | 74 | 0.672247 | false | 3.968531 | false | false | false |
spcui/virt-test | virttest/libvirt_xml/vol_xml.py | 1 | 3408 | """
Module simplifying manipulation of XML described at
http://libvirt.org/
"""
from virttest.libvirt_xml import base, accessors
class VolXMLBase(base.LibvirtXMLBase):
"""
Accessor methods for VolXML class.
Properties:
name: string, operates on XML name tag
        key: string, operates on key tag
        capacity: integer, operates on capacity tag
        allocation: integer, operates on allocation tag
        format: string, operates on type attribute of the target/format tag
        path: string, operates on the target/path tag
"""
__slots__ = base.LibvirtXMLBase.__slots__ + ('name', 'key',
'capacity', 'allocation',
'format', 'path')
__uncompareable__ = base.LibvirtXMLBase.__uncompareable__
__schema_name__ = "storagevol"
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementText('name', self, parent_xpath='/',
tag_name='name')
accessors.XMLElementText('key', self, parent_xpath='/',
tag_name='key')
accessors.XMLElementInt('capacity', self, parent_xpath='/',
tag_name='capacity')
accessors.XMLElementInt('allocation', self, parent_xpath='/',
tag_name='allocation')
accessors.XMLAttribute('format', self, parent_xpath='/target',
tag_name='format', attribute='type')
accessors.XMLElementText('path', self, parent_xpath='/target',
tag_name='path')
super(VolXMLBase, self).__init__(virsh_instance=virsh_instance)
class VolXML(VolXMLBase):
"""
Manipulators of a Virtual Vol through it's XML definition.
"""
__slots__ = VolXMLBase.__slots__
def __init__(self, vol_name='default', virsh_instance=base.virsh):
"""
Initialize new instance with empty XML
"""
super(VolXML, self).__init__(virsh_instance=virsh_instance)
self.xml = u"<volume><name>%s</name></volume>" % vol_name
@staticmethod
def new_from_vol_dumpxml(vol_name, pool_name, virsh_instance=base.virsh):
"""
Return new VolXML instance from virsh vol-dumpxml command
:param vol_name: Name of vol to vol-dumpxml
:param virsh_instance: virsh module or instance to use
:return: New initialized VolXML instance
"""
volxml = VolXML(virsh_instance=virsh_instance)
volxml['xml'] = virsh_instance.vol_dumpxml(vol_name, pool_name)\
.stdout.strip()
return volxml
@staticmethod
def get_vol_details_by_name(vol_name, pool_name, virsh_instance=base.virsh):
"""
Return Vol's uuid by Vol's name.
:param vol_name: Vol's name
:return: Vol's uuid
"""
volume_xml = {}
vol_xml = VolXML.new_from_vol_dumpxml(vol_name, pool_name,
virsh_instance)
volume_xml['key'] = vol_xml.key
volume_xml['path'] = vol_xml.path
volume_xml['format'] = vol_xml.format
volume_xml['capacity'] = vol_xml.capacity
volume_xml['allocation'] = vol_xml.allocation
return volume_xml
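# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module).  It assumes a
# reachable libvirt host with a storage pool named 'default' that contains a
# volume named 'example-vol.img'; both names are arbitrary demo choices.
def _example_volume_summary(vol_name='example-vol.img', pool_name='default'):
    """Dump one volume's XML and return a few of its key facts."""
    vol_xml = VolXML.new_from_vol_dumpxml(vol_name, pool_name)
    return {'key': vol_xml.key,
            'path': vol_xml.path,
            'capacity': vol_xml.capacity}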
| gpl-2.0 | -8,600,053,957,075,828,000 | 35.645161 | 80 | 0.569542 | false | 4.286792 | false | false | false |
switowski/invenio | invenio/modules/submit/models.py | 1 | 19892 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit database models."""
from invenio.ext.sqlalchemy import db
from sqlalchemy.dialects import mysql
from sqlalchemy.schema import Index
class SbmACTION(db.Model):
"""Represent a SbmACTION record."""
__tablename__ = 'sbmACTION'
lactname = db.Column(db.Text, nullable=True)
sactname = db.Column(db.Char(3), nullable=False, server_default='',
primary_key=True)
dir = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
actionbutton = db.Column(db.Text, nullable=True)
statustext = db.Column(db.Text, nullable=True)
class SbmALLFUNCDESCR(db.Model):
"""Represent a SbmALLFUNCDESCR record."""
__tablename__ = 'sbmALLFUNCDESCR'
# FIX ME pk
function = db.Column(db.String(40), nullable=False, server_default='',
primary_key=True)
description = db.Column(db.TinyText, nullable=True)
class SbmAPPROVAL(db.Model):
"""Represent a SbmAPPROVAL record."""
__tablename__ = 'sbmAPPROVAL'
doctype = db.Column(db.String(10), nullable=False,
server_default='')
categ = db.Column(db.String(50), nullable=False,
server_default='')
rn = db.Column(db.String(50), nullable=False, server_default='',
primary_key=True)
status = db.Column(db.String(10), nullable=False,
server_default='')
dFirstReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dLastReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dAction = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
access = db.Column(db.String(20), nullable=False,
server_default='0')
note = db.Column(db.Text, nullable=False)
class SbmCATEGORIES(db.Model):
"""Represent a SbmCATEGORIES record."""
__tablename__ = 'sbmCATEGORIES'
doctype = db.Column(db.String(10), nullable=False, server_default='',
primary_key=True, index=True)
sname = db.Column(db.String(75), nullable=False, server_default='',
primary_key=True, index=True)
lname = db.Column(db.String(75), nullable=False,
server_default='')
score = db.Column(db.TinyInteger(3, unsigned=True), nullable=False,
server_default='0')
class SbmCHECKS(db.Model):
"""Represent a SbmCHECKS record."""
__tablename__ = 'sbmCHECKS'
chname = db.Column(db.String(15), nullable=False, server_default='',
primary_key=True)
chdesc = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
chefi1 = db.Column(db.Text, nullable=True)
chefi2 = db.Column(db.Text, nullable=True)
class SbmCOLLECTION(db.Model):
"""Represents a SbmCOLLECTION record."""
__tablename__ = 'sbmCOLLECTION'
id = db.Column(db.Integer(11), nullable=False,
primary_key=True,
autoincrement=True)
name = db.Column(db.String(100), nullable=False,
server_default='')
class SbmCOLLECTIONSbmCOLLECTION(db.Model):
"""Represents a SbmCOLLECTIONSbmCOLLECTION record."""
__tablename__ = 'sbmCOLLECTION_sbmCOLLECTION'
id = db.Column(db.Integer(11), nullable=False, autoincrement=True,
primary_key=True)
_id_father = db.Column(db.Integer(11), db.ForeignKey(SbmCOLLECTION.id),
nullable=True, name='id_father')
id_son = db.Column(db.Integer(11), db.ForeignKey(SbmCOLLECTION.id),
nullable=False)
catalogue_order = db.Column(db.Integer(11), nullable=False,
server_default='0')
son = db.relationship(
SbmCOLLECTION,
backref=db.backref('father', uselist=False),
single_parent=True,
primaryjoin="and_("
"SbmCOLLECTIONSbmCOLLECTION.id_son==SbmCOLLECTION.id) "
)
father = db.relationship(
SbmCOLLECTION,
backref=db.backref('son', uselist=False),
single_parent=True,
primaryjoin="and_("
"SbmCOLLECTIONSbmCOLLECTION.id_father==SbmCOLLECTION.id) "
)
@db.hybrid_property
def id_father(self):
"""Get id_father."""
return self._id_father
@id_father.setter
def id_father(self, value):
"""Set id_father."""
self._id_father = value or None
class SbmDOCTYPE(db.Model):
"""Represents a SbmDOCTYPE record."""
__tablename__ = 'sbmDOCTYPE'
ldocname = db.Column(db.Text, nullable=True)
sdocname = db.Column(db.String(10), nullable=True,
primary_key=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
description = db.Column(db.Text, nullable=True)
class SbmCOLLECTIONSbmDOCTYPE(db.Model):
"""Represents a SbmCOLLECTIONSbmDOCTYPE record."""
__tablename__ = 'sbmCOLLECTION_sbmDOCTYPE'
id = db.Column(db.Integer(11), nullable=False, autoincrement=True,
primary_key=True)
_id_father = db.Column(db.Integer(11), db.ForeignKey(SbmCOLLECTION.id),
nullable=True, name="id_father")
id_son = db.Column(db.Char(10), db.ForeignKey(SbmDOCTYPE.sdocname),
nullable=False)
catalogue_order = db.Column(db.Integer(11), nullable=False,
server_default='0')
father = db.relationship(
SbmCOLLECTION,
backref=db.backref('sonDoctype', uselist=False),
)
@db.hybrid_property
def id_father(self):
"""Get id_father."""
return self._id_father
@id_father.setter
def id_father(self, value):
"""Set id_father."""
self._id_father = value or None
class SbmCOOKIES(db.Model):
"""Represents a SbmCOOKIES record."""
__tablename__ = 'sbmCOOKIES'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True, autoincrement=True)
name = db.Column(db.String(100), nullable=False)
value = db.Column(db.Text, nullable=True)
uid = db.Column(db.Integer(15), nullable=False)
class SbmCPLXAPPROVAL(db.Model):
"""Represents a SbmCPLXAPPROVAL record."""
__tablename__ = 'sbmCPLXAPPROVAL'
doctype = db.Column(db.String(10), nullable=False,
server_default='')
categ = db.Column(db.String(50), nullable=False,
server_default='')
rn = db.Column(db.String(50), nullable=False, server_default='',
primary_key=True)
type = db.Column(db.String(10), nullable=False,
primary_key=True)
status = db.Column(db.String(10), nullable=False)
id_group = db.Column(db.Integer(15, unsigned=True), nullable=False,
server_default='0')
id_bskBASKET = db.Column(db.Integer(15, unsigned=True), nullable=False,
server_default='0')
id_EdBoardGroup = db.Column(db.Integer(15, unsigned=True), nullable=False,
server_default='0')
dFirstReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dLastReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dEdBoardSel = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dRefereeSel = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dRefereeRecom = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dEdBoardRecom = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dPubComRecom = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dProjectLeaderAction = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
class SbmFIELD(db.Model):
"""Represents a SbmFIELD record."""
__tablename__ = 'sbmFIELD'
subname = db.Column(db.String(13), nullable=True,
primary_key=True)
pagenb = db.Column(db.Integer(11), nullable=True,
primary_key=True, autoincrement=False)
fieldnb = db.Column(db.Integer(11), nullable=True)
fidesc = db.Column(db.String(15), nullable=True,
primary_key=True)
fitext = db.Column(db.Text, nullable=True)
level = db.Column(db.Char(1), nullable=True)
sdesc = db.Column(db.Text, nullable=True)
checkn = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
fiefi1 = db.Column(db.Text, nullable=True)
fiefi2 = db.Column(db.Text, nullable=True)
class SbmFIELDDESC(db.Model):
"""Represents a SbmFIELDDESC record."""
__tablename__ = 'sbmFIELDDESC'
name = db.Column(db.String(15), # db.ForeignKey(SbmFIELD.fidesc),
nullable=False, server_default='', primary_key=True)
alephcode = db.Column(db.String(50), nullable=True)
marccode = db.Column(db.String(50), nullable=False, server_default='')
type = db.Column(db.Char(1), nullable=True)
size = db.Column(db.Integer(11), nullable=True)
rows = db.Column(db.Integer(11), nullable=True)
cols = db.Column(db.Integer(11), nullable=True)
maxlength = db.Column(db.Integer(11), nullable=True)
val = db.Column(db.Text, nullable=True)
fidesc = db.Column(db.Text, nullable=True)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
modifytext = db.Column(db.Text, nullable=True)
fddfi2 = db.Column(db.Text, nullable=True)
cookie = db.Column(db.Integer(11), nullable=True,
server_default='0')
# field = db.relationship(SbmFIELD, backref='fielddescs')
class SbmFORMATEXTENSION(db.Model):
"""Represents a SbmFORMATEXTENSION record."""
__tablename__ = 'sbmFORMATEXTENSION'
id = db.Column(db.Integer(), nullable=False,
primary_key=True, autoincrement=True)
FILE_FORMAT = db.Column(
db.Text().with_variant(mysql.TEXT(50), 'mysql'),
nullable=False)
FILE_EXTENSION = db.Column(
db.Text().with_variant(mysql.TEXT(10), 'mysql'),
nullable=False)
Index('sbmformatextension_file_format_idx',
SbmFORMATEXTENSION.FILE_FORMAT, mysql_length=50)
Index('sbmformatextension_file_extension_idx',
SbmFORMATEXTENSION.FILE_EXTENSION, mysql_length=10)
class SbmFUNCTIONS(db.Model):
"""Represents a SbmFUNCTIONS record."""
__tablename__ = 'sbmFUNCTIONS'
action = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
function = db.Column(db.String(40), nullable=False,
server_default='', primary_key=True)
score = db.Column(db.Integer(11), nullable=False,
server_default='0', primary_key=True)
step = db.Column(db.TinyInteger(4), nullable=False,
server_default='1', primary_key=True)
class SbmFUNDESC(db.Model):
"""Represents a SbmFUNDESC record."""
__tablename__ = 'sbmFUNDESC'
function = db.Column(db.String(40), nullable=False,
server_default='', primary_key=True)
param = db.Column(db.String(40), primary_key=True)
class SbmGFILERESULT(db.Model):
"""Represents a SbmGFILERESULT record."""
__tablename__ = 'sbmGFILERESULT'
id = db.Column(db.Integer(), nullable=False,
primary_key=True, autoincrement=True)
FORMAT = db.Column(
db.Text().with_variant(db.Text(50), 'mysql'),
nullable=False)
RESULT = db.Column(
db.Text().with_variant(db.Text(50), 'mysql'),
nullable=False)
Index('sbmgfileresult_format_idx', SbmGFILERESULT.FORMAT, mysql_length=50)
Index('sbmgfileresult_result_idx', SbmGFILERESULT.RESULT, mysql_length=50)
class SbmIMPLEMENT(db.Model):
"""Represents a SbmIMPLEMENT record."""
__tablename__ = 'sbmIMPLEMENT'
docname = db.Column(db.String(10), nullable=True)
actname = db.Column(db.Char(3), nullable=True)
displayed = db.Column(db.Char(1), nullable=True)
subname = db.Column(db.String(13), nullable=True, primary_key=True)
nbpg = db.Column(db.Integer(11), nullable=True, primary_key=True,
autoincrement=False)
cd = db.Column(db.Date, nullable=True)
md = db.Column(db.Date, nullable=True)
buttonorder = db.Column(db.Integer(11), nullable=True)
statustext = db.Column(db.Text, nullable=True)
level = db.Column(db.Char(1), nullable=False, server_default='')
score = db.Column(db.Integer(11), nullable=False, server_default='0')
stpage = db.Column(db.Integer(11), nullable=False, server_default='0')
endtxt = db.Column(db.String(100), nullable=False, server_default='')
class SbmPARAMETERS(db.Model):
"""Represents a SbmPARAMETERS record."""
__tablename__ = 'sbmPARAMETERS'
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
name = db.Column(db.String(40), nullable=False,
server_default='', primary_key=True)
value = db.Column(db.Text, nullable=False)
class SbmPUBLICATION(db.Model):
"""Represents a SbmPUBLICATION record."""
__tablename__ = 'sbmPUBLICATION'
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
categ = db.Column(db.String(50), nullable=False,
server_default='', primary_key=True)
rn = db.Column(db.String(50), nullable=False, server_default='',
primary_key=True)
status = db.Column(db.String(10), nullable=False, server_default='')
dFirstReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dLastReq = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dAction = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
accessref = db.Column(db.String(20), nullable=False, server_default='')
accessedi = db.Column(db.String(20), nullable=False, server_default='')
access = db.Column(db.String(20), nullable=False, server_default='')
referees = db.Column(db.String(50), nullable=False, server_default='')
authoremail = db.Column(db.String(50), nullable=False,
server_default='')
dRefSelection = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dRefRec = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
dEdiRec = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
accessspo = db.Column(db.String(20), nullable=False, server_default='')
journal = db.Column(db.String(100), nullable=True)
class SbmPUBLICATIONCOMM(db.Model):
"""Represents a SbmPUBLICATIONCOMM record."""
__tablename__ = 'sbmPUBLICATIONCOMM'
id = db.Column(db.Integer(11), nullable=False,
primary_key=True, autoincrement=True)
id_parent = db.Column(db.Integer(11), server_default='0', nullable=True)
rn = db.Column(db.String(100), nullable=False, server_default='')
firstname = db.Column(db.String(100), nullable=True)
secondname = db.Column(db.String(100), nullable=True)
email = db.Column(db.String(100), nullable=True)
date = db.Column(db.String(40), nullable=False, server_default='')
synopsis = db.Column(db.String(255), nullable=False, server_default='')
commentfulltext = db.Column(db.Text, nullable=True)
class SbmPUBLICATIONDATA(db.Model):
"""Represents a SbmPUBLICATIONDATA record."""
__tablename__ = 'sbmPUBLICATIONDATA'
doctype = db.Column(db.String(10), nullable=False,
server_default='', primary_key=True)
editoboard = db.Column(db.String(250), nullable=False, server_default='')
base = db.Column(db.String(10), nullable=False, server_default='')
logicalbase = db.Column(db.String(10), nullable=False, server_default='')
spokesperson = db.Column(db.String(50), nullable=False, server_default='')
class SbmREFEREES(db.Model):
"""Represents a SbmREFEREES record."""
__tablename__ = 'sbmREFEREES'
doctype = db.Column(db.String(10), nullable=False, server_default='')
categ = db.Column(db.String(10), nullable=False, server_default='')
name = db.Column(db.String(50), nullable=False, server_default='')
address = db.Column(db.String(50), nullable=False, server_default='')
rid = db.Column(db.Integer(11), nullable=False, primary_key=True,
autoincrement=True)
class SbmSUBMISSIONS(db.Model):
"""Represents a SbmSUBMISSIONS record."""
__tablename__ = 'sbmSUBMISSIONS'
email = db.Column(db.String(50), nullable=False,
server_default='')
doctype = db.Column(db.String(10), nullable=False,
server_default='')
action = db.Column(db.String(10), nullable=False,
server_default='')
status = db.Column(db.String(10), nullable=False,
server_default='')
id = db.Column(db.String(30), nullable=False,
server_default='')
reference = db.Column(db.String(40), nullable=False,
server_default='')
cd = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
md = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
log_id = db.Column(db.Integer(11), nullable=False,
primary_key=True,
autoincrement=True)
__all__ = ('SbmACTION',
'SbmALLFUNCDESCR',
'SbmAPPROVAL',
'SbmCATEGORIES',
'SbmCHECKS',
'SbmCOLLECTION',
'SbmCOLLECTIONSbmCOLLECTION',
'SbmDOCTYPE',
'SbmCOLLECTIONSbmDOCTYPE',
'SbmCOOKIES',
'SbmCPLXAPPROVAL',
'SbmFIELD',
'SbmFIELDDESC',
'SbmFORMATEXTENSION',
'SbmFUNCTIONS',
'SbmFUNDESC',
'SbmGFILERESULT',
'SbmIMPLEMENT',
'SbmPARAMETERS',
'SbmPUBLICATION',
'SbmPUBLICATIONCOMM',
'SbmPUBLICATIONDATA',
'SbmREFEREES',
'SbmSUBMISSIONS')
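# ----------------------------------------------------------------------------
# Illustrative query sketch (not part of the original module).  It assumes an
# Invenio/Flask application context is active so the models are bound to a
# database session; the doctype value is an arbitrary example.
def _example_categories_for_doctype(doctype='DEMOTEST'):
    """Return (short name, long name) pairs for one submission doctype."""
    query = SbmCATEGORIES.query.filter_by(doctype=doctype).order_by(
        SbmCATEGORIES.score)
    return [(row.sname, row.lname) for row in query]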
| gpl-2.0 | 2,390,014,930,960,900,600 | 36.674242 | 78 | 0.61477 | false | 3.562959 | false | false | false |
rhiever/sklearn-benchmarks | model_code/grid_search/SGDClassifier.py | 1 | 1597 | import sys
import pandas as pd
import numpy as np
import itertools
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import SGDClassifier
from evaluate_model import evaluate_model
dataset = sys.argv[1]
pipeline_components = [RobustScaler, SGDClassifier]
pipeline_parameters = {}
loss_values = ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron']
penalty_values = ['l2', 'l1', 'elasticnet']
alpha_values = [0.000001, 0.00001, 0.0001, 0.001, 0.01]
learning_rate_values = ['constant', 'optimal', 'invscaling']
fit_intercept_values = [True, False]
l1_ratio_values = [0., 0.1, 0.15, 0.25, 0.5, 0.75, 0.9, 1.]
eta0_values = [0.0, 0.01, 0.1, 0.5, 1., 10., 50., 100.]
power_t_values = [0., 0.1, 0.5, 1., 10., 50., 100.]
random_state = [324089]
all_param_combinations = itertools.product(loss_values, penalty_values, alpha_values, learning_rate_values, fit_intercept_values, l1_ratio_values, eta0_values, power_t_values, random_state)
pipeline_parameters[SGDClassifier] = \
[{'loss': loss, 'penalty': penalty, 'alpha': alpha, 'learning_rate': learning_rate, 'fit_intercept': fit_intercept, 'l1_ratio': l1_ratio, 'eta0': eta0, 'power_t': power_t, 'random_state': random_state}
for (loss, penalty, alpha, learning_rate, fit_intercept, l1_ratio, eta0, power_t, random_state) in all_param_combinations
if not (penalty != 'elasticnet' and l1_ratio != 0.15) and not (learning_rate not in ['constant', 'invscaling'] and eta0 != 0.0) and not (learning_rate != 'invscaling' and power_t != 0.5)]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
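# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): each generated entry
# pairs the pipeline component classes above with one parameter dictionary.
# The concrete values below are example assumptions consistent with the grids.
example_params = {'loss': 'hinge', 'penalty': 'elasticnet', 'alpha': 0.001,
                  'learning_rate': 'constant', 'fit_intercept': True,
                  'l1_ratio': 0.5, 'eta0': 0.1, 'power_t': 0.5,
                  'random_state': 324089}
example_pipeline = [RobustScaler(), SGDClassifier(**example_params)]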
| mit | 2,270,912,534,851,505,000 | 52.233333 | 204 | 0.705698 | false | 2.962894 | false | false | false |
sitexa/foobnix | foobnix/gui/top.py | 1 | 2239 | #-*- coding: utf-8 -*-
'''
Created on 22 Sep 2010
@author: ivan
'''
from gi.repository import Gtk
from foobnix.gui.model.signal import FControl
from foobnix.gui.state import LoadSave
from foobnix.gui.menu import MenuBarWidget
from foobnix.helpers.my_widgets import ImageButton
from foobnix.helpers.menu import Popup
from foobnix.fc.fc import FC
from foobnix.util.widget_utils import MenuStyleDecorator
class TopWidgets(FControl, LoadSave, Gtk.HBox):
def __init__(self, controls):
FControl.__init__(self, controls)
Gtk.HBox.__init__(self, False, 0)
self.old_menu = MenuBarWidget(controls)
self.pack_start(self.old_menu.widget, False, False, 0)
self.new_menu_button = ImageButton(Gtk.STOCK_PREFERENCES)
self.new_menu_button.connect("button-press-event", self.on_button_press)
self.pack_start(self.new_menu_button, False, False, 0)
self.pack_start(controls.playback, False, False, 0)
self.pack_start(controls.os, False, False, 0)
self.pack_start(controls.volume, False, False, 0)
self.pack_start(Gtk.SeparatorToolItem.new(), False, False, 0)
self.pack_start(controls.record, False, False, 0)
self.pack_start(controls.seek_bar, True, True, 0)
"""menu init"""
menu = Popup()
decorator = MenuStyleDecorator()
MenuBarWidget(self.controls, menu)
menu.add_separator()
menu.add_item(_("Preferences"), Gtk.STOCK_PREFERENCES, self.controls.show_preferences)
menu.add_separator()
menu.add_item(_("Quit"), Gtk.STOCK_QUIT, self.controls.quit)
decorator.apply(menu)
self.menu = menu
def update_menu_style(self):
if FC().menu_style == "new":
self.old_menu.widget.hide()
self.new_menu_button.show()
else:
self.old_menu.widget.show()
self.new_menu_button.hide()
def on_save(self):
self.controls.volume.on_save()
self.old_menu.on_save()
def on_load(self):
self.controls.volume.on_load()
self.old_menu.on_load()
self.controls.os.on_load()
self.update_menu_style()
def on_button_press(self, w, e):
self.menu.show(e)
| gpl-3.0 | 2,132,427,195,450,941,200 | 30.928571 | 94 | 0.640268 | false | 3.371041 | false | false | false |
inkhey/Transvoyage.py | transvoyage.py | 1 | 13138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# transvoyage.py
# Version 0.3
#
# Copyright 2014 Guénaël Muller <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#TODO
# - conform to PEP 8
# - make comments and TODOs bilingual
# - optimisations
# - improve the reverse translation
# - new languages
# - debug some of the regexes and other details
import sys
import os
import subprocess
import re
import urllib
import argparse
# translations of the article types
listTypeFr=('Ville','Région continentale','Région','Pays' ,'Quartier','Itinéraire','Parc')
listTypeEn=('city' ,'continent' ,'region','country','district','itinerary' ,'park')
# section title equivalences
listSectionFr=["Comprendre","Aller" ,"Circuler" ,"Voir","Faire","Acheter","Manger","Boire un verre / Sortir","Se loger","Aux environs","Travailler","Apprendre","Gérer le Quotidien","Sécurité","Communiquer"]
listSectionEn=["Understand","Get in","Get around","See" ,"Do" ,"Buy" ,"Eat" ,"Drink" ,"Sleep" ,"Go next","Work" ,"Learn" ,"Cope" ,"Stay safe", "Connect" ]
listSectionFr.extend(["Respecter","Parler","Éléctricité"])
listSectionEn.extend(["Respect","Talk","Electricity"])
listSectionFr.extend(["Se préparer","Étapes","Autres destinations","Lire","Douanes","En taxi","Santé","Monnaie","Villes","Régions","Quartiers","Bureaux d'information touristique"])
listSectionEn.extend(["Prepare","Route","Other destinations","Read","Customs","By taxi","Stay healthy","Currency","Cities","Regions","Districts","Tourist office"])
listSectionFr.extend(['Histoire', 'Paysage', 'Flore et faune',"Climat","Randonnée","Droits d'accès","Droits d'accès","Activités","Météo","Nature"])
listSectionEn.extend(['History', 'Landscape', 'Flora and fauna',"Climate","Hiking","Fees/permits","Fees/Permits","Activities","Weather","Wildlife"])
listSectionFr.extend(['À pied', 'En train', 'En bus',"En avion","En ferry","En bateau","En voiture","En vélo","En vélo","En vélo","En motoneige","En stop"])
listSectionEn.extend(['By foot', 'By train', 'By bus',"By plane","By ferry","By boat","By car","By bicycle","By cycle","By bike","By snowmobile","By thumb"])
listSectionFr.extend(['Bon marché', 'Prix moyen','Prix moyen', 'Luxe','Hôtel','Logements','Dans la nature'])
listSectionEn.extend(['Budget', 'Mid-range','Mid range', 'Splurge','Hotel','Lodging','Backcountry'])
# image markup equivalences
listImageFr=["[[Fichier:","[[Fichier:","gauche","droite","vignette","vignette"]
listImageEn=["[[Image:","[[File:","left","right","thumbnail","thumb"]
# listing equivalences
# listing titles
listListingDebFr=["Listing","Faire","Voir","Acheter","Manger","Sortir","Se loger","Destination","Aller","Circuler"]
listListingDebEn=["listing","do" ,"see","buy","eat","drink","sleep","listing","listing","listing"]
# parameters
listListingFr=["nom=","adresse=","téléphone","latitude=","longitude=","email=","direction=","numéro gratuit=","fax=","prix=","description=<!-- ","-->}}","arrivée=","départ=","horaire="]
listListingEn=["name=","address=" ,"phone","lat=","long=","email=","directions=","tollfree=","fax=","price=","content=","}}","checkin=","checkout=","hours="]
# itinerary equivalences
listItineraireFr=["Jour ",": De"," à "]
listItineraireEn=["Day ",":"," to "]
# "IsPartOf" (Dans) equivalences
listDansFr=["Dans"]
listDansEn=["IsPartOf"]
# region map equivalences
# opening of the template
listMapDebFr=["ListeRegions","carte=","taillecarte="]
listMapDebEn=["Regionlist","regionmap=","regionmapsize="]
# parameters
listMapFr=["nomregion0=","couleurregion0=","elementsregion0=","descriptionregion0="]
listMapEn=["region0name=","region0color=","region0items=","region0description="]
# all the regexes, as strings, for each destination language
RegSFr=["(.*)\[\[(Image|Fichier):(.*)\s*$","(=+)(.*)(=+)(.*)","(.*){{(Listing|Faire|Voir|Acheter|Manger|Boire|Sortir|Se loger|Destination|Aller|Circuler)\s(.*)\s*$","(.*)}}[.\s]*$","{{Dans\|(.*)}}\s*$"]
# 0 1 2 3 4
RegSFr.extend(["^(=+)(.*) à (.*)(=+)\s*$","(.*){{ListeRegions(.*)","(.*)region([0-9]+)=(.*)","{{Avancement\|statut=(ébauche|esquisse|utilisable|guide|étoile)\|type=0}}(.*)","(.*){{Climat(.*)","(.*){{Représentation diplomatique"])
# 5 6 7 8 9 10
RegSEn=["(.*)\[\[(Image|File):(.*)\s*$", "(=+)(.*)(=+)(.*)","(.*){{(listing|do|see|buy|eat|drink|sleep)\s(.*)\s*$","(.*)}}[.\s]*$","{{IsPartOf\|(.*)}}\s*$"]
# 0 1 2 3 4
RegSEn.extend(["^(=+)(.*) to (.*)(=+)\s*$","(.*){{Regionlist(.*)","(.*)region(.*)name=(.*)","{{(outline|usable|guide|stub|star)0}}(.*)","(.*){{Climate(.*)","(.*){{flag|(.*)}}(.*){{Listing(.*)"])
# 5 6 7 8 9 10
# article status banner
avFr="{{Avancement|statut=esquisse|type=0}}\n"
avEn="{{outline0}}\n"
# climate table equivalences
listMoisFr=["jan","fev","mar","avr","mai","jui","jul","aou","sep","oct","nov","dec"]
listMoisEn=["jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"]
listClimatFr=["Climat","description"]
listClimatEn=["Climate","description"]
for mois in listMoisFr :
listClimatFr.append("tmin-"+mois)
listClimatFr.append("tmax-"+mois)
listClimatFr.append("prec-"+mois)
for mois in listMoisEn :
listClimatEn.append(mois+"low")
listClimatEn.append(mois+"high")
listClimatEn.append(mois+"precip")
# word kit for each language
ListFr=(listTypeFr,listSectionFr,listImageFr,listListingDebFr,listListingFr,listItineraireFr,listDansFr,listMapDebFr,listMapFr,RegSFr,avFr,listClimatFr)
ListEn=(listTypeEn,listSectionEn,listImageEn,listListingDebEn,listListingEn,listItineraireEn,listDansEn,listMapDebEn,listMapEn,RegSEn,avEn,listClimatEn)
# 0 1 2 3 4 5 6 7 8 9 10 11
ListingsSpecialFr={"Villes":"Ville","Autres destinations":"Destinations","Aux environs":"Destinations"}
# language code -> word kit mapping
ListLang ={"fr":ListFr, "en":ListEn}
# source and destination languages, and the fetched page content
src=ListEn
dest=ListFr
lang="en"
content=""
section=""
# Get the article type (City, Itinerary, District, etc.)
def recupTypeArticle() :
typeArticle = dest[0][0]
listRegex = list()
for mot in src[0] :
s=src[9][8].replace("0",mot)
listRegex.append(re.compile(s))
bOk=True
for line in content:
if (not bOk) :
break
for i in range (len(listRegex)) :
if listRegex[i].search(line) :
typeArticle=dest[0][i]
bOk=False
break
return typeArticle
# Build the page header
def creationEntete (typeArticle,titre) :
s=""
if dest==ListFr : # Si on traduit en français
s="""{{Bannière page}}
{{Info """+typeArticle+"""
| nom=
| nom local=
| région=
| image=
| légende image=
| rivière=
| superficie=
| population=
| population agglomération=
| année population=
| altitude=
| latitude=
| longitude=
| zoom=
| code postal=
| indicatif=
| adresse OT=
| horaire OT=
| téléphone OT=
| numéro gratuit OT=
| email OT=
| facebook OT=
| twitter OT=
| URL OT=
| URL officiel=
| URL touristique=
}}
"""
return s
# Get the images (and translate them)
def recupImage(line) :
s=line
for i in range (len(src[2])) :
s=s.replace(src[2][i],dest[2][i])
return s
# Get the sections and sub-sections
def recupSection(line) :
s=line
for i in range (len(src[1])) :
s=s.replace(src[1][i],dest[1][i])
return s
# Get the listings
def recupListing(line,debut) :
s=line
if debut :
for i in range (len(src[3])) :
s=s.replace(src[3][i],dest[3][i])
for i in range (len(src[4])) :
s=s.replace(src[4][i],dest[4][i])
return s
# Get the itinerary stage sections
def recupItineraire(line) :
s=line
for i in range (len(src[5])) :
s=s.replace(src[5][i],dest[5][i])
return s
# Get the categorisation
def recupDans(line) :
s=line
for i in range (len(src[6])) :
s=s.replace(src[6][i],dest[6][i])
return s
# Get the maps with regions
def recupMap(line,numMap) :
s=line
if numMap == 0 :
for i in range (len(src[7])) :
s=s.replace(src[7][i],dest[7][i])
numPrec=str(numMap-1)
sNumMap=str(numMap)
for i in range (len(src[8])):
src[8][i]=src[8][i].replace(numPrec,sNumMap)
dest[8][i]=dest[8][i].replace(numPrec,sNumMap)
if numMap > 0 :
for i in range (len(src[8])) :
s=s.replace(src[8][i],dest[8][i])
return s
def recupClimat(line) :
s=line
for i in range (len(src[11])):
s=s.replace(src[11][i],dest[11][i])
return s
# The program itself
parser = argparse.ArgumentParser()
parser.add_argument('title',help="nom de la page à convertir" )
parser.add_argument('--src',help="langage source : fr,en,… par défault fr ")
parser.add_argument('--dest',help="langage destination : fr,en,… par défault en ")
parser.add_argument('-d','--debug',action='store_true' ,help="mode debugage : récupération du fichier source en même temps que le résultat")
parser.add_argument('-C','--nocomment',action='store_true' ,help="commentaires désactivé dans le résultat ")
args = parser.parse_args()
bAv=False # Tracks whether the status banner has been placed
result="" # Stores the result
#arguments
title=args.title
if args.src and args.src.lower() in ListLang.keys() :
src=ListLang[args.src.lower()]
lang=args.src.lower()
if args.dest and args.dest.lower() in ListLang.keys() :
dest=ListLang[args.dest.lower()]
url="https://"+lang+".wikivoyage.org/w/index.php?title="+title+"&action=raw"
content=urllib.urlopen(url).readlines()
# get the article type and build the header
TypeArticle=recupTypeArticle()
result +=creationEntete(TypeArticle,title)
# the various regexes
regImg =re.compile(src[9][0])
regSection =re.compile(src[9][1])
regListing =re.compile(src[9][2])
regListingEnd =re.compile(src[9][3])
regDans =re.compile(src[9][4])
regItineraire =re.compile(src[9][5])
regMap =re.compile(src[9][6])
regNomRegion =re.compile(src[9][7])
regClimat =re.compile(src[9][9])
regDiplomat =re.compile(src[9][10])
# Open and read
i=0
numMap=-1
bClimat=False
bListing=False
for line in content:
i=i+1
if numMap>-1 :
if regNomRegion.search(line) :
numMap=numMap+1
result+=recupMap(line,numMap)
if regListingEnd.search(line) :
sNumMap=str(numMap)
for i in range (len(src[8])):
src[8][i]=src[8][i].replace(sNumMap,"0")
dest[8][i]=dest[8][i].replace(sNumMap,"0")
numMap=-1
if bClimat or regClimat.search(line):
result+=recupClimat(line)
bClimat=True
if regListingEnd.search(line) :
bClimat=False
elif bListing :
s=recupListing(line,False)
if regListingEnd.search(line) :
bListing=False
if not regListingEnd.search(s) :
s+="}}"
result+=s
elif regDiplomat.search(line) and dest==ListFr :
s="* {{Représentation diplomatique"
bListing=True
result+=s
elif regMap.search(line) :
numMap=0
result+=recupMap(line,numMap)
elif regItineraire.search(line) :
result+=recupItineraire(line)
elif regListing.search(line) :
s=recupListing(line,True)
if dest==ListFr and section in ListingsSpecialFr.keys() :
s=s.replace('Listing',ListingsSpecialFr[section])
result+=s
bListing=True
elif regImg.search(line) :
result+=recupImage(line)
elif regSection.search(line) :
s=recupSection(line)
if len(s)>3 and s[2] !="=" :
section=s.replace("==","").replace("\n","")
result+=s
elif regDans.search(line) :
s=dest[10].replace("0",TypeArticle.lower()) #avancement
result+=s
bAv=True
result+=recupDans(line)
if (not bAv) : # If the status banner has still not been placed
s=dest[10].replace("0",TypeArticle.lower())
result+=s
# Write the files
title=title.replace("/","-")
title=title.replace(".","-")
if args.nocomment is True :
result=re.sub(r'<!--(.*)(.|\n)(.*)-->',r'\2',result)
with open("./"+title+".txt", "w") as fichier:
fichier.write(result)
if args.debug is True :
with open("./"+title+"_src.txt", "w") as fichier:
fichier.writelines(content)
| gpl-3.0 | -8,850,327,259,524,849,000 | 35.261838 | 229 | 0.639038 | false | 2.674199 | false | false | false |
MultiNet-80211/Hostapd | wpa_supplicant/examples/wpas-dbus-new-signals.py | 1 | 6275 | #!/usr/bin/python
import dbus
import sys, os
import time
import gobject
from dbus.mainloop.glib import DBusGMainLoop
WPAS_DBUS_SERVICE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_INTERFACE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_OPATH = "/fi/w1/wpa_supplicant1"
WPAS_DBUS_INTERFACES_INTERFACE = "fi.w1.wpa_supplicant1.Interface"
WPAS_DBUS_INTERFACES_OPATH = "/fi/w1/wpa_supplicant1/Interfaces"
WPAS_DBUS_BSS_INTERFACE = "fi.w1.wpa_supplicant1.BSS"
WPAS_DBUS_NETWORK_INTERFACE = "fi.w1.wpa_supplicant1.Network"
def byte_array_to_string(s):
import urllib
r = ""
for c in s:
if c >= 32 and c < 127:
r += "%c" % c
else:
r += urllib.quote(chr(c))
return r
def list_interfaces(wpas_obj):
ifaces = wpas_obj.Get(WPAS_DBUS_INTERFACE, 'Interfaces',
dbus_interface=dbus.PROPERTIES_IFACE)
for path in ifaces:
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
ifname = if_obj.Get(WPAS_DBUS_INTERFACES_INTERFACE, 'Ifname',
dbus_interface=dbus.PROPERTIES_IFACE)
print ifname
def interfaceAdded(interface, properties):
print "InterfaceAdded(%s): Ifname=%s" % (interface, properties['Ifname'])
def interfaceRemoved(interface):
print "InterfaceRemoved(%s)" % (interface)
def propertiesChanged(properties):
for i in properties:
print "PropertiesChanged: %s=%s" % (i, properties[i])
def showBss(bss):
net_obj = bus.get_object(WPAS_DBUS_SERVICE, bss)
net = dbus.Interface(net_obj, WPAS_DBUS_BSS_INTERFACE)
# Convert the byte-array for SSID and BSSID to printable strings
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'BSSID',
dbus_interface=dbus.PROPERTIES_IFACE)
bssid = ""
for item in val:
bssid = bssid + ":%02x" % item
bssid = bssid[1:]
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'SSID',
dbus_interface=dbus.PROPERTIES_IFACE)
ssid = byte_array_to_string(val)
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'WPAIE',
dbus_interface=dbus.PROPERTIES_IFACE)
wpa = "no"
if val != None:
wpa = "yes"
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'RSNIE',
dbus_interface=dbus.PROPERTIES_IFACE)
wpa2 = "no"
if val != None:
wpa2 = "yes"
freq = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'Frequency',
dbus_interface=dbus.PROPERTIES_IFACE)
signal = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'Signal',
dbus_interface=dbus.PROPERTIES_IFACE)
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'Rates',
dbus_interface=dbus.PROPERTIES_IFACE)
if len(val) > 0:
maxrate = val[0] / 1000000
else:
maxrate = 0
print " %s :: ssid='%s' wpa=%s wpa2=%s signal=%d rate=%d freq=%d" % (bssid, ssid, wpa, wpa2, signal, maxrate, freq)
def scanDone(success):
gobject.MainLoop().quit()
print "Scan done: success=%s" % success
def scanDone2(success, path=None):
print "Scan done: success=%s [path=%s]" % (success, path)
def bssAdded(bss, properties):
print "BSS added: %s" % (bss)
showBss(bss)
def bssRemoved(bss):
print "BSS removed: %s" % (bss)
def blobAdded(blob):
print "BlobAdded(%s)" % (blob)
def blobRemoved(blob):
print "BlobRemoved(%s)" % (blob)
def networkAdded(network, properties):
print "NetworkAdded(%s)" % (network)
def networkRemoved(network):
print "NetworkRemoved(%s)" % (network)
def networkSelected(network):
print "NetworkSelected(%s)" % (network)
def propertiesChangedInterface(properties):
for i in properties:
print "PropertiesChanged(interface): %s=%s" % (i, properties[i])
def propertiesChangedBss(properties):
for i in properties:
print "PropertiesChanged(BSS): %s=%s" % (i, properties[i])
def propertiesChangedNetwork(properties):
for i in properties:
print "PropertiesChanged(Network): %s=%s" % (i, properties[i])
def main():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
global bus
bus = dbus.SystemBus()
wpas_obj = bus.get_object(WPAS_DBUS_SERVICE, WPAS_DBUS_OPATH)
if len(sys.argv) != 2:
list_interfaces(wpas_obj)
os._exit(1)
wpas = dbus.Interface(wpas_obj, WPAS_DBUS_INTERFACE)
bus.add_signal_receiver(interfaceAdded,
dbus_interface=WPAS_DBUS_INTERFACE,
signal_name="InterfaceAdded")
bus.add_signal_receiver(interfaceRemoved,
dbus_interface=WPAS_DBUS_INTERFACE,
signal_name="InterfaceRemoved")
bus.add_signal_receiver(propertiesChanged,
dbus_interface=WPAS_DBUS_INTERFACE,
signal_name="PropertiesChanged")
ifname = sys.argv[1]
path = wpas.GetInterface(ifname)
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
iface = dbus.Interface(if_obj, WPAS_DBUS_INTERFACES_INTERFACE)
iface.connect_to_signal("ScanDone", scanDone2,
path_keyword='path')
bus.add_signal_receiver(scanDone,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="ScanDone",
path=path)
bus.add_signal_receiver(bssAdded,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSAdded",
path=path)
bus.add_signal_receiver(bssRemoved,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSRemoved",
path=path)
bus.add_signal_receiver(blobAdded,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BlobAdded",
path=path)
bus.add_signal_receiver(blobRemoved,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BlobRemoved",
path=path)
bus.add_signal_receiver(networkAdded,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="NetworkAdded",
path=path)
bus.add_signal_receiver(networkRemoved,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="NetworkRemoved",
path=path)
bus.add_signal_receiver(networkSelected,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="NetworkSelected",
path=path)
bus.add_signal_receiver(propertiesChangedInterface,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="PropertiesChanged",
path=path)
bus.add_signal_receiver(propertiesChangedBss,
dbus_interface=WPAS_DBUS_BSS_INTERFACE,
signal_name="PropertiesChanged")
bus.add_signal_receiver(propertiesChangedNetwork,
dbus_interface=WPAS_DBUS_NETWORK_INTERFACE,
signal_name="PropertiesChanged")
gobject.MainLoop().run()
if __name__ == "__main__":
main()
| gpl-2.0 | 1,071,992,416,003,939,100 | 28.91133 | 124 | 0.69259 | false | 2.785175 | false | false | false |
itsCoder/itscoder.github.io | generate_post.py | 1 | 1594 | # coding=utf-8
import os
import re
FUCK_STR = ' '
PATTERN_PHASE_FILE = re.compile('\S+-weeklyblog-phase-(\d+)\.md')
PATTERN_POST = re.compile('-\s*\[(.+)\]\((https?://\S+)\)\s*\(\[@(.+)\]\((.+)\)\)')
PATTERN_CATEGORY = re.compile('#{5}\s*(.*?)\n')
BLOG_DIR = '_posts/'
def get_post(f, phase):
phase_summary = ''
with open(f, 'r') as md:
content = md.readline().replace(FUCK_STR, " ")
category = ''
while content:
if re.match(PATTERN_CATEGORY, content):
category = re.match(PATTERN_CATEGORY, content).group(1)
else:
post = re.match(PATTERN_POST, content)
if post:
                    # | <article title> | Android | <author link> |
phase_summary += '| [%s](%s) | %s | [%s](%s) |%s|\n' % (
post.group(1), post.group(2), category,
post.group(3), post.group(4), phase)
content = md.readline().replace(FUCK_STR, " ")
return phase_summary
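# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the line format that
# PATTERN_POST expects, built from made-up title, URL and author values.
def _example_post_line():
    """Return the groups PATTERN_POST extracts from a sample line:
    (u'Some Title', u'http://example.com/post', u'alice', u'https://github.com/alice')
    """
    sample = u'- [Some Title](http://example.com/post) ([@alice](https://github.com/alice))'
    return re.match(PATTERN_POST, sample).groups()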
if __name__ == '__main__':
with open('README.md', 'w') as post_md:
th = '| 标题 | 类别 | 作者 | 期数 |\n| :---- | :-----: | :--: | :-----: |\n'
post_md.write(th)
f_list = os.listdir(BLOG_DIR)
f_list.reverse()
for f_name in f_list:
f = os.path.join(BLOG_DIR, f_name)
if os.path.isfile(f):
result = re.match(PATTERN_PHASE_FILE, f_name)
if result:
phase_count = result.group(1)
                post_md.write(get_post(f, phase_count))
| mit | -2,145,483,734,492,894,700 | 32.869565 | 83 | 0.466924 | false | 3.071006 | false | false | false |
sdrogers/ms2ldaviz | ms2ldaviz/setup_feat_col.py | 1 | 2204 | import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings_simon")
import django
django.setup()
from basicviz.models import *
if __name__ == '__main__':
other_experiment_name = sys.argv[1]
fs,status = BVFeatureSet.objects.get_or_create(name = 'binned_005')
if status:
print("Created feature set")
else:
print("Featureset already exists")
mbe = Experiment.objects.get(name = 'massbank_binned_005')
print("Got " + str(mbe))
if mbe.featureset == None:
mbe.featureset = fs
mbe.save()
mbe_features = Feature.objects.filter(experiment = mbe)
print("Got {} features".format(len(mbe_features)))
mbe_features_sub = Feature.objects.filter(experiment = mbe,featureset = None)
print("{} have no featureset".format(len(mbe_features_sub)))
for f in mbe_features_sub:
f.featureset = fs
f.save()
# Now get the features as tied to the feature set
mbe_features = Feature.objects.filter(featureset = fs)
print("Got {} features".format(len(mbe_features)))
fnames = set([f.name for f in mbe_features])
# get another experiment
i93 = Experiment.objects.get(name = other_experiment_name)
i93.featureset = fs
i93.save()
print("Got " + str(i93))
i93_features = Feature.objects.filter(experiment = i93)
print("Got {} features".format(len(i93_features)))
for f in i93_features:
if f.name in fnames:
# Find all the instances
fis = FeatureInstance.objects.filter(feature = f)
gfeature = [g for g in mbe_features if g.name == f.name][0]
for fi in fis:
fi.feature = gfeature
fi.save()
mis = Mass2MotifInstance.objects.filter(feature = f)
for ms in mis:
ms.feature = gfeature
ms.save()
else:
new_feature = Feature.objects.create(name = f.name,featureset = fs,min_mz = f.min_mz,max_mz = f.max_mz)
fis = FeatureInstance.objects.filter(feature = f)
for fi in fis:
fi.feature = new_feature
fi.save()
mis = Mass2MotifInstance.objects.filter(feature = f)
for ms in mis:
ms.feature = new_feature
ms.save()
for f in i93_features:
if len(f.featureinstance_set.all()) == 0 and len(f.mass2motifinstance_set.all()) == 0 and len(f.featuremap_set.all()) == 0:
f.delete()
else:
print(f) | mit | 8,517,246,142,656,428,000 | 25.890244 | 125 | 0.685118 | false | 2.832905 | false | false | false |
beyoungwoo/C_glibc_Sample | _Algorithm/ProjectEuler_python/euler_4.py | 1 | 1587 | #!/usr/bin/python -Wall
# -*- coding: utf-8 -*-
"""
Largest palindrome product
Project Euler, Problem 4 (published 16 November 2001; difficulty rating 5%)

A palindromic number reads the same both ways. The largest palindrome made from
the product of two 2-digit numbers is 9009 = 91 * 99.

Find the largest palindrome made from the product of two 3-digit numbers.
"""
# 999 * 999 = 998001
# 998 comp 100
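# Approach: L_comp(n, s) checks whether the digit string s of a product of two
# n-digit numbers (4 digits for n=2, 6 digits for n=3) is a palindrome;
# L_mutiple(n, max_num) scans factor pairs from the top of the range downwards
# and returns the first palindromic product it encounters.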
def L_comp(n, s):
if (n == 2) :
if (s[0] == s[3] and s[1] == s[2]) :
return True
else :
return False
elif (n == 3) :
if (s[0] == s[5] and s[1] == s[4] and s[2] == s[3]) :
return True
else :
return False
def L_mutiple(n, max_num):
max_range = max_num -1
min_range = max_num /2
for i in range(max_range, min_range, -1):
for j in range(max_range, min_range, -1):
ret = i * j
s = "%d" % (ret)
result = L_comp(n, s)
if (result):
return ret
return -1
def L_plaindrome(n):
if (n != 2 and n != 3):
print "invalid input"
return -1
max_num = 1
for i in range (0, n):
max_num *= 10
return L_mutiple(n, max_num)
print L_plaindrome(3)
| gpl-3.0 | -6,911,959,164,782,817,000 | 28.388889 | 198 | 0.555766 | false | 2.90128 | false | false | false |
michal-ruzicka/archivematica | src/MCPClient/lib/clientScripts/restructureForComplianceMaildir.py | 1 | 1919 | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <[email protected]>
import os
import sys
import shutil
requiredDirectories = ["logs", "logs/fileMeta", "metadata", "metadata/submissionDocumentation", "objects", "objects/Maildir"]
optionalFiles = ["processingMCP.xml"]
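# After restructuring, the transfer has the standard SIP skeleton (logs/, metadata/,
# objects/) and every other file or directory is moved under objects/Maildir/.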
def restructureMaildirDirectory(unitPath):
for dir in requiredDirectories:
dirPath = os.path.join(unitPath, dir)
if not os.path.isdir(dirPath):
os.mkdir(dirPath)
print "creating: ", dir
for item in os.listdir(unitPath):
dst = os.path.join(unitPath, "objects", "Maildir") + "/."
itemPath = os.path.join(unitPath, item)
if os.path.isdir(itemPath) and item not in requiredDirectories:
shutil.move(itemPath, dst)
print "moving directory to objects/Maildir: ", item
elif os.path.isfile(itemPath) and item not in optionalFiles:
shutil.move(itemPath, dst)
print "moving file to objects/Maildir: ", item
if __name__ == '__main__':
target = sys.argv[1]
restructureMaildirDirectory(target)
| agpl-3.0 | 8,630,748,113,008,248,000 | 36.627451 | 125 | 0.705576 | false | 3.711799 | false | false | false |
gopaycommunity/gopay-python-api | tests/unit/utils.py | 1 | 1531 | from gopay.enums import PaymentInstrument, BankSwiftCode, Currency, Language
class Utils:
GO_ID = '8712700986'
CLIENT_ID = '1689337452'
CLIENT_SECRET = 'CKr7FyEE'
CLIENT_ID_EET = "1365575992"
CLIENT_SECRET_EET = "NUVsrv4W"
GO_ID_EET = '8289213768'
@staticmethod
def create_base_payment():
base_payment = {
'payer': {
'allowed_payment_instruments': [PaymentInstrument.BANK_ACCOUNT, PaymentInstrument.PAYMENT_CARD],
'allowed_swifts': [BankSwiftCode.CESKA_SPORITELNA, BankSwiftCode.RAIFFEISENBANK],
#'default_swift': BankSwiftCode.CESKA_SPORITELNA,
#'default_payment_instrument': PaymentInstrument.BANK_ACCOUNT,
'contact': {
'email': '[email protected]',
},
},
'order_number': '6789',
'amount': '1900',
'currency': Currency.CZECH_CROWNS,
'order_description': '6789Description',
'lang': Language.CZECH, # if lang is not specified, then default lang is used
'additional_params': [
{'name': 'AdditionalKey', 'value': 'AdditionalValue'}
],
'items': [
{'name': 'Item01', 'amount': '1900', 'count' : '1'},
],
'callback': {
'return_url': 'https://eshop123.cz/return',
'notification_url': 'https://eshop123.cz/notify'
},
}
return base_payment
| mit | -2,250,619,758,455,118,600 | 35.452381 | 112 | 0.536251 | false | 3.653938 | false | false | false |
ludwiktrammer/odoo | addons/sale/sale.py | 1 | 42630 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from openerp import SUPERUSER_ID
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
from openerp.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
class res_company(models.Model):
_inherit = "res.company"
sale_note = fields.Text(string='Default Terms and Conditions', translate=True)
class SaleOrder(models.Model):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_order = 'date_order desc, id desc'
@api.depends('order_line.price_total')
def _amount_all(self):
"""
Compute the total amounts of the SO.
"""
for order in self:
amount_untaxed = amount_tax = 0.0
for line in order.order_line:
amount_untaxed += line.price_subtotal
amount_tax += line.price_tax
order.update({
'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),
'amount_tax': order.pricelist_id.currency_id.round(amount_tax),
'amount_total': amount_untaxed + amount_tax,
})
@api.depends('state', 'order_line.invoice_status')
def _get_invoiced(self):
"""
Compute the invoice status of a SO. Possible statuses:
- no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
          invoice. This is also the default value if the conditions for no other status are met.
- to invoice: if any SO line is 'to invoice', the whole SO is 'to invoice'
- invoiced: if all SO lines are invoiced, the SO is invoiced.
- upselling: if all SO lines are invoiced or upselling, the status is upselling.
The invoice_ids are obtained thanks to the invoice lines of the SO lines, and we also search
for possible refunds created directly from existing invoices. This is necessary since such a
refund is not directly linked to the SO.
"""
for order in self:
invoice_ids = order.order_line.mapped('invoice_lines').mapped('invoice_id')
# Search for refunds as well
refund_ids = self.env['account.invoice'].browse()
if invoice_ids:
refund_ids = refund_ids.search([('type', '=', 'out_refund'), ('origin', 'in', invoice_ids.mapped('number')), ('origin', '!=', False)])
line_invoice_status = [line.invoice_status for line in order.order_line]
if order.state not in ('sale', 'done'):
invoice_status = 'no'
elif any(invoice_status == 'to invoice' for invoice_status in line_invoice_status):
invoice_status = 'to invoice'
elif all(invoice_status == 'invoiced' for invoice_status in line_invoice_status):
invoice_status = 'invoiced'
elif all(invoice_status in ['invoiced', 'upselling'] for invoice_status in line_invoice_status):
invoice_status = 'upselling'
else:
invoice_status = 'no'
order.update({
'invoice_count': len(set(invoice_ids.ids + refund_ids.ids)),
'invoice_ids': invoice_ids.ids + refund_ids.ids,
'invoice_status': invoice_status
})
@api.model
def _default_note(self):
return self.env.user.company_id.sale_note
@api.model
def _get_default_team(self):
default_team_id = self.env['crm.team']._get_default_team_id()
return self.env['crm.team'].browse(default_team_id)
@api.onchange('fiscal_position_id')
def _compute_tax_id(self):
"""
Trigger the recompute of the taxes if the fiscal position is changed on the SO.
"""
for order in self:
order.order_line._compute_tax_id()
name = fields.Char(string='Order Reference', required=True, copy=False, readonly=True, index=True, default=lambda self: _('New'))
origin = fields.Char(string='Source Document', help="Reference of the document that generated this sales order request.")
client_order_ref = fields.Char(string='Customer Reference', copy=False)
state = fields.Selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('sale', 'Sale Order'),
('done', 'Done'),
('cancel', 'Cancelled'),
], string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')
date_order = fields.Datetime(string='Order Date', required=True, readonly=True, index=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False, default=fields.Datetime.now)
validity_date = fields.Date(string='Expiration Date', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
create_date = fields.Datetime(string='Creation Date', readonly=True, index=True, help="Date on which sales order is created.")
user_id = fields.Many2one('res.users', string='Salesperson', index=True, track_visibility='onchange', default=lambda self: self.env.user)
partner_id = fields.Many2one('res.partner', string='Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, index=True, track_visibility='always')
partner_invoice_id = fields.Many2one('res.partner', string='Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order.")
partner_shipping_id = fields.Many2one('res.partner', string='Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order.")
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order.")
currency_id = fields.Many2one("res.currency", related='pricelist_id.currency_id', string="Currency", readonly=True, required=True)
project_id = fields.Many2one('account.analytic.account', 'Analytic Account', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order.", copy=False, domain=[('account_type', '=', 'normal')])
order_line = fields.One2many('sale.order.line', 'order_id', string='Order Lines', states={'cancel': [('readonly', True)], 'done': [('readonly', True)]}, copy=True)
invoice_count = fields.Integer(string='# of Invoices', compute='_get_invoiced', readonly=True)
invoice_ids = fields.Many2many("account.invoice", string='Invoices', compute="_get_invoiced", readonly=True, copy=False)
invoice_status = fields.Selection([
('upselling', 'Upselling Opportunity'),
('invoiced', 'Fully Invoiced'),
('to invoice', 'To Invoice'),
('no', 'Nothing to Invoice')
], string='Invoice Status', compute='_get_invoiced', store=True, readonly=True, default='no')
note = fields.Text('Terms and conditions', default=_default_note)
amount_untaxed = fields.Monetary(string='Untaxed Amount', store=True, readonly=True, compute='_amount_all', track_visibility='always')
amount_tax = fields.Monetary(string='Taxes', store=True, readonly=True, compute='_amount_all', track_visibility='always')
amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_amount_all', track_visibility='always')
payment_term_id = fields.Many2one('account.payment.term', string='Payment Term', oldname='payment_term')
fiscal_position_id = fields.Many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position')
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('sale.order'))
team_id = fields.Many2one('crm.team', 'Sales Team', change_default=True, default=_get_default_team, oldname='section_id')
procurement_group_id = fields.Many2one('procurement.group', 'Procurement Group', copy=False)
product_id = fields.Many2one('product.product', related='order_line.product_id', string='Product')
@api.model
def _get_customer_lead(self, product_tmpl_id):
return False
@api.multi
def button_dummy(self):
return True
@api.multi
def unlink(self):
for order in self:
if order.state != 'draft':
raise UserError(_('You can only delete draft quotations!'))
return super(SaleOrder, self).unlink()
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'state' in init_values and self.state == 'sale':
return 'sale.mt_order_confirmed'
elif 'state' in init_values and self.state == 'sent':
return 'sale.mt_order_sent'
return super(SaleOrder, self)._track_subtype(init_values)
@api.multi
@api.onchange('partner_shipping_id')
def onchange_partner_shipping_id(self):
"""
Trigger the change of fiscal position when the shipping address is modified.
"""
fiscal_position = self.env['account.fiscal.position'].get_fiscal_position(self.partner_id.id, self.partner_shipping_id.id)
if fiscal_position:
self.fiscal_position_id = fiscal_position
return {}
@api.multi
@api.onchange('partner_id')
def onchange_partner_id(self):
"""
Update the following fields when the partner is changed:
- Pricelist
- Payment term
- Invoice address
- Delivery address
"""
if not self.partner_id:
self.update({
'partner_invoice_id': False,
'partner_shipping_id': False,
'payment_term_id': False,
'fiscal_position_id': False,
})
return
addr = self.partner_id.address_get(['delivery', 'invoice'])
values = {
'pricelist_id': self.partner_id.property_product_pricelist and self.partner_id.property_product_pricelist.id or False,
'payment_term_id': self.partner_id.property_payment_term_id and self.partner_id.property_payment_term_id.id or False,
'partner_invoice_id': addr['invoice'],
'partner_shipping_id': addr['delivery'],
'note': self.with_context(lang=self.partner_id.lang).env.user.company_id.sale_note,
}
if self.partner_id.user_id:
values['user_id'] = self.partner_id.user_id.id
if self.partner_id.team_id:
values['team_id'] = self.partner_id.team_id.id
self.update(values)
@api.model
def create(self, vals):
if vals.get('name', 'New') == 'New':
vals['name'] = self.env['ir.sequence'].next_by_code('sale.order') or 'New'
# Makes sure partner_invoice_id', 'partner_shipping_id' and 'pricelist_id' are defined
if any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
partner = self.env['res.partner'].browse(vals.get('partner_id'))
addr = partner.address_get(['delivery', 'invoice'])
vals['partner_invoice_id'] = vals.setdefault('partner_invoice_id', addr['invoice'])
vals['partner_shipping_id'] = vals.setdefault('partner_shipping_id', addr['delivery'])
vals['pricelist_id'] = vals.setdefault('pricelist_id', partner.property_product_pricelist and partner.property_product_pricelist.id)
result = super(SaleOrder, self).create(vals)
return result
@api.multi
def _prepare_invoice(self):
"""
Prepare the dict of values to create the new invoice for a sales order. This method may be
overridden to implement custom invoice generation (making sure to call super() to establish
a clean extension chain).
"""
self.ensure_one()
journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']
if not journal_id:
raise UserError(_('Please define an accounting sale journal for this company.'))
invoice_vals = {
'name': self.client_order_ref or '',
'origin': self.name,
'type': 'out_invoice',
'account_id': self.partner_invoice_id.property_account_receivable_id.id,
'partner_id': self.partner_invoice_id.id,
'journal_id': journal_id,
'currency_id': self.pricelist_id.currency_id.id,
'comment': self.note,
'payment_term_id': self.payment_term_id.id,
'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,
'company_id': self.company_id.id,
'user_id': self.user_id and self.user_id.id,
'team_id': self.team_id.id
}
return invoice_vals
@api.multi
def print_quotation(self):
self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})
return self.env['report'].get_action(self, 'sale.report_saleorder')
@api.multi
def action_view_invoice(self):
invoice_ids = self.mapped('invoice_ids')
imd = self.env['ir.model.data']
action = imd.xmlid_to_object('account.action_invoice_tree1')
list_view_id = imd.xmlid_to_res_id('account.invoice_tree')
form_view_id = imd.xmlid_to_res_id('account.invoice_form')
result = {
'name': action.name,
'help': action.help,
'type': action.type,
'views': [[list_view_id, 'tree'], [form_view_id, 'form'], [False, 'graph'], [False, 'kanban'], [False, 'calendar'], [False, 'pivot']],
'target': action.target,
'context': action.context,
'res_model': action.res_model,
}
if len(invoice_ids) > 1:
result['domain'] = "[('id','in',%s)]" % invoice_ids.ids
elif len(invoice_ids) == 1:
result['views'] = [(form_view_id, 'form')]
result['res_id'] = invoice_ids.ids[0]
else:
result = {'type': 'ir.actions.act_window_close'}
return result
@api.multi
def action_invoice_create(self, grouped=False, final=False):
"""
Create the invoice associated to the SO.
:param grouped: if True, invoices are grouped by SO id. If False, invoices are grouped by
(partner, currency)
:param final: if True, refunds will be generated if necessary
:returns: list of created invoices
"""
inv_obj = self.env['account.invoice']
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
invoices = {}
for order in self:
group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)
for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):
if float_is_zero(line.qty_to_invoice, precision_digits=precision):
continue
if group_key not in invoices:
inv_data = order._prepare_invoice()
invoice = inv_obj.create(inv_data)
invoices[group_key] = invoice
elif group_key in invoices:
vals = {}
if order.name not in invoices[group_key].origin.split(', '):
vals['origin'] = invoices[group_key].origin + ', ' + order.name
if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(', '):
vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref
invoices[group_key].write(vals)
if line.qty_to_invoice > 0:
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)
elif line.qty_to_invoice < 0 and final:
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)
for invoice in invoices.values():
if not invoice.invoice_line_ids:
raise UserError(_('There is no invoicable line.'))
# If invoice is negative, do a refund invoice instead
if invoice.amount_untaxed < 0:
invoice.type = 'out_refund'
for line in invoice.invoice_line_ids:
line.quantity = -line.quantity
# Use additional field helper function (for account extensions)
for line in invoice.invoice_line_ids:
line._set_additional_fields(invoice)
# Necessary to force computation of taxes. In account_invoice, they are triggered
# by onchanges, which are not triggered when doing a create.
invoice.compute_taxes()
return [inv.id for inv in invoices.values()]
@api.multi
def action_draft(self):
orders = self.filtered(lambda s: s.state in ['cancel', 'sent'])
orders.write({
'state': 'draft',
'procurement_group_id': False,
})
orders.mapped('order_line').mapped('procurement_ids').write({'sale_line_id': False})
@api.multi
def action_cancel(self):
self.write({'state': 'cancel'})
@api.multi
def action_quotation_send(self):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
self.ensure_one()
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict()
ctx.update({
'default_model': 'sale.order',
'default_res_id': self.ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
@api.multi
def force_quotation_send(self):
for order in self:
email_act = order.action_quotation_send()
if email_act and email_act.get('context'):
email_ctx = email_act['context']
email_ctx.update(default_email_from=order.company_id.email)
order.with_context(email_ctx).message_post_with_template(email_ctx.get('default_template_id'))
return True
@api.multi
def action_done(self):
self.write({'state': 'done'})
@api.model
def _prepare_procurement_group(self):
return {'name': self.name}
@api.multi
def action_confirm(self):
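        # Confirming a quotation moves it to 'sale', optionally e-mails the customer,
        # creates procurements for every order line, creates an analytic account when a
        # line is invoiced on time and material, and locks the order ('done') when the
        # auto-done setting is enabled.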
for order in self:
order.state = 'sale'
if self.env.context.get('send_email'):
self.force_quotation_send()
order.order_line._action_procurement_create()
if not order.project_id:
for line in order.order_line:
if line.product_id.invoice_policy == 'cost':
order._create_analytic_account()
break
if self.env['ir.values'].get_default('sale.config.settings', 'auto_done_setting'):
self.action_done()
return True
@api.multi
def _create_analytic_account(self, prefix=None):
for order in self:
name = order.name
if prefix:
name = prefix + ": " + order.name
analytic = self.env['account.analytic.account'].create({
'name': name,
'code': order.client_order_ref,
'company_id': order.company_id.id,
'partner_id': order.partner_id.id
})
order.project_id = analytic
@api.multi
def _notification_group_recipients(self, message, recipients, done_ids, group_data):
group_user = self.env.ref('base.group_user')
for recipient in recipients:
if recipient.id in done_ids:
continue
if not recipient.user_ids:
group_data['partner'] |= recipient
else:
group_data['user'] |= recipient
done_ids.add(recipient.id)
return super(SaleOrder, self)._notification_group_recipients(message, recipients, done_ids, group_data)
class SaleOrderLine(models.Model):
_name = 'sale.order.line'
_description = 'Sales Order Line'
_order = 'order_id desc, sequence, id'
@api.depends('state', 'product_uom_qty', 'qty_delivered', 'qty_to_invoice', 'qty_invoiced')
def _compute_invoice_status(self):
"""
Compute the invoice status of a SO line. Possible statuses:
- no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
          invoice. This is also the default value if the conditions for no other status are met.
- to invoice: we refer to the quantity to invoice of the line. Refer to method
`_get_to_invoice_qty()` for more information on how this quantity is calculated.
- upselling: this is possible only for a product invoiced on ordered quantities for which
we delivered more than expected. The could arise if, for example, a project took more
time than expected but we decided not to invoice the extra cost to the client. This
occurs onyl in state 'sale', so that when a SO is set to done, the upselling opportunity
is removed from the list.
- invoiced: the quantity invoiced is larger or equal to the quantity ordered.
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if line.state not in ('sale', 'done'):
line.invoice_status = 'no'
elif not float_is_zero(line.qty_to_invoice, precision_digits=precision):
line.invoice_status = 'to invoice'
elif line.state == 'sale' and line.product_id.invoice_policy == 'order' and\
float_compare(line.qty_delivered, line.product_uom_qty, precision_digits=precision) == 1:
line.invoice_status = 'upselling'
elif float_compare(line.qty_invoiced, line.product_uom_qty, precision_digits=precision) >= 0:
line.invoice_status = 'invoiced'
else:
line.invoice_status = 'no'
@api.depends('product_uom_qty', 'discount', 'price_unit', 'tax_id')
def _compute_amount(self):
"""
Compute the amounts of the SO line.
"""
for line in self:
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_id)
line.update({
'price_tax': taxes['total_included'] - taxes['total_excluded'],
'price_total': taxes['total_included'],
'price_subtotal': taxes['total_excluded'],
})
@api.depends('product_id.invoice_policy', 'order_id.state')
def _compute_qty_delivered_updateable(self):
for line in self:
line.qty_delivered_updateable = line.product_id.invoice_policy in ('order', 'delivery') and line.order_id.state == 'sale' and line.product_id.track_service == 'manual'
@api.depends('qty_invoiced', 'qty_delivered', 'product_uom_qty', 'order_id.state')
def _get_to_invoice_qty(self):
"""
Compute the quantity to invoice. If the invoice policy is order, the quantity to invoice is
calculated from the ordered quantity. Otherwise, the quantity delivered is used.
"""
for line in self:
if line.order_id.state in ['sale', 'done']:
if line.product_id.invoice_policy == 'order':
line.qty_to_invoice = line.product_uom_qty - line.qty_invoiced
else:
line.qty_to_invoice = line.qty_delivered - line.qty_invoiced
else:
line.qty_to_invoice = 0
@api.depends('invoice_lines.invoice_id.state', 'invoice_lines.quantity')
def _get_invoice_qty(self):
"""
        Compute the quantity invoiced. In case of a refund, the quantity invoiced is decreased. Note
that this is the case only if the refund is generated from the SO and that is intentional: if
a refund made would automatically decrease the invoiced quantity, then there is a risk of reinvoicing
it automatically, which may not be wanted at all. That's why the refund has to be created from the SO
"""
for line in self:
qty_invoiced = 0.0
for invoice_line in line.invoice_lines:
if invoice_line.invoice_id.state != 'cancel':
if invoice_line.invoice_id.type == 'out_invoice':
qty_invoiced += invoice_line.quantity
elif invoice_line.invoice_id.type == 'out_refund':
qty_invoiced -= invoice_line.quantity
line.qty_invoiced = qty_invoiced
@api.depends('price_subtotal', 'product_uom_qty')
def _get_price_reduce(self):
for line in self:
line.price_reduce = line.price_subtotal / line.product_uom_qty if line.product_uom_qty else 0.0
@api.multi
def _compute_tax_id(self):
for line in self:
fpos = line.order_id.fiscal_position_id or line.order_id.partner_id.property_account_position_id
if fpos:
# The superuser is used by website_sale in order to create a sale order. We need to make
# sure we only select the taxes related to the company of the partner. This should only
# apply if the partner is linked to a company.
if self.env.uid == SUPERUSER_ID and line.order_id.company_id:
taxes = fpos.map_tax(line.product_id.taxes_id).filtered(lambda r: r.company_id == line.order_id.company_id)
else:
taxes = fpos.map_tax(line.product_id.taxes_id)
line.tax_id = taxes
else:
line.tax_id = line.product_id.taxes_id if line.product_id.taxes_id else False
@api.multi
def _prepare_order_line_procurement(self, group_id=False):
self.ensure_one()
return {
'name': self.name,
'origin': self.order_id.name,
'date_planned': datetime.strptime(self.order_id.date_order, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=self.customer_lead),
'product_id': self.product_id.id,
'product_qty': self.product_uom_qty,
'product_uom': self.product_uom.id,
'company_id': self.order_id.company_id.id,
'group_id': group_id,
'sale_line_id': self.id
}
@api.multi
def _action_procurement_create(self):
"""
Create procurements based on quantity ordered. If the quantity is increased, new
procurements are created. If the quantity is decreased, no automated action is taken.
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
new_procs = self.env['procurement.order'] #Empty recordset
for line in self:
if line.state != 'sale' or not line.product_id._need_procurement():
continue
qty = 0.0
for proc in line.procurement_ids:
qty += proc.product_qty
if float_compare(qty, line.product_uom_qty, precision_digits=precision) >= 0:
continue
if not line.order_id.procurement_group_id:
vals = line.order_id._prepare_procurement_group()
line.order_id.procurement_group_id = self.env["procurement.group"].create(vals)
vals = line._prepare_order_line_procurement(group_id=line.order_id.procurement_group_id.id)
vals['product_qty'] = line.product_uom_qty - qty
new_proc = self.env["procurement.order"].create(vals)
new_procs += new_proc
new_procs.run()
return new_procs
@api.model
def _get_analytic_invoice_policy(self):
return ['cost']
@api.model
def _get_analytic_track_service(self):
return []
@api.model
def create(self, values):
onchange_fields = ['name', 'price_unit', 'product_uom', 'tax_id']
if values.get('order_id') and values.get('product_id') and any(f not in values for f in onchange_fields):
line = self.new(values)
line.product_id_change()
for field in onchange_fields:
if field not in values:
values[field] = line._fields[field].convert_to_write(line[field])
line = super(SaleOrderLine, self).create(values)
if line.state == 'sale':
if (not line.order_id.project_id and
(line.product_id.track_service in self._get_analytic_track_service() or
line.product_id.invoice_policy in self._get_analytic_invoice_policy())):
line.order_id._create_analytic_account()
line._action_procurement_create()
return line
@api.multi
def write(self, values):
lines = False
if 'product_uom_qty' in values:
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
lines = self.filtered(
lambda r: r.state == 'sale' and float_compare(r.product_uom_qty, values['product_uom_qty'], precision_digits=precision) == -1)
result = super(SaleOrderLine, self).write(values)
if lines:
lines._action_procurement_create()
return result
order_id = fields.Many2one('sale.order', string='Order Reference', required=True, ondelete='cascade', index=True, copy=False)
name = fields.Text(string='Description', required=True)
sequence = fields.Integer(string='Sequence', default=10)
invoice_lines = fields.Many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_line_id', string='Invoice Lines', copy=False)
invoice_status = fields.Selection([
('upselling', 'Upselling Opportunity'),
('invoiced', 'Fully Invoiced'),
('to invoice', 'To Invoice'),
('no', 'Nothing to Invoice')
], string='Invoice Status', compute='_compute_invoice_status', store=True, readonly=True, default='no')
price_unit = fields.Float('Unit Price', required=True, digits=dp.get_precision('Product Price'), default=0.0)
price_subtotal = fields.Monetary(compute='_compute_amount', string='Subtotal', readonly=True, store=True)
price_tax = fields.Monetary(compute='_compute_amount', string='Taxes', readonly=True, store=True)
price_total = fields.Monetary(compute='_compute_amount', string='Total', readonly=True, store=True)
price_reduce = fields.Monetary(compute='_get_price_reduce', string='Price Reduce', readonly=True, store=True)
tax_id = fields.Many2many('account.tax', string='Taxes')
discount = fields.Float(string='Discount (%)', digits=dp.get_precision('Discount'), default=0.0)
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], change_default=True, ondelete='restrict', required=True)
product_uom_qty = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True, default=1.0)
product_uom = fields.Many2one('product.uom', string='Unit of Measure', required=True)
qty_delivered_updateable = fields.Boolean(compute='_compute_qty_delivered_updateable', string='Can Edit Delivered', readonly=True, default=True)
qty_delivered = fields.Float(string='Delivered', copy=False, digits=dp.get_precision('Product Unit of Measure'), default=0.0)
qty_to_invoice = fields.Float(
compute='_get_to_invoice_qty', string='To Invoice', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'), default=0.0)
qty_invoiced = fields.Float(
compute='_get_invoice_qty', string='Invoiced', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'), default=0.0)
salesman_id = fields.Many2one(related='order_id.user_id', store=True, string='Salesperson', readonly=True)
currency_id = fields.Many2one(related='order_id.currency_id', store=True, string='Currency', readonly=True)
company_id = fields.Many2one(related='order_id.company_id', string='Company', store=True, readonly=True)
order_partner_id = fields.Many2one(related='order_id.partner_id', store=True, string='Customer')
state = fields.Selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('sale', 'Sale Order'),
('done', 'Done'),
('cancel', 'Cancelled'),
], related='order_id.state', string='Order Status', readonly=True, copy=False, store=True, default='draft')
customer_lead = fields.Float(
'Delivery Lead Time', required=True, default=0.0,
help="Number of days between the order confirmation and the shipping of the products to the customer", oldname="delay")
procurement_ids = fields.One2many('procurement.order', 'sale_line_id', string='Procurements')
@api.multi
def _prepare_invoice_line(self, qty):
"""
Prepare the dict of values to create the new invoice line for a sales order line.
:param qty: float quantity to invoice
"""
self.ensure_one()
res = {}
account = self.product_id.property_account_income_id or self.product_id.categ_id.property_account_income_categ_id
if not account:
raise UserError(_('Please define income account for this product: "%s" (id:%d) - or for its category: "%s".') % \
(self.product_id.name, self.product_id.id, self.product_id.categ_id.name))
fpos = self.order_id.fiscal_position_id or self.order_id.partner_id.property_account_position_id
if fpos:
account = fpos.map_account(account)
res = {
'name': self.name,
'sequence': self.sequence,
'origin': self.order_id.name,
'account_id': account.id,
'price_unit': self.price_unit,
'quantity': qty,
'discount': self.discount,
'uom_id': self.product_uom.id,
'product_id': self.product_id.id or False,
'invoice_line_tax_ids': [(6, 0, self.tax_id.ids)],
'account_analytic_id': self.order_id.project_id.id,
}
return res
@api.multi
def invoice_line_create(self, invoice_id, qty):
"""
Create an invoice line. The quantity to invoice can be positive (invoice) or negative
(refund).
:param invoice_id: integer
:param qty: float quantity to invoice
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if not float_is_zero(qty, precision_digits=precision):
vals = line._prepare_invoice_line(qty=qty)
vals.update({'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
self.env['account.invoice.line'].create(vals)
@api.multi
@api.onchange('product_id')
def product_id_change(self):
if not self.product_id:
return {'domain': {'product_uom': []}}
vals = {}
domain = {'product_uom': [('category_id', '=', self.product_id.uom_id.category_id.id)]}
if not self.product_uom or (self.product_id.uom_id.category_id.id != self.product_uom.category_id.id):
vals['product_uom'] = self.product_id.uom_id
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=self.product_uom_qty,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id
)
name = product.name_get()[0][1]
if product.description_sale:
name += '\n' + product.description_sale
vals['name'] = name
self._compute_tax_id()
if self.order_id.pricelist_id and self.order_id.partner_id:
vals['price_unit'] = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)
self.update(vals)
return {'domain': domain}
@api.onchange('product_uom', 'product_uom_qty')
def product_uom_change(self):
if not self.product_uom:
self.price_unit = 0.0
return
if self.order_id.pricelist_id and self.order_id.partner_id:
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=self.product_uom_qty,
date_order=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id,
fiscal_position=self.env.context.get('fiscal_position')
)
self.price_unit = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)
@api.multi
def unlink(self):
if self.filtered(lambda x: x.state in ('sale', 'done')):
raise UserError(_('You can not remove a sale order line.\nDiscard changes and try setting the quantity to 0.'))
return super(SaleOrderLine, self).unlink()
@api.multi
def _get_delivered_qty(self):
'''
Intended to be overridden in sale_stock and sale_mrp
:return: the quantity delivered
:rtype: float
'''
return 0.0
class MailComposeMessage(models.TransientModel):
_inherit = 'mail.compose.message'
@api.multi
def send_mail(self, auto_commit=False):
if self._context.get('default_model') == 'sale.order' and self._context.get('default_res_id') and self._context.get('mark_so_as_sent'):
order = self.env['sale.order'].browse([self._context['default_res_id']])
if order.state == 'draft':
order.state = 'sent'
return super(MailComposeMessage, self.with_context(mail_post_autofollow=True)).send_mail(auto_commit=auto_commit)
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.model
def _get_default_team(self):
default_team_id = self.env['crm.team']._get_default_team_id()
return self.env['crm.team'].browse(default_team_id)
team_id = fields.Many2one('crm.team', string='Sales Team', default=_get_default_team, oldname='section_id')
@api.multi
def confirm_paid(self):
res = super(AccountInvoice, self).confirm_paid()
todo = set()
for invoice in self:
for line in invoice.invoice_line_ids:
for sale_line in line.sale_line_ids:
todo.add((sale_line.order_id, invoice.number))
for (order, name) in todo:
order.message_post(body=_("Invoice %s paid") % (name))
return res
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
sale_line_ids = fields.Many2many('sale.order.line', 'sale_order_line_invoice_rel', 'invoice_line_id', 'order_line_id', string='Sale Order Lines', readonly=True, copy=False)
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
sale_line_id = fields.Many2one('sale.order.line', string='Sale Order Line')
class ProductProduct(models.Model):
_inherit = 'product.product'
@api.multi
def _sales_count(self):
r = {}
domain = [
('state', 'in', ['sale', 'done']),
('product_id', 'in', self.ids),
]
for group in self.env['sale.report'].read_group(domain, ['product_id', 'product_uom_qty'], ['product_id']):
r[group['product_id'][0]] = group['product_uom_qty']
for product in self:
product.sales_count = r.get(product.id, 0)
return r
sales_count = fields.Integer(compute='_sales_count', string='# Sales')
class ProductTemplate(models.Model):
_inherit = 'product.template'
track_service = fields.Selection([('manual', 'Manually set quantities on order')], string='Track Service', default='manual')
@api.multi
@api.depends('product_variant_ids.sales_count')
def _sales_count(self):
for product in self:
product.sales_count = sum([p.sales_count for p in product.product_variant_ids])
@api.multi
def action_view_sales(self):
self.ensure_one()
action = self.env.ref('sale.action_product_sale_list')
product_ids = self.product_variant_ids.ids
return {
'name': action.name,
'help': action.help,
'type': action.type,
'view_type': action.view_type,
'view_mode': action.view_mode,
'target': action.target,
'context': "{'default_product_id': " + str(product_ids[0]) + "}",
'res_model': action.res_model,
'domain': [('state', 'in', ['sale', 'done']), ('product_id.product_tmpl_id', '=', self.id)],
}
sales_count = fields.Integer(compute='_sales_count', string='# Sales')
invoice_policy = fields.Selection(
[('order', 'Ordered quantities'),
('delivery', 'Delivered quantities'),
('cost', 'Invoice based on time and material')],
string='Invoicing Policy', default='order')
| agpl-3.0 | -3,618,811,220,402,187,000 | 46.314095 | 276 | 0.607718 | false | 3.871231 | false | false | false |
sk413025/tilitools | ocsvm.py | 1 | 3767 | from cvxopt import matrix,spmatrix,sparse
from cvxopt.blas import dot,dotu
from cvxopt.solvers import qp
import numpy as np
from kernel import Kernel
class OCSVM:
"""One-class support vector machine
'Estimating the support of a high-dimensional distribution.',
Sch\"{o}lkopf, B and Platt, J C and Shawe-Taylor, J and Smola, a J and Williamson, R C,
Microsoft, 1999
"""
MSG_ERROR = -1 # (scalar) something went wrong
MSG_OK = 0 # (scalar) everything alright
PRECISION = 10**-3 # important: effects the threshold, support vectors and speed!
kernel = [] # (matrix) our training kernel
samples = -1 # (scalar) amount of training data in X
C = 1.0 # (scalar) the regularization constant > 0
isDualTrained = False # (boolean) indicates if the oc-svm was trained in dual space
alphas = [] # (vector) dual solution vector
svs = [] # (vector) support vector indices
threshold = 0.0 # (scalar) the optimized threshold (rho)
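    # Minimal usage sketch (assuming K is a precomputed cvxopt kernel matrix of the
    # training data and K_test holds kernel values between test points and training points):
    #   svm = OCSVM(K, C=0.1)
    #   svm.train_dual()
    #   scores, msg = svm.apply_dual(K_test[:, svm.get_support_dual()])
    #   outliers = [i for i in range(scores.size[0]) if scores[i] < svm.get_threshold()[0]]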
def __init__(self, kernel, C=1.0):
self.kernel = kernel
self.C = C
(self.samples,foo) = kernel.size
print('Creating new one-class svm with {0} samples and C={1}.'.format(self.samples,C))
def train_dual(self):
"""Trains an one-class svm in dual with kernel."""
if (self.samples<1):
print('Invalid training data.')
return OCSVM.MSG_ERROR
# number of training examples
N = self.samples
# generate a kernel matrix
P = self.kernel
# there is no linear part of the objective
q = matrix(0.0, (N,1))
# sum_i alpha_i = A alpha = b = 1.0
A = matrix(1.0, (1,N))
b = matrix(1.0, (1,1))
# 0 <= alpha_i <= h = C
G1 = spmatrix(1.0, range(N), range(N))
G = sparse([G1,-G1])
h1 = matrix(self.C, (N,1))
h2 = matrix(0.0, (N,1))
h = matrix([h1,h2])
sol = qp(P,-q,G,h,A,b)
# mark dual as solved
self.isDualTrained = True
# store solution
self.alphas = sol['x']
# find support vectors
self.svs = []
for i in range(N):
if self.alphas[i]>OCSVM.PRECISION:
self.svs.append(i)
# find support vectors with alpha < C for threshold calculation
#self.threshold = 10**8
#flag = False
#for i in self.svs:
# if self.alphas[i]<(C-OCSVM.PRECISION) and flag==False:
# (self.threshold, MSG) = self.apply_dual(self.kernel[i,self.svs])
# flag=True
# break
# no threshold set yet?
#if (flag==False):
# (thres, MSG) = self.apply_dual(self.kernel[self.svs,self.svs])
# self.threshold = matrix(max(thres))
(thres, MSG) = self.apply_dual(self.kernel[self.svs,self.svs])
self.threshold = matrix(max(thres))
T = np.single(self.threshold)
cnt = 0
for i in range(len(self.svs)):
if thres[i,0]<(T-OCSVM.PRECISION):
cnt += 1
#print(self.alphas)
print('Found {0} support vectors. {1} of them are outliers.'.format(len(self.svs),cnt))
print('Threshold is {0}'.format(self.threshold))
return OCSVM.MSG_OK
def get_threshold(self):
return self.threshold
def get_support_dual(self):
return self.svs
def get_alphas(self):
return self.alphas
def get_support_dual_values(self):
return self.alphas[self.svs]
def set_train_kernel(self,kernel):
(dim1,dim2) = kernel.size
if (dim1!=dim2 and dim1!=self.samples):
print('(Kernel) Wrong format.')
return OCSVM.MSG_ERROR
self.kernel = kernel;
return OCSVM.MSG_OK
def apply_dual(self, kernel):
"""Application of a dual trained oc-svm."""
# number of training examples
N = self.samples
# check number and dims of test data
(tN,foo) = kernel.size
if (tN<1):
print('Invalid test data')
return 0, OCSVM.MSG_ERROR
if (self.isDualTrained!=True):
print('First train, then test.')
return 0, OCSVM.MSG_ERROR
# apply trained classifier
res = matrix([dotu(kernel[i,:],self.alphas[self.svs]) for i in range(tN)])
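        # The scores returned here are not shifted by the threshold (rho); callers are
        # expected to compare them against get_threshold() to decide on outliers.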
return res, OCSVM.MSG_OK
| mit | -5,839,695,781,475,350,000 | 24.452703 | 89 | 0.663127 | false | 2.743627 | false | false | false |
jianrongdeng/LAMOST | ana/scripts/filesIO.py | 1 | 9324 | """
============================
script: filesIO.py
============================
date: 20170615 by Jianrong Deng
purpose:
handle input / output files
various data I/O functions
Input: input dir, date, time
"""
import const
import pickle
import os
#==========================
def getDir (path=const.test_path_out, date=const.test_date, datatype=const.test_datatype):
"""
    purpose: naming scheme for the output directory: <path>/<date>/<datatype>
"""
dir = path +'/' + date + '/' + datatype
return dir
#==========================
#==========================
def getFilename (path=const.test_path_out, date=const.test_date, datatype=const.test_datatype, det=const.test_det, time=const.test_time[0], tag = '-sub_overscan-sub_bias', postfix = '.fit'):
"""
purpose: name scheme for net data (= raw - overscan - bias) file
"""
filename = path +'/' + date +'/' + datatype +'/' + det +'-' + time +'-' + tag + postfix
return filename
#==========================
def setOutFilename(rawfile, d_tag='stat'):
"""
purpose: set output filename using environment variables
input: rawfile: filename of raw data
output: output filename
"""
# info from input filenames
d_path_in = os.environ['env_rawdata_onlypath'] # get environment variable
d_date = get_date(d_path_in) # get date
d_type=get_datatype(d_path_in)
d_det = get_det(rawfile)
d_time = get_time(rawfile)
# setup output file directory and names
d_path_out = os.environ['env_path_out'] # get environment variable
os.system('mkdir -p {}'.format(getDir(path=d_path_out, date=d_date, datatype=d_type))) # create output directory if not already exists
file_out = getFilename(path=d_path_out, date=d_date,det=d_det,time=d_time, tag = d_tag, postfix='.dat')
return file_out
#==========================
#==========================
def setFilename(infile, in_tag='stat.dat', out_tag='clusters.dat'):
"""
purpose: set output filename using environment variables
input: infile: the input filename
in_tag: tag of the input file
out_tag: tag of the output file
output: output filename
"""
in_len = len(infile)
file_out = infile[0:in_len-len(in_tag)]
file_out = file_out + out_tag
return file_out
#==========================
def getMaskFilename(path=const.test_path_out, date=const.test_date, datatype=const.test_datatype, det=const.test_det, time=const.test_time[0], tag = '-3sigma_mask', postfix = '.fit'):
"""
purpose: name scheme for 3sigma-mask file
"""
filename = path + date + datatype + det + time + tag + postfix
return filename
#==========================
def getClusterFilename(path=const.test_path_out, date=const.test_date, datatype=const.test_datatype, det=const.test_det, time=const.test_time[0], tag = '-cluster', postfix = '.dat'):
"""
    purpose: name scheme for cluster file
"""
filename = path + date + datatype + det + time + tag + postfix
return filename
#============================
#============================
def dumpPixelLists(file_out, pixelLists, DEBUG = const.DEBUG):
#============================
"""
purpose: save pixel Lists to output file
input : filename and pixellist
"""
# save list to output file
try:
with open(file_out, "wb") as data:
pickle.dump(pixelLists, data)
except IOError as err:
        print('File error: ' + str(err))
    except pickle.PickleError as perr:
print('picklingerror:' + str(perr))
if DEBUG: printPixelLists(pixelLists)
return
#============================
#============================
def loadPixelLists(file_out, DEBUG = const.DEBUG):
#============================
"""
purpose: load pixel List from file
input : filename
output : pixellists
"""
# save list to output file
try:
with open(file_out, "rb") as data:
pixelLists = pickle.load(data)
except IOError as err:
        print('File error: ' + str(err))
    except pickle.PickleError as perr:
print('picklingerror:' + str(perr))
if DEBUG: printPixelLists(pixelLists)
return pixelLists
#============================
#============================
def printPixelLists(pixelLists, DEBUG = const.DEBUG_L2):
#============================
"""
purpose: print candidate pixel List
input : pixelLists
"""
print('number of images: ', len(pixelLists))
for im in pixelLists: # loop through five images
print('number of candiate pixels (clusters) in the image: ', len(im))
if DEBUG:
for ip in im:
print (ip[0], ip[1], int(ip[2]))
return
#============================
#============================
def dumpStat(file_stat, stat, DEBUG = const.DEBUG_L2):
#============================
"""
purpose: save stat info (mean and sstd ) to output file
input : file_stat and data stat(mean and sstd)
"""
try:
with open(file_stat, "wb") as data:
pickle.dump(stat, data)
except IOError as err:
        print('File error: ' + str(err))
    except pickle.PickleError as perr:
print('picklingerror:' + str(perr))
if DEBUG:
printStats (stat)
return
#============================
#============================
def loadStat(file_stat, DEBUG = const.DEBUG):
#============================
"""
purpose: save stat info (mean and sstd ) to output file
input : file_stat and data stat(mean and sstd)
"""
try:
with open(file_stat, "rb") as data:
stat = pickle.load(data)
except IOError as err:
        print('File error: ' + str(err))
    except pickle.PickleError as perr:
print('picklingerror:' + str(perr))
if DEBUG:
printStats (stat)
return stat
#============================
#============================
def printStats(stats):
#============================
"""
purpose: print stat info (mean and sstd )
input : data stat(mean and sstd)
"""
print ('image stat where [0-4] is real data, [5] is bias medium')
for ist in range(len(stats)):
print ('image :', ist, 'mean =', stats[ist][0], ', sstd =', stats[ist][1])
return
#============================
#============================
def get_onlyrawfilenames(DEBUG=const.DEBUG_L2):
"""
purpose: get rawfilenames from environment variables
"""
if DEBUG: # in debug mode, check if file exists
os.system("${env_rawdata_onlypath:?}") # ${variable:?} check if the variable is set
os.system("ls -l ${env_rawdata_onlypath:?}/${env_rawdata_onlyfilenames_0:?}")
os.system("ls -l ${env_rawdata_onlypath:?}/${env_rawdata_onlyfilenames_1:?}")
rawfiles=[]
rawfiles.append( os.environ['env_rawdata_onlyfilenames_0'])
rawfiles.append( os.environ['env_rawdata_onlyfilenames_1'])
rawfiles.append( os.environ['env_rawdata_onlyfilenames_2'])
rawfiles.append( os.environ['env_rawdata_onlyfilenames_3'])
rawfiles.append( os.environ['env_rawdata_onlyfilenames_4'])
return rawfiles
#============================
#============================
def get_rawfilenames(DEBUG=const.DEBUG_L2):
"""
purpose: get rawfilenames (with pathname) from environment variables
output: rawfilenames with pathname
"""
path= os.environ['env_rawdata_onlypath']
rawfiles= get_onlyrawfilenames()
for ir in range(len(rawfiles)):
rawfiles[ir]=path + '/' + rawfiles[ir]
return rawfiles
#============================
#============================
def get_det(filename):
"""
    purpose: extract the detector name from the filename
"""
temp = filename.strip().split('-')
det=temp[0]+'-' + temp[1]
return det
#============================
#============================
def get_times(filenames):
"""
purpose: strip the time stamps from filenames
"""
times = []
for ifile in filenames:
times.append(get_time(ifile))
return times
#============================
#============================
def get_time(filename):
"""
purpose: strip the time stamp from the filename
"""
temp = filename.strip().split('-')
return temp[2]
#============================
#============================
def get_date(pathname, DEBUG=const.DEBUG_L2):
"""
purpose: strip the date stamps from pathname
"""
temp = pathname.strip().split('/')
date = temp[3]
if DEBUG:
print('pathname = ', pathname, '\t date =', date)
return date
#============================
#============================
def get_datatype(pathname):
"""
purpose: strip the data type info from pathname
"""
temp = pathname.strip().split('/')
return temp[4]
#============================
#============================
class filename_rawdata:
"""
purpose: filename class for rawdata
"""
#============================
def __init__(self, a_det, a_dType, a_date, a_times=[]):
"""
purpose: initialization
"""
self.det = a_det
self.dType = a_dType
self.date = a_date
self.times = a_times
#============================
| gpl-3.0 | -551,796,630,725,550,400 | 28.506329 | 191 | 0.525097 | false | 3.885 | true | false | false |
fbzhong/sublime-closure-linter | gjslint.py | 1 | 4010 | import os
import re
import shutil
import sublime
import sublime_plugin
from const import *
from listener import *
from statusprocess import *
from asyncprocess import *
class ShowClosureLinterResultCommand(sublime_plugin.WindowCommand):
"""show closure linter result"""
def run(self):
self.window.run_command("show_panel", {"panel": "output."+RESULT_VIEW_NAME})
class ClosureLinterCommand(sublime_plugin.WindowCommand):
def run(self):
s = sublime.load_settings(SETTINGS_FILE)
file_path = self.window.active_view().file_name()
file_name = os.path.basename(file_path)
self.debug = s.get('debug', False)
self.buffered_data = ''
self.file_path = file_path
self.file_name = file_name
self.is_running = True
self.tests_panel_showed = False
self.ignored_error_count = 0
self.ignore_errors = s.get('ignore_errors', [])
self.init_tests_panel()
cmd = '"' + s.get('gjslint_path', 'jslint') + '" ' + s.get('gjslint_flags', '') + ' "' + file_path + '"'
if self.debug:
print "DEBUG: " + str(cmd)
AsyncProcess(cmd, self)
StatusProcess('Starting Closure Linter for file ' + file_name, self)
ClosureLinterEventListener.disabled = True
def init_tests_panel(self):
if not hasattr(self, 'output_view'):
self.output_view = self.window.get_output_panel(RESULT_VIEW_NAME)
self.output_view.set_name(RESULT_VIEW_NAME)
self.clear_test_view()
self.output_view.settings().set("file_path", self.file_path)
def show_tests_panel(self):
if self.tests_panel_showed:
return
self.window.run_command("show_panel", {"panel": "output."+RESULT_VIEW_NAME})
self.tests_panel_showed = True
def clear_test_view(self):
self.output_view.set_read_only(False)
edit = self.output_view.begin_edit()
self.output_view.erase(edit, sublime.Region(0, self.output_view.size()))
self.output_view.end_edit(edit)
self.output_view.set_read_only(True)
def append_data(self, proc, data, end=False):
self.buffered_data = self.buffered_data + data.decode("utf-8")
data = self.buffered_data.replace(self.file_path, self.file_name).replace('\r\n', '\n').replace('\r', '\n')
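    # Only complete lines are handed on below; a trailing partial line stays in
    # self.buffered_data until the next chunk of linter output arrives.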
if end == False:
rsep_pos = data.rfind('\n')
if rsep_pos == -1:
# not found full line.
return
self.buffered_data = data[rsep_pos+1:]
data = data[:rsep_pos+1]
# ignore error.
text = data
if len(self.ignore_errors) > 0:
text = ''
for line in data.split('\n'):
if len(line) == 0:
continue
ignored = False
for rule in self.ignore_errors:
if re.search(rule, line):
ignored = True
self.ignored_error_count += 1
if self.debug:
print "text match line "
print "rule = " + rule
print "line = " + line
print "---------"
break
if ignored == False:
text += line + '\n'
self.show_tests_panel()
selection_was_at_end = (len(self.output_view.sel()) == 1 and self.output_view.sel()[0] == sublime.Region(self.output_view.size()))
self.output_view.set_read_only(False)
edit = self.output_view.begin_edit()
self.output_view.insert(edit, self.output_view.size(), text)
if end:
text = '\nclosure linter: ignored ' + str(self.ignored_error_count) + ' errors.\n'
self.output_view.insert(edit, self.output_view.size(), text)
# if selection_was_at_end:
# self.output_view.show(self.output_view.size())
self.output_view.end_edit(edit)
self.output_view.set_read_only(True)
# if end:
# self.output_view.run_command("goto_line", {"line": 1})
def update_status(self, msg, progress):
sublime.status_message(msg + " " + progress)
def proc_terminated(self, proc):
if proc.returncode == 0:
msg = self.file_name + ' lint free!'
else:
msg = ''
self.append_data(proc, msg, True)
ClosureLinterEventListener.disabled = False
| bsd-3-clause | 6,959,701,586,400,418,000 | 31.33871 | 134 | 0.625436 | false | 3.294988 | true | false | false |
andrejbauer/jurij | jurij/graph.py | 1 | 4649 | # -*- encoding: utf-8 -*-
# A very simple implementation of graphs in python, including graphs
# embedded in the plane.
class Graph():
"""A graph stored as an adjacency dictionary."""
def __init__(self, data=None, vertices=None, edges=None,
vertex_labels=None, edge_labels=None):
"""Construct a graph to from the given data.
The object must define methods vertices() and edges() which
return iterators on vertices and edges, respectively. Vertices
are required to be hashable objects while edges are pairs of
vertices."""
if type(data) == dict:
# the graph is given as an adjancency dictionary
self.adjacency = dict([(x,set(ys)) for (x,ys) in data.items()])
elif type(data) in (list, tuple):
# the graph is given as a list of edges
self.adjacency = {}
for (x,y) in data:
self.adjacency[x] = set()
self.adjacency[y] = set()
for (x,y) in data: self.adjacency[x].add(y)
elif data is None:
self.adjacency = {}
if vertices is not None:
for x in vertices: self.adjacency[x] = set()
if edges is not None:
for (x,y) in edges:
if x not in self.adjacency: self.adjacency[x] = set()
if y not in self.adjacency: self.adjacency[y] = set()
self.adjacency[x].add(y)
else:
# the graph is given by an object which can produce
# a list of vertices and a list of edges
self.adjacency = dict([(x,set()) for x in data.vertices()])
for (x,y) in data.edges(): self.adjacency[x].add(y)
self.vertex_labels = {}
if vertex_labels is not None:
for x in self.adjacency:
if x in vertex_labels:
                    self.vertex_labels[x] = vertex_labels[x]
elif hasattr(data, 'vertex_label'):
for x in self.adjacency:
u = data.vertex_label(x)
if u is not None: self.vertex_labels[x] = u
self.edge_labels = {}
if edge_labels is not None:
for (x,ys) in self.adjacency.items():
for y in ys:
if (x,y) in edge_labels:
self.edge_labels[(x,y)] = edge_labels[(x,y)]
elif hasattr(data, 'edge_label'):
for (x,ys) in self.adjacency.items():
for y in ys:
u = data.edge_label((x,y))
if u is not None: self.edge_labels[(x,y)] = u
def __repr__(self):
return 'Graph({0})'.format(self.adjacency)
def vertices(self):
'''The set vertices of the graph as an iterator.'''
return self.adjacency.keys()
def edges(self):
'''The edges of the graph as an iterator.'''
for (u, vs) in self.adjacency.items():
for v in vs:
yield (u,v)
def opposite(self):
'''The opposite adjacency, i.e., with all edges reversed.'''
if hasattr(self, '_opposite_adjacency'):
return self._opposite_adjacency
else:
self._opposite_adjacency = dict([(x,set()) for x in self.vertices()])
for (x, ys) in self.adjacency.items():
for y in ys:
self._opposite_adjacency[y].add(x)
return self._opposite_adjacency
def vertex_label(self,x):
return self.vertex_labels.get(x)
def edge_label(self,e):
return self.edge_labels.get(e)
def add_vertex(self,x):
if x not in self.adjacency:
            self.adjacency[x] = set()
def remove_vertex(self,x):
del self.adjacency[x]
for xs in self.adjacency.values():
            xs.discard(x)  # discard rather than remove: x need not appear in every adjacency set
def add_edge(self,e):
(x,y) = e
self.adjacency[x].add(y)
def remove_edge(self,e):
(x,y) = e
self.adjacency[x].remove(y)
def vertex_size(self):
return len(self.vertices())
def edge_size(self):
        return sum(1 for _ in self.edges())  # edges() is a generator, so len() would fail
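# A minimal usage sketch (hypothetical data, not part of the original module):
#
#     g = Graph({0: [1, 2], 1: [2], 2: []})   # from an adjacency dictionary
#     h = Graph([('a', 'b'), ('b', 'c')])     # from a list of edges
#     sorted(g.vertices())                    # [0, 1, 2]
#     list(h.edges())                         # pairs such as ('a', 'b'), ('b', 'c')
#     g.add_edge((2, 0))
#     g.remove_vertex(1)                      # drops 1 and all edges touching it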
def product(g,h):
'''The product of graphs g and h.'''
return Graph(vertices = [(x,y) for x in g.vertices() for y in h.vertices()],
edges = [((x,u), (x,v)) for x in g.vertices() for (u,v) in h.edges()] +
[((u,y), (v,y)) for (u,v) in g.edges() for y in h.vertices()])
def cone(g):
'''The cone over g.'''
k = 0
adj = {}
for x in g.vertices():
adj[x] = g.adjacency[x]
if type(x) == int: k = max(k, x+1)
adj[k] = g.vertices()
return Graph(adj)
| bsd-2-clause | -6,448,450,689,787,050,000 | 35.03876 | 88 | 0.52678 | false | 3.779675 | false | false | false |
pxzhang94/GAN | GAN/wasserstein_gan/wgan_tensorflow.py | 1 | 3148 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
mb_size = 32
X_dim = 784
z_dim = 10
h_dim = 128
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
def plot(samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
X = tf.placeholder(tf.float32, shape=[None, X_dim])
D_W1 = tf.Variable(xavier_init([X_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
z = tf.placeholder(tf.float32, shape=[None, z_dim])
G_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
def generator(z):
G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
G_prob = tf.nn.sigmoid(G_log_prob)
return G_prob
def discriminator(x):
D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
out = tf.matmul(D_h1, D_W2) + D_b2
return out
G_sample = generator(z)
D_real = discriminator(X)
D_fake = discriminator(G_sample)
D_loss = tf.reduce_mean(D_real) - tf.reduce_mean(D_fake)
G_loss = -tf.reduce_mean(D_fake)
D_solver = (tf.train.RMSPropOptimizer(learning_rate=1e-4)
.minimize(-D_loss, var_list=theta_D))
G_solver = (tf.train.RMSPropOptimizer(learning_rate=1e-4)
.minimize(G_loss, var_list=theta_G))
clip_D = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in theta_D]
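# Notes on the training scheme encoded above (WGAN, Arjovsky et al. 2017):
# - D_loss / G_loss are the Wasserstein objectives: the critic maximises
#   E[D(x_real)] - E[D(G(z))] (hence minimising -D_loss), while the generator
#   minimises -E[D(G(z))].
# - clip_D clamps every critic weight to [-0.01, 0.01] at each critic step,
#   the original paper's crude way of enforcing the Lipschitz constraint.
# - The loop below performs 5 critic updates per generator update.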
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if not os.path.exists('out/'):
os.makedirs('out/')
i = 0
for it in range(1000000):
for _ in range(5):
X_mb, _ = mnist.train.next_batch(mb_size)
_, D_loss_curr, _ = sess.run(
[D_solver, D_loss, clip_D],
feed_dict={X: X_mb, z: sample_z(mb_size, z_dim)}
)
_, G_loss_curr = sess.run(
[G_solver, G_loss],
feed_dict={z: sample_z(mb_size, z_dim)}
)
if it % 100 == 0:
print('Iter: {}; D loss: {:.4}; G_loss: {:.4}'
.format(it, D_loss_curr, G_loss_curr))
if it % 1000 == 0:
samples = sess.run(G_sample, feed_dict={z: sample_z(16, z_dim)})
fig = plot(samples)
plt.savefig('out/{}.png'
.format(str(i).zfill(3)), bbox_inches='tight')
i += 1
plt.close(fig)
| apache-2.0 | -6,329,197,380,667,734,000 | 24.184 | 76 | 0.589263 | false | 2.557271 | false | false | false |
retorquere/zotero-better-bibtex | setup/item.py | 1 | 24607 | #!/usr/bin/env python3
from networkx.readwrite import json_graph
from collections import OrderedDict
import hashlib
import operator
import shlex
from functools import reduce
from http.client import RemoteDisconnected
from lxml import etree
from mako import exceptions
from mako.template import Template
from munch import Munch
from pytablewriter import MarkdownTableWriter
from urllib.error import HTTPError
from urllib.request import urlopen, urlretrieve, Request
import glob
import itertools
import json, jsonpatch, jsonpath_ng
import mako
import networkx as nx
import os
import sys
import re
import sys
import tarfile
import tempfile
import zipfile
import fnmatch
root = os.path.join(os.path.dirname(__file__), '..')
print('parsing Zotero/Juris-M schemas')
SCHEMA = Munch(root = os.path.join(root, 'schema'))
ITEMS = os.path.join(root, 'gen/items')
TYPINGS = os.path.join(root, 'gen/typings')
os.makedirs(SCHEMA.root, exist_ok=True)
os.makedirs(ITEMS, exist_ok=True)
os.makedirs(TYPINGS, exist_ok=True)
def readurl(url):
req = Request(url)
if ('api.github.com' in url) and (token := os.environ.get('GITHUB_TOKEN', None)): req.add_header('Authorization', f'token {token}')
return urlopen(req).read().decode('utf-8')
class fetch(object):
def __init__(self, client):
self.schema = os.path.join(SCHEMA.root, f'{client}.json')
if client == 'zotero':
releases = [
ref['ref'].split('/')[-1]
for ref in
json.loads(readurl('https://api.github.com/repos/zotero/zotero/git/refs/tags'))
]
releases += [
rel['version']
for rel in
json.loads(urlopen("https://www.zotero.org/download/client/manifests/release/updates-linux-x86_64.json").read().decode("utf-8"))
if not rel['version'] in releases
]
releases = [rel for rel in releases if rel.startswith('5.')]
releases = sorted(releases, key=lambda r: [int(n) for n in r.replace('m', '.').split('.')])
self.update(
client=client,
releases=releases,
download='https://www.zotero.org/download/client/dl?channel=release&platform=linux-x86_64&version={version}',
jarpath='Zotero_linux-x86_64/zotero.jar',
schema='resource/schema/global/schema.json'
)
elif client == 'jurism':
releases = [
ref['ref'].split('/')[-1].replace('v', '')
for ref in
json.loads(readurl('https://api.github.com/repos/juris-m/zotero/git/refs/tags'))
]
releases += [
rel
for rel in
readurl('https://github.com/Juris-M/assets/releases/download/client%2Freleases%2Fincrementals-linux/incrementals-release-linux').strip().split("\n")
if rel != '' and rel not in releases
]
releases = [rel for rel in releases if rel.startswith('5.') and 'm' in rel and not 'beta' in rel]
releases = sorted(releases, key=lambda r: [int(n) for n in r.replace('m', '.').split('.')])
self.update(
client=client,
releases=releases,
download='https://github.com/Juris-M/assets/releases/download/client%2Frelease%2F{version}/Jurism-{version}_linux-x86_64.tar.bz2',
jarpath='Jurism_linux-x86_64/jurism.jar',
schema='resource/schema/global/schema-jurism.json'
)
else:
raise ValueError(f'Unknown client {client}')
def hash(self, schema):
#print(schema.keys())
#'version', 'itemTypes', 'meta', 'csl', 'locales', 'release', 'hash'
return hashlib.sha512(json.dumps({ k: v for k, v in schema.items() if k in ('itemTypes', 'meta', 'csl')}, sort_keys=True).encode('utf-8')).hexdigest()
def update(self, client, releases, download, jarpath, schema):
hashes_cache = os.path.join(SCHEMA.root, 'hashes.json')
itemtypes = os.path.join(SCHEMA.root, f'{client}-type-ids.json')
if os.path.exists(hashes_cache):
with open(hashes_cache) as f:
hashes = json.load(f, object_hook=OrderedDict)
else:
hashes = OrderedDict()
if not client in hashes:
hashes[client] = OrderedDict()
current = releases[-1]
if current in hashes[client] and os.path.exists(self.schema) and os.path.exists(itemtypes):
return
elif 'CI' in os.environ:
raise ValueError(f'{self.schema} out of date')
print(' updating', os.path.basename(self.schema))
for release in releases:
if release != current and release in hashes[client]: continue
with tempfile.NamedTemporaryFile() as tarball:
print(' downloading', download.format(version=release))
try:
urlretrieve(download.format(version=release), tarball.name)
tar = tarfile.open(tarball.name, 'r:bz2')
jar = tar.getmember(jarpath)
print(' extracting', jar.name)
jar.name = os.path.basename(jar.name)
tar.extract(jar, path=os.path.dirname(tarball.name))
jar = zipfile.ZipFile(os.path.join(os.path.dirname(tarball.name), jar.name))
itt = fnmatch.filter(jar.namelist(), f'**/system-*-{client}.sql')
assert len(itt) <= 1, itt
if len(itt) == 1:
itt = itt[0]
else:
itt = fnmatch.filter(jar.namelist(), '**/system-*.sql')
assert len(itt) == 1, itt
itt = itt[0]
with jar.open(itt) as f, open(itemtypes, 'wb') as i:
i.write(f.read())
try:
with jar.open(schema) as f:
client_schema = json.load(f)
with open(self.schema, 'w') as f:
json.dump(client_schema, f, indent=' ')
hashes[client][release] = self.hash(client_schema)
print(' release', release, 'schema', client_schema['version'], 'hash', hashes[client][release])
except KeyError:
hashes[client][release] = None
print(' release', release, 'does not have a bundled schema')
except HTTPError as e:
if e.code in [ 403, 404 ]:
print(' release', release, 'not available')
hashes[client][release] = None
else:
raise e
with open(hashes_cache, 'w') as f:
json.dump(hashes, f, indent=' ')
def __enter__(self):
self.f = open(self.schema)
return self.f
def __exit__(self, type, value, traceback):
self.f.close()
class jsonpath:
finders = {}
@classmethod
def parse(cls, path):
if not path in cls.finders: cls.finders[path] = jsonpath_ng.parse(path)
return cls.finders[path]
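# Note: jsonpath.parse caches compiled matchers, so the many repeated calls
# below with the same expression (e.g. '$.itemTypes.*.fields.*') reuse a single
# jsonpath_ng parse result instead of recompiling it each time.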
def patch(s, *ps):
# field/type order doesn't matter for BBT
for it in s['itemTypes']:
assert 'creatorTypes' in it
# assures primary is first
    assert len(it['creatorTypes']) == 0 or [ct['creatorType'] for ct in it['creatorTypes'] if ct.get('primary', False)] == [it['creatorTypes'][0]['creatorType']]
s['itemTypes'] = {
itemType['itemType']: {
'itemType': itemType['itemType'],
'fields': { field['field']: field.get('baseField', field['field']) for field in itemType['fields'] },
'creatorTypes': [ct['creatorType'] for ct in itemType['creatorTypes'] ]
}
for itemType in s['itemTypes']
}
del s['locales']
for p in ps:
print('applying', p)
with open(os.path.join(SCHEMA.root, p)) as f:
s = jsonpatch.apply_patch(s, json.load(f))
return s
class ExtraFields:
def __init__(self):
self.changeid = 0
self.dg = nx.DiGraph()
self.color = Munch(
zotero='#33cccc',
csl='#99CC00',
label='#C0C0C0',
removed='#666666',
added='#0000FF'
)
def make_label(self, field):
label = field.replace('_', ' ').replace('-', ' ')
label = re.sub(r'([a-z])([A-Z])', r'\1 \2', label)
label = label.lower()
return label
def add_label(self, domain, name, label):
assert domain in ['csl', 'zotero'], (domain, name, label)
assert type(name) == str
assert type(label) == str
for label in [label, self.make_label(label)]:
attrs = {
'domain': 'label',
'name': label,
'graphics': {'h': 30.0, 'w': 7 * len(label), 'hasFill': 0, 'outline': self.color.label},
}
if re.search(r'[-_A-Z]', label): attrs['LabelGraphics'] = { 'color': self.color.label }
self.dg.add_node(f'label:{label}', **attrs)
self.dg.add_edge(f'label:{label}', f'{domain}:{name}', graphics={ 'targetArrow': 'standard' })
def add_mapping(self, from_, to, reverse=True):
mappings = [(from_, to)]
if reverse: mappings.append((to, from_))
for from_, to in mappings:
self.dg.add_edge(':'.join(from_), ':'.join(to), graphics={ 'targetArrow': 'standard' })
def add_var(self, domain, name, type_, client):
assert domain in ['csl', 'zotero']
assert type(name) == str
assert type_ in ['name', 'date', 'text']
node_id = f'{domain}:{name}'
if node_id in self.dg.nodes:
assert self.dg.nodes[node_id]['type'] == type_, (domain, name, self.dg.nodes[node_id]['type'], type_)
else:
self.dg.add_node(f'{domain}:{name}', domain=domain, name=name, type=type_, graphics={'h': 30.0, 'w': 7 * len(name), 'fill': self.color[domain]})
self.dg.nodes[node_id][client] = True
def load(self, schema, client):
typeof = {}
for field, meta in schema.meta.fields.items():
typeof[field] = meta.type
# add nodes & edges
for field, baseField in {str(f.path): f.value for f in jsonpath.parse('$.itemTypes.*.fields.*').find(schema)}.items():
self.add_var(domain='zotero', name=baseField, type_=typeof.get(baseField, 'text'), client=client)
for field in jsonpath.parse('$.itemTypes.*.creatorTypes[*]').find(schema):
self.add_var(domain='zotero', name=field.value, type_='name', client=client)
for fields in jsonpath.parse('$.csl.fields.text').find(schema):
for csl, zotero in fields.value.items():
self.add_var(domain='csl', name=csl, type_='text', client=client)
for field in zotero:
self.add_var(domain='zotero', name=field, type_='text', client=client)
self.add_mapping(from_=('csl', csl), to=('zotero', field))
for fields in jsonpath.parse('$.csl.fields.date').find(schema):
for csl, zotero in fields.value.items():
self.add_var(domain='csl', name=csl, type_='date', client=client)
if type(zotero) == str: zotero = [zotero] # juris-m has a list here, zotero strings
for field in zotero:
self.add_var(domain='zotero', name=field, type_='date', client=client)
self.add_mapping(from_=('csl', csl), to=('zotero', field))
for zotero, csl in schema.csl.names.items():
self.add_var(domain='csl', name=csl, type_='name', client=client)
self.add_var(domain='zotero', name=zotero, type_='name', client=client)
self.add_mapping(from_=('csl', csl), to=('zotero', zotero))
for field, type_ in schema.csl.unmapped.items():
if type_ != 'type': self.add_var(domain='csl', name=field, type_=type_, client=client)
# add labels
for node, data in list(self.dg.nodes(data=True)):
if data['domain'] == 'label': continue # how is this possible?
self.add_label(data['domain'], data['name'], data['name'])
for field, baseField in {str(f.path): f.value for f in jsonpath.parse('$.itemTypes.*.fields.*').find(schema)}.items():
if field == baseField: continue
self.add_label('zotero', baseField, field)
for alias, field in schema.csl.alias.items():
self.add_label('csl', field, alias)
def add_change(self, label, change):
if not label or label == '':
return str(change)
else:
return ','.join(label.split(',') + [ str(change) ])
def save(self):
stringizer = lambda x: self.dg.nodes[x]['name'] if x in self.dg.nodes else x
# remove multi-line text fields
for node, data in list(self.dg.nodes(data=True)):
if data['domain'] + '.' + data['name'] in [ 'zotero.abstractNote', 'zotero.extra', 'csl.abstract', 'csl.note' ]:
self.dg.remove_node(node)
# remove two or more incoming var edges, as that would incur overwrites (= data loss)
removed = set()
for node, data in self.dg.nodes(data=True):
incoming = reduce(lambda acc, edge: acc[self.dg.nodes[edge[0]]['domain']].append(edge) or acc, self.dg.in_edges(node), Munch(zotero=[], csl=[], label=[]))
for domain, edges in incoming.items():
if domain == 'label' or len(edges) < 2: continue
self.changeid += 1
for edge in edges:
removed.add(edge)
self.dg.edges[edge].update({
'removed': True,
'label': self.add_change(self.dg.edges[edge].get('label'), self.changeid),
'graphics': { 'style': 'dashed', 'fill': self.color.removed, 'targetArrow': 'standard' },
'LabelGraphics': { 'color': self.color.label },
})
# hop-through labels. Memorize here which labels had a direct connection *before any expansion*
labels = {
label: set([self.dg.nodes[edge[1]]['domain'] for edge in self.dg.out_edges(label)])
for label, data in self.dg.nodes(data=True)
if data['domain'] == 'label' and not re.search(r'[-_A-Z]', data['name']) # a label but not a shadow label
}
for u, vs in dict(nx.all_pairs_dijkstra_path(self.dg, weight=lambda u, v, d: None if d.get('removed', False) else 1)).items():
# only interested in shortest paths that originate in a label
if not u in labels: continue
for v, path in vs.items():
if u == v: continue # no loops obviously
if self.dg.has_edge(u, v): continue # already in place
if len(path) != 3: continue # only consider one-step hop-through
        # TODO: label already has direct edge to the hop-through domain -- this entails fanning out the data unnecessarily
if self.dg.nodes[v]['domain'] in labels[u]: continue
self.changeid += 1
for edge in zip(path, path[1:]):
self.dg.edges[edge].update({
'label': self.add_change(self.dg.edges[edge].get('label'), self.changeid),
})
self.dg.add_edge(u, v, label=str(self.changeid), added=True, graphics={ 'style': 'dashed', 'fill': self.color.added, 'targetArrow': 'standard' })
for u, vs in dict(nx.all_pairs_shortest_path(self.dg)).items():
if self.dg.nodes[u]['domain'] != 'label': continue
for v, path in vs.items():
# length of 3 means potential hop-through node
if u != v and len(path) == 3 and len(set(zip(path, path[1:])).intersection(removed)) > 0:
#print('removed', path)
pass
#for i, sg in enumerate(nx.weakly_connected_components(self.dg)):
# nx.draw(self.dg.subgraph(sg), with_labels=True)
# plt.savefig(f'{i}.png')
mapping = {}
for label, data in list(self.dg.nodes(data=True)):
if data['domain'] != 'label': continue
name = data['name']
var_nodes = [var for _, var in self.dg.out_edges(label)]
if len(var_nodes) == 0:
self.dg.remove_node(label)
else:
for var in var_nodes:
var = self.dg.nodes[var]
if not name in mapping: mapping[name] = {}
assert 'type' not in mapping[name] or mapping[name]['type'] == var['type']
mapping[name]['type'] = var['type']
domain = var['domain']
if not domain in mapping[name]: mapping[name][domain] = []
mapping[name][domain].append(var['name'])
# ensure names don't get mapped to multiple fields
for var, mapped in mapping.items():
if mapped['type'] != 'name': continue
assert len(mapped.get('zotero', [])) <= 1, (var, mapped)
assert len(mapped.get('csl', [])) <= 1, (var, mapped)
# docs
with open(os.path.join(root, 'site/layouts/shortcodes/extra-fields.md'), 'w') as f:
writer = MarkdownTableWriter()
writer.headers = ['label', 'type', 'zotero/jurism', 'csl']
writer.value_matrix = []
doc = {}
for label, data in self.dg.nodes(data=True):
if not ' ' in label or data['domain'] != 'label': continue
name = data['name']
doc[name] = {'zotero': [], 'csl': []}
for _, to in self.dg.out_edges(label):
data = self.dg.nodes[to]
if not 'type' in doc[name]:
doc[name]['type'] = data['type']
else:
assert doc[name]['type'] == data['type']
if data.get('zotero', False) == data.get('jurism', False):
postfix = ''
elif data.get('zotero'):
postfix = '\u00B2'
else:
postfix = '\u00B9'
doc[name][data['domain']].append(data['name'].replace('_', '\\_') + postfix)
for label, data in sorted(doc.items(), key=lambda x: x[0]):
writer.value_matrix.append((f'**{label}**', data['type'], ' / '.join(sorted(data['zotero'])), ' / '.join(sorted(data['csl']))))
writer.stream = f
writer.write_table()
with open(os.path.join(ITEMS, 'extra-fields.json'), 'w') as f:
json.dump(mapping, f, sort_keys=True, indent=' ')
# remove phantom labels for clarity
for label in [node for node, data in self.dg.nodes(data=True) if data['domain'] == 'label' and 'LabelGraphics' in data]:
self.dg.remove_node(label)
nx.write_gml(self.dg, 'mapping.gml', stringizer)
#with open('extra-fields-graph.json', 'w') as f:
# json.dump(json_graph.node_link_data(self.dg, {"link": "edges", "source": "from", "target": "to"}), f)
# # https://github.com/vasturiano/3d-force-graph
# https://neo4j.com/developer-blog/visualizing-graphs-in-3d-with-webgl/
#with open('mapping.json', 'w') as f:
# data = nx.readwrite.json_graph.node_link_data(self.dg)
# for node in data['nodes']:
# node.pop('graphics', None)
# node.pop('type', None)
# node['label'] = node.pop('name')
# for link in data['links']:
# link.pop('graphics', None)
# link.pop('LabelGraphics', None)
# json.dump(data, f, indent=' ')
with fetch('zotero') as z, fetch('jurism') as j:
print(' writing extra-fields')
ef = ExtraFields()
SCHEMA.zotero = Munch.fromDict(patch(json.load(z), 'schema.patch', 'zotero.patch'))
SCHEMA.jurism = Munch.fromDict(patch(json.load(j), 'schema.patch', 'jurism.patch'))
#with open('schema.json', 'w') as f:
# json.dump(SCHEMA.jurism, f, indent=' ')
# test for inconsistent basefield mapping
for schema in ['jurism', 'zotero']:
fieldmap = {}
for field_path, field, baseField in [(str(f.full_path), str(f.path), f.value) for f in jsonpath.parse(f'$.itemTypes.*.fields.*').find(SCHEMA[schema])]:
if not field in fieldmap:
fieldmap[field] = baseField
else:
assert baseField == fieldmap[field], (schema, field_path, baseField, fieldmap[field])
ef.load(SCHEMA.jurism, 'jurism')
ef.load(SCHEMA.zotero, 'zotero')
ef.save()
with open(os.path.join(SCHEMA.root, 'hashes.json')) as f:
min_version = {}
hashes = json.load(f, object_hook=OrderedDict)
for client in hashes.keys():
releases = [rel for rel, h in hashes[client].items() if h is not None]
current = releases[-1]
min_version[client] = current
for rel in reversed(releases):
if hashes[client][rel] != hashes[client][current]:
break
else:
min_version[client] = rel
with open(os.path.join(root, 'schema', 'supported.json'), 'w') as f:
json.dump(min_version, f)
print(' writing creators')
creators = {'zotero': {}, 'jurism': {}}
for creatorTypes in jsonpath.parse('*.itemTypes.*.creatorTypes').find(SCHEMA):
if len(creatorTypes.value) == 0: continue
client, itemType = operator.itemgetter(0, 2)(str(creatorTypes.full_path).split('.'))
if not itemType in creators[client]: creators[client][itemType] = []
for creatorType in creatorTypes.value:
creators[client][itemType].append(creatorType)
with open(os.path.join(ITEMS, 'creators.json'), 'w') as f:
json.dump(creators, f, indent=' ', default=lambda x: list(x))
def template(tmpl):
return Template(filename=os.path.join(root, 'setup/templates', tmpl))
print(' writing typing for serialized item')
with open(os.path.join(TYPINGS, 'serialized-item.d.ts'), 'w') as f:
fields = sorted(list(set(field.value for field in jsonpath.parse('*.itemTypes.*.fields.*').find(SCHEMA))))
itemTypes = sorted(list(set(field.value for field in jsonpath.parse('*.itemTypes.*.itemType').find(SCHEMA))))
print(template('items/serialized-item.d.ts.mako').render(fields=fields, itemTypes=itemTypes).strip(), file=f)
print(' writing field simplifier')
with open(os.path.join(ITEMS, 'items.ts'), 'w') as f:
valid = Munch(type={}, field={})
for itemType in jsonpath.parse('*.itemTypes.*.itemType').find(SCHEMA):
client = str(itemType.full_path).split('.')[0]
itemType = itemType.value
if not itemType in valid.type:
valid.type[itemType] = client
if itemType == 'note':
valid.field[itemType] = {field: 'true' for field in 'itemType tags note id itemID dateAdded dateModified'.split(' ')}
elif itemType == 'attachment':
valid.field[itemType] = {field: 'true' for field in 'itemType tags id itemID dateAdded dateModified'.split(' ')}
else:
valid.field[itemType] = {field: 'true' for field in 'itemType creators tags attachments notes seeAlso id itemID dateAdded dateModified multi'.split(' ')}
elif valid.type[itemType] != client:
valid.type[itemType] = 'true'
for field in jsonpath.parse('*.itemTypes.*.fields.*').find(SCHEMA):
client, itemType = operator.itemgetter(0, 2)(str(field.full_path).split('.'))
for field in [str(field.path), field.value]:
if not field in valid.field[itemType]:
valid.field[itemType][field] = client
elif valid.field[itemType][field] != client:
valid.field[itemType][field] = 'true'
# map aliases to base names
DG = nx.DiGraph()
for field in jsonpath.parse('*.itemTypes.*.fields.*').find(SCHEMA):
client = str(field.full_path).split('.')[0]
baseField = field.value
field = str(field.path)
if field == baseField: continue
if not (data := DG.get_edge_data(field, baseField, default=None)):
DG.add_edge(field, baseField, client=client)
elif data['client'] != client:
DG.edges[field, baseField]['client'] = 'both'
aliases = {}
for field, baseField, client in DG.edges.data('client'):
if not client in aliases: aliases[client] = {}
if not baseField in aliases[client]: aliases[client][baseField] = []
aliases[client][baseField].append(field)
# map names to basenames
names = Munch(field={}, type={})
names.field['dateadded'] = Munch(jurism='dateAdded', zotero='dateAdded')
names.field['datemodified'] = Munch(jurism='dateModified', zotero='dateModified')
labels = {}
for field in jsonpath.parse('*.itemTypes.*.fields.*').find(SCHEMA):
client, itemType = operator.itemgetter(0, 2)(str(field.full_path).split('.'))
baseField = field.value
field = str(field.path)
for section, field, name in [('field', field.lower(), baseField), ('field', baseField.lower(), baseField), ('type', itemType.lower(), itemType)]:
if not field in names[section]:
names[section][field] = Munch.fromDict({ client: name })
elif not client in names[section][field]:
names[section][field][client] = name
else:
assert names[section][field][client] == name, (client, section, field, names[section][field][client], name)
if name == 'numPages':
label = 'Number of pages'
else:
label = name[0].upper() + re.sub('([a-z])([A-Z])', lambda m: m.group(1) + ' ' + m.group(2).lower(), re.sub('[-_]', ' ', name[1:]))
if not field in labels:
labels[field] = Munch.fromDict({ client: label })
elif not client in labels[field]:
labels[field][client] = label
else:
assert labels[field][client] == label, (client, field, labels[field][client], label)
try:
print(template('items/items.ts.mako').render(names=names, labels=labels, valid=valid, aliases=aliases).strip(), file=f)
except:
print(exceptions.text_error_template().render())
#stringizer = lambda x: DG.nodes[x]['name'] if x in DG.nodes else x
#nx.write_gml(DG, 'fields.gml') # , stringizer)
print(' writing csl-types')
with open(os.path.join(ITEMS, 'csl-types.json'), 'w') as f:
types = set()
for type_ in jsonpath.parse('*.csl.types.*').find(SCHEMA):
types.add(str(type_.full_path).split('.')[-1])
for type_ in jsonpath.parse('*.csl.unmapped.*').find(SCHEMA):
if type_.value == 'type': types.add(str(type_.full_path).split('.')[-1])
json.dump(list(types), f)
| mit | -3,831,744,764,635,071,500 | 40.011667 | 161 | 0.61897 | false | 3.327519 | false | false | false |
hasadna/OpenTrain | webserver/opentrain/timetable/utils.py | 1 | 3883 | from models import TtStop,TtStopTime,TtTrip
import gtfs.models
from timetable.models import TtShape
import json
from common import ot_utils
import datetime
def build_from_gtfs(start_date,days=30):
build_stops()
end_date = start_date + datetime.timedelta(days=days-1)
print '=' * 50
print 'Start day = %s' % (start_date)
print 'End day = %s' % (end_date)
clean_trips(start_date, end_date)
build_trips(start_date, end_date)
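# Example invocation (hypothetical date; assumes the GTFS tables are populated):
#
#     import datetime
#     build_from_gtfs(datetime.date(2014, 6, 1), days=7)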
def build_stops():
stops = gtfs.models.Stop.objects.all()
for stop in stops:
if not TtStop.objects.filter(gtfs_stop_id=stop.stop_id).exists():
new_stop = TtStop(gtfs_stop_id = stop.stop_id,
stop_name = stop.stop_name,
stop_lat = stop.stop_lat,
stop_lon = stop.stop_lon,
stop_url = stop.stop_url)
new_stop.save()
print 'Added stop %s' % (new_stop)
def clean_trips(from_date,to_date):
qs = TtTrip.objects.filter(date__gte=from_date).filter(date__lte=to_date)
print 'Going to delete %s trips of dates %s to %s (incl)' % (qs.count(),from_date,to_date)
qs.delete()
def build_trips(from_date=None,to_date=None):
trips = gtfs.models.Trip.objects.all()
date_str = ot_utils.get_date_underscored()
print 'Total number of trips: %s' % (trips.count())
if from_date:
trips = trips.filter(service__start_date__gte=from_date)
if to_date:
trips = trips.filter(service__end_date__lte=to_date)
print 'number of trips in date range %s' % (trips.count())
trips_count = trips.count()
for idx,trip in enumerate(trips):
print 'Building trip %s/%s' % (idx,trips_count)
trip_date = trip.service.start_date
new_trip = TtTrip()
new_trip.gtfs_trip_id = trip.trip_id
new_trip.date = trip_date
assert trip.service.start_date == trip.service.end_date
new_trip.shape = _get_or_build_shape(trip.shape_id, date_str)
new_trip.save()
_build_stoptimes(new_trip,trip)
stops = list(new_trip.get_stop_times())
new_trip.from_stoptime = stops[0]
new_trip.to_stoptime = stops[-1]
new_trip.save()
def _get_or_build_shape(gtfs_shape_id,date_str):
try:
ttshape = TtShape.objects.get(gtfs_shape_id=gtfs_shape_id,gtfs_date_str=date_str)
return ttshape
except TtShape.DoesNotExist:
return _build_shape(gtfs_shape_id,date_str)
def _build_shape(gtfs_shape_id,date_str):
print 'Building shape for gtfs shape id = %s date_str = %s' % (gtfs_shape_id,date_str)
points = gtfs.models.Shape.objects.filter(shape_id=gtfs_shape_id).order_by('shape_pt_sequence')
point_list = []
for point in points:
point_list.append([point.shape_pt_lat,point.shape_pt_lon])
ttshape = TtShape(gtfs_shape_id=gtfs_shape_id,
gtfs_date_str=date_str,
points=json.dumps(point_list))
ttshape.save()
return ttshape
def _build_stoptimes(new_trip,trip):
stoptimes = trip.stoptime_set.all().order_by('stop_sequence')
new_stoptimes = []
for stoptime in stoptimes:
new_stop = TtStop.objects.get(gtfs_stop_id=stoptime.stop.stop_id)
exp_arrival = ot_utils.db_time_to_datetime(stoptime.arrival_time,new_trip.date)
exp_departure = ot_utils.db_time_to_datetime(stoptime.departure_time,new_trip.date)
new_stoptime = TtStopTime(stop=new_stop,
stop_sequence=stoptime.stop_sequence,
trip=new_trip,
exp_arrival=exp_arrival,
exp_departure=exp_departure)
new_stoptimes.append(new_stoptime)
TtStopTime.objects.bulk_create(new_stoptimes)
| bsd-3-clause | -7,730,519,627,983,113,000 | 40.308511 | 99 | 0.60443 | false | 3.156911 | false | false | false |
aaronj1335/cs388-final-project | bin/plot.py | 1 | 6512 | #!/usr/bin/env python
import os
from os.path import join
from itertools import chain
from math import log
from pylab import plot, show, legend, close, figure, title, xlabel, ylabel, barh, savefig
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
N = 0 # problem size
P1 = 1 # num of level 1 threads
P2 = 2 # num of level 2 threads
T = 3 # running time
# we still store all figures here
out_dir = "report/figures/"
# make the output directory if necessary
if not os.path.exists(out_dir):
os.makedirs(out_dir)
BLOCK = False
if 'BLOCK' not in globals():
BLOCK = True
def prettify(func):
def inner(*args, **kwargs):
# update a few params about the
mpl.rcParams['font.family'] = 'serif'
return func(*args, **kwargs)
return inner
def is_truthy(x):
return bool(x)
def partition_by(data, key_fn):
items = {}
for i in data:
key = key_fn(i)
if key not in items:
items[key] = []
items[key].append(i)
return items.values()
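# Rough example (hypothetical input) -- grouping integers by parity:
#
#     partition_by([1, 2, 3, 4, 5], lambda i: i % 2)
#     # -> two groups, [1, 3, 5] and [2, 4] (group order is not guaranteed)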
def read_data(d):
params = lambda f: tuple(map(int, f.split('.')[0].split('_')))
time = lambda f: (float(open(join(d, f)).read()),)
return sorted(params(f) + time(f) for f in os.listdir(d))
def weak_scaling_data(data):
wsdata = lambda d: [(i[P1] * i[P2], i[T], i) for i in d]
key_fn = lambda i: i[N] / (i[P1] * i[P2])
partitioned = partition_by(data, key_fn)
# select the fastest numbers
sets = []
ds = filter(is_truthy, [wsdata(d) for d in partitioned])
for d in ds:
seen = {}
for i in d:
if i[0] not in seen or seen[i[0]][1] > i[1]:
seen[i[0]] = i
sets.append(sorted(seen.values()))
max_len = max(map(len, sets))
sets = filter(lambda s: len(s) == max_len, sets)
return sets
def strong_scaling_data(data):
ssdata = lambda d: [(i[P1] * i[P2], i[T], i) for i in d]
key_fn = lambda i: i[N]
partitioned = partition_by(data, key_fn)
# select the fastest numbers
sets = []
ds = filter(is_truthy, [ssdata(d) for d in partitioned])
for d in ds:
seen = {}
for i in d:
if i[0] not in seen or seen[i[0]][1] > i[1]:
seen[i[0]] = i
sets.append(sorted(seen.values()))
return sets
@prettify
def plot_scaling(data, the_title, munger, labeler):
figure()
for d in munger(data):
zippd = zip(*d)
# special case
if 'Intel Weak' in the_title:
if str(2048*24) not in labeler(d):
continue
plot(zippd[0], zippd[1], 'o-', label=labeler(d))
legend()
xlabel('Threads')
ylabel('Time (seconds)')
ax = plt.gca()
current = map(int, ax.get_xticks())
# just to wash out dups
padding = sorted(set(current))
# put one tick at the end
padding += [max(padding) + padding[1] - padding[0]]
# ensure we start from zero
if padding[0] != 0:
padding = [0] + padding
# finalize xticks
ax.set_xticks(padding)
if 'Intel Weak' in the_title:
# force y axis to be int
yticks = ax.get_yticks()
# ensure these are ints
bounds = map(int, (min(yticks), max(yticks)))
ax.set_yticks(range(bounds[0]-2, bounds[1]+3))
t = "_".join(the_title.lower().split()) + ".pdf"
savefig(out_dir + t, dpi=100, format='pdf')
print t
def plot_weak_scaling(data, dataset=''):
labeler = lambda d: 'Ratio: ' + str(d[0][2][0] / (d[0][2][1] * d[0][2][2]) * 24)
plot_scaling(data, the_title=(dataset + ' Weak Scaling'),
munger=weak_scaling_data, labeler=labeler)
def plot_strong_scaling(data, dataset=''):
# need to multiply by 24 to find true problem size
labeler = lambda d: 'Problem size: ' + str(d[0][2][N] * 24)
plot_scaling(data, the_title=(dataset + ' Strong Scaling'),
munger=strong_scaling_data, labeler=labeler)
@prettify
def plot_parallelization_levels(data, n, p, dataset=''):
figure()
t = 'Coarse versus fine-grained parallelism'
if dataset:
t += ' (' + dataset + ')'
d = [(i[T], '%d X %d' % (i[P1], i[P2]))
for idx, i in enumerate(data)
if i[N] == n and i[P1] * i[P2] == p]
zippd = zip(*d)
xs = range(len(zippd[0]))
plot(xs, zippd[0], 'o-', label='Problem size: ' + str(n))
plt.xticks(xs, zippd[1])
legend()
xlabel('Coarse grained threads X fine grained threads')
ylabel('Time (seconds)')
t = "_".join(t.lower().split()) + ".pdf"
savefig(out_dir + t, dpi=100, format='pdf')
print t
@prettify
def plot_compiler_difference(gcc, intel):
n = max(i[N] for i in gcc)
gcc = [i for i in gcc if i[N] == n and i[P2] == 1]
intel = [i for i in intel if i[N] == n and i[P2] == 1]
d = [(i[P1] - 0.5, (i[T] - g[T]) / min(g[T], i[T]) * 100.)
for i, g in zip(intel, gcc)]
zippd = zip(*d)
figure()
plt.gca().xaxis.set_major_formatter(
FuncFormatter(lambda v, p: str(v) + ' %'))
t = 'Comparison of Intel and GNU comiler performance'
barh(zippd[0], zippd[1])
ylabel('Threads')
xlabel('Speedup')
t = "_".join(t.lower().split()) + ".pdf"
savefig(out_dir + t, dpi=100, format='pdf')
print t
data = wdata = sdata = intel_total_time = gcc_total_time = gcc_data = intel_data = None
if __name__ == '__main__':
close(); close(); close(); close(); close(); close(); close(); close(); close(); # lololol
data = gcc_data = read_data('goodtokno/tacc_gcc47_O3_2048')
wdata = weak_scaling_data(data)
sdata = strong_scaling_data(data)
gcc_total_time = sum(map(lambda i: i[T], data))
plot_strong_scaling(data, dataset='GCC')
plot_weak_scaling(data, dataset='GCC')
data = intel_data = read_data('goodtokno/tacc_intel_O3_8192')
wdata = weak_scaling_data(data)
sdata = strong_scaling_data(data)
intel_total_time = sum(map(lambda i: i[T], data))
plot_strong_scaling(data, dataset='Intel')
plot_weak_scaling(data, dataset='Intel')
plot_parallelization_levels(intel_data, 8192, 8, dataset='Intel')
plot_parallelization_levels(gcc_data, 2048, 8, dataset='GCC')
plot_compiler_difference(gcc_data, intel_data)
data = read_data('goodtokno/tacc_gcc47_O3_coarsevsfineperf')
plot_strong_scaling(data, dataset='GCC Without Nested Parallelism -')
if BLOCK:
raw_input()
| bsd-2-clause | -7,641,680,713,887,290,000 | 27.814159 | 94 | 0.578163 | false | 3.07025 | false | false | false |
CommunityHoneyNetwork/CHN-Server | mhn/auth/views.py | 2 | 5341 | import hashlib
import random
from flask import Blueprint, request, jsonify
from flask_mail import Message
from sqlalchemy.exc import IntegrityError
from flask_security.utils import (
login_user as login, verify_and_update_password,
encrypt_password, logout_user as logout)
from mhn import db, mail
from mhn import user_datastore
from mhn.common.utils import error_response
from mhn.auth.models import User, PasswdReset, ApiKey
from mhn.auth import errors
from mhn.auth import (
get_datastore, login_required, roles_accepted, current_user)
from mhn.api import errors as apierrors
import uuid
auth = Blueprint('auth', __name__, url_prefix='/auth')
@auth.route('/login/', methods=['POST'])
def login_user():
if 'email' not in request.json:
return error_response(errors.AUTH_EMAIL_MISSING, 400)
if 'password' not in request.json:
return error_response(errors.AUTH_PSSWD_MISSING, 400)
# email and password are in the posted data.
user = User.query.filter_by(
email=request.json.get('email')).first()
psswd_check = False
if user:
psswd_check = verify_and_update_password(
request.json.get('password'), user)
if user and psswd_check:
login(user, remember=True)
return jsonify(user.to_dict())
else:
return error_response(errors.AUTH_INCORRECT_CREDENTIALS, 401)
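# Example request against this endpoint (sketch; values are placeholders):
#
#     POST /auth/login/
#     Content-Type: application/json
#
#     {"email": "[email protected]", "password": "secret"}
#
# A successful login returns the serialized user; bad credentials yield a 401.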
@auth.route('/logout/', methods=['GET'])
def logout_user():
logout()
return jsonify({})
@auth.route('/user/', methods=['POST'])
@auth.route('/register/', methods=['POST'])
@roles_accepted('admin')
def create_user():
missing = User.check_required(request.json)
if missing:
return error_response(
apierrors.API_FIELDS_MISSING.format(missing), 400)
else:
user = get_datastore().create_user(
email=request.json.get('email'),
password=encrypt_password(request.json.get('password')))
userrole = user_datastore.find_role('admin')
user_datastore.add_role_to_user(user, userrole)
try:
db.session.add(user)
db.session.flush()
apikey = ApiKey(user_id=user.id, api_key=str(uuid.uuid4()).replace("-", ""))
db.session.add(apikey)
db.session.commit()
except IntegrityError:
return error_response(errors.AUTH_USERNAME_EXISTS, 400)
else:
return jsonify(user.to_dict())
@auth.route('/user/<user_id>/', methods=['DELETE'])
@roles_accepted('admin')
def delete_user(user_id):
user = User.query.get(user_id)
if not user:
return error_response(errors.AUTH_NOT_FOUND.format(user_id), 404)
    user.active = False
db.session.add(user)
db.session.commit()
return jsonify({})
@auth.route('/resetrequest/', methods=['POST'])
def reset_passwd_request():
if 'email' not in request.json:
return error_response(errors.AUTH_EMAIL_MISSING, 400)
email = request.json['email']
user = User.query.filter_by(email=email).first()
if not user:
return error_response(errors.AUTH_NOT_FOUND.format(email), 404)
hashstr = hashlib.sha1(str(random.getrandbits(128)) + user.email).hexdigest()
# Deactivate all other password resets for this user.
PasswdReset.query.filter_by(user=user).update({'active': False})
reset = PasswdReset(hashstr=hashstr, active=True, user=user)
db.session.add(reset)
db.session.commit()
# Send password reset email to user.
from mhn import mhn
msg = Message(
html=reset.email_body, subject='MHN Password reset',
recipients=[user.email], sender=mhn.config['DEFAULT_MAIL_SENDER'])
try:
mail.send(msg)
except:
return error_response(errors.AUTH_SMTP_ERROR, 500)
else:
return jsonify({})
@auth.route('/changepass/', methods=['POST'])
def change_passwd():
password = request.json.get('password')
password_repeat = request.json.get('password_repeat')
if not password or not password_repeat:
# Request body is not complete.
return error_response(errors.AUTH_RESET_MISSING, 400)
if password != password_repeat:
# Password do not match.
return error_response(errors.AUTH_PASSWD_MATCH, 400)
if current_user.is_authenticated:
# No need to check password hash object or email.
user = current_user
else:
email = request.json.get('email')
hashstr = request.json.get('hashstr')
if not email or not hashstr:
# Request body is not complete for not authenticated
# request, ie, uses password reset hash.
return error_response(errors.AUTH_RESET_MISSING, 400)
reset = db.session.query(PasswdReset).join(User).\
filter(User.email == email, PasswdReset.active == True).\
filter(PasswdReset.hashstr == hashstr).\
first()
if not reset:
return error_response(errors.AUTH_RESET_HASH, 404)
db.session.add(reset)
reset.active = False
user = reset.user
user.password = encrypt_password(password)
db.session.add(user)
db.session.commit()
return jsonify({})
@auth.route('/me/', methods=['GET'])
@login_required
def get_user():
return jsonify(current_user.to_dict())
| lgpl-2.1 | 2,775,682,520,948,707,000 | 33.458065 | 88 | 0.644823 | false | 3.774558 | false | false | false |
tlksio/tlksio | env/lib/python3.4/site-packages/pylint/checkers/base.py | 1 | 37578 | # Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# Copyright (c) 2009-2010 Arista Networks, Inc.
# http://www.logilab.fr/ -- mailto:[email protected]
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""basic checker for Python code"""
from logilab import astng
from logilab.common.ureports import Table
from logilab.astng import are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.reporters import diff_string
from pylint.checkers import BaseChecker, EmptyReport
from pylint.checkers.utils import (
check_messages,
clobber_in_except,
is_inside_except,
safe_infer,
)
import re
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$')
# do not require a doc string on system methods
NO_REQUIRED_DOC_RGX = re.compile('__.*__')
del re
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(parent, (astng.For, astng.ListComp, astng.SetComp,
astng.DictComp, astng.GenExpr)):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _loop_exits_early(loop):
"""Returns true if a loop has a break statement in its body."""
loop_nodes = (astng.For, astng.While)
# Loop over body explicitly to avoid matching break statements
# in orelse.
for child in loop.body:
if isinstance(child, loop_nodes):
continue
for _ in child.nodes_of_class(astng.Break, skip_klass=loop_nodes):
return True
return False
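# For example, a loop like the following is reported (W0120, useless-else-on-loop)
# because its body contains no break, so the else clause always runs
# (process/done are placeholder names):
#
#     for item in items:
#         process(item)
#     else:
#         done()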
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ('module', 'class', 'method', 'function'):
try:
total = stats[node_type]
except KeyError:
raise EmptyReport()
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats['undocumented_'+node_type]
percent = (documented * 100.) / total
nice_stats[node_type]['percent_documented'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_documented'] = 'NC'
try:
percent = (stats['badname_'+node_type] * 100.) / total
nice_stats[node_type]['percent_badname'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_badname'] = 'NC'
lines = ('type', 'number', 'old number', 'difference',
'%documented', '%badname')
for node_type in ('module', 'class', 'method', 'function'):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = diff_string(old, new)
else:
old, diff_str = 'NC', 'NC'
lines += (node_type, str(new), str(old), diff_str,
nice_stats[node_type].get('percent_documented', '0'),
nice_stats[node_type].get('percent_badname', '0'))
sect.append(Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (isinstance(decorator, astng.Getattr) and
getattr(decorator.expr, 'name', None) == node.name):
return True
return False
class _BasicChecker(BaseChecker):
__implements__ = IASTNGChecker
name = 'basic'
class BasicErrorChecker(_BasicChecker):
msgs = {
'E0100': ('__init__ method is a generator',
'init-is-generator',
'Used when the special class method __init__ is turned into a '
'generator by a yield in its body.'),
'E0101': ('Explicit return in __init__',
'return-in-init',
'Used when the special class method __init__ has an explicit \
return value.'),
'E0102': ('%s already defined line %s',
'function-redefined',
'Used when a function / class / method is redefined.'),
'E0103': ('%r not properly in loop',
'not-in-loop',
'Used when break or continue keywords are used outside a loop.'),
'E0104': ('Return outside function',
'return-outside-function',
'Used when a "return" statement is found outside a function or '
'method.'),
'E0105': ('Yield outside function',
'yield-outside-function',
'Used when a "yield" statement is found outside a function or '
'method.'),
'E0106': ('Return with argument inside generator',
'return-arg-in-generator',
'Used when a "return" statement with an argument is found '
'outside in a generator function or method (e.g. with some '
'"yield" statements).'),
'E0107': ("Use of the non-existent %s operator",
'nonexistent-operator',
"Used when you attempt to use the C-style pre-increment or"
"pre-decrement operator -- and ++, which doesn't exist in Python."),
'E0108': ('Duplicate argument name %s in function definition',
'duplicate-argument-name',
'Duplicate argument names in function definitions are syntax'
' errors.'),
'W0120': ('Else clause on loop without a break statement',
'useless-else-on-loop',
'Loops should only have an else clause if they can exit early '
'with a break statement, otherwise the statements under else '
'should be on the same scope as the loop itself.'),
}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
@check_messages('E0102')
def visit_class(self, node):
self._check_redefinition('class', node)
@check_messages('E0100', 'E0101', 'E0102', 'E0106', 'E0108')
def visit_function(self, node):
if not redefined_by_decorator(node):
self._check_redefinition(node.is_method() and 'method' or 'function', node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(astng.Return,
skip_klass=(astng.Function, astng.Class))
if node.is_method() and node.name == '__init__':
if node.is_generator():
self.add_message('E0100', node=node)
else:
values = [r.value for r in returns]
if [v for v in values if not (v is None or
(isinstance(v, astng.Const) and v.value is None)
or (isinstance(v, astng.Name) and v.name == 'None'))]:
self.add_message('E0101', node=node)
elif node.is_generator():
# make sure we don't mix non-None returns and yields
for retnode in returns:
if isinstance(retnode.value, astng.Const) and \
retnode.value.value is not None:
self.add_message('E0106', node=node,
line=retnode.fromlineno)
args = set()
for name in node.argnames():
if name in args:
self.add_message('E0108', node=node, args=(name,))
else:
args.add(name)
@check_messages('E0104')
def visit_return(self, node):
if not isinstance(node.frame(), astng.Function):
self.add_message('E0104', node=node)
@check_messages('E0105')
def visit_yield(self, node):
if not isinstance(node.frame(), (astng.Function, astng.Lambda)):
self.add_message('E0105', node=node)
@check_messages('E0103')
def visit_continue(self, node):
self._check_in_loop(node, 'continue')
@check_messages('E0103')
def visit_break(self, node):
self._check_in_loop(node, 'break')
@check_messages('W0120')
def visit_for(self, node):
self._check_else_on_loop(node)
@check_messages('W0120')
def visit_while(self, node):
self._check_else_on_loop(node)
@check_messages('E0107')
def visit_unaryop(self, node):
"""check use of the non-existent ++ adn -- operator operator"""
if ((node.op in '+-') and
isinstance(node.operand, astng.UnaryOp) and
(node.operand.op == node.op)):
self.add_message('E0107', node=node, args=node.op*2)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message('W0120', node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astng.For, astng.While)):
break
_node = _node.parent
else:
self.add_message('E0103', node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
defined_self = node.parent.frame()[node.name]
if defined_self is not node and not are_exclusive(node, defined_self):
self.add_message('E0102', node=node,
args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* modules / classes / functions / methods / arguments / variables name
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = IASTNGChecker
name = 'basic'
msgs = {
'W0101': ('Unreachable code',
'unreachable',
'Used when there is some code behind a "return" or "raise" \
statement, which will never be accessed.'),
'W0102': ('Dangerous default value %s as argument',
'dangerous-default-value',
'Used when a mutable value as list or dictionary is detected in \
a default value for an argument.'),
'W0104': ('Statement seems to have no effect',
'pointless-statement',
'Used when a statement doesn\'t have (or at least seems to) \
any effect.'),
'W0105': ('String statement has no effect',
'pointless-string-statement',
'Used when a string is used as a statement (which of course \
has no effect). This is a particular case of W0104 with its \
own message so you can easily disable it if you\'re using \
those strings as documentation, instead of comments.'),
'W0106': ('Expression "%s" is assigned to nothing',
'expression-not-assigned',
'Used when an expression that is not a function call is assigned\
to nothing. Probably something else was intended.'),
'W0108': ('Lambda may not be necessary',
'unnecessary-lambda',
'Used when the body of a lambda expression is a function call \
on the same argument list as the lambda itself; such lambda \
expressions are in all but a few cases replaceable with the \
function being called in the body of the lambda.'),
'W0109': ("Duplicate key %r in dictionary",
'duplicate-key',
"Used when a dictionary expression binds the same key multiple \
times."),
'W0122': ('Use of the exec statement',
'exec-statement',
'Used when you use the "exec" statement, to discourage its \
usage. That doesn\'t mean you can not use it !'),
'W0141': ('Used builtin function %r',
'bad-builtin',
'Used when a black listed builtin function is used (see the '
'bad-function option). Usual black listed functions are the ones '
'like map, or filter , where Python offers now some cleaner '
'alternative like list comprehension.'),
'W0142': ('Used * or ** magic',
'star-args',
'Used when a function or method is called using `*args` or '
'`**kwargs` to dispatch arguments. This doesn\'t improve '
'readability and should be used with care.'),
'W0150': ("%s statement in finally block may swallow exception",
'lost-exception',
"Used when a break or a return statement is found inside the \
finally clause of a try...finally block: the exceptions raised \
in the try clause will be silently swallowed instead of being \
re-raised."),
'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
'assert-on-tuple',
'A call of assert on a tuple will always evaluate to true if '
'the tuple is not empty, and will always evaluate to false if '
'it is.'),
'C0121': ('Missing required attribute "%s"', # W0103
'missing-module-attribute',
'Used when an attribute required for modules is missing.'),
}
options = (('required-attributes',
{'default' : (), 'type' : 'csv',
'metavar' : '<attributes>',
'help' : 'Required attributes for module, separated by a '
'comma'}
),
('bad-functions',
{'default' : ('map', 'filter', 'apply', 'input'),
'type' :'csv', 'metavar' : '<builtin function names>',
'help' : 'List of builtins function names that should not be '
'used, separated by a comma'}
),
)
reports = ( ('RP0101', 'Statistics by type', report_by_type_stats), )
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics
"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0,
method=0, class_=0)
def visit_module(self, node):
"""check module name, docstring and required arguments
"""
self.stats['module'] += 1
for attr in self.config.required_attributes:
if attr not in node:
self.add_message('C0121', node=node, args=attr)
def visit_class(self, node):
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats['class'] += 1
@check_messages('W0104', 'W0105')
def visit_discard(self, node):
"""check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astng.Const) and isinstance(expr.value,
str):
# treat string statement in a separated message
self.add_message('W0105', node=node)
return
# ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield (which are wrapped by a discard node in _ast XXX)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else W0104
if (isinstance(expr, (astng.Yield, astng.CallFunc)) or
(isinstance(node.parent, astng.TryExcept) and
node.parent.body == [node])):
return
if any(expr.nodes_of_class(astng.CallFunc)):
self.add_message('W0106', node=node, args=expr.as_string())
else:
self.add_message('W0104', node=node)
@check_messages('W0108')
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious
"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astng.CallFunc):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
# XXX are lambda still different with astng >= 0.18 ?
# *args and **kwargs need to be treated specially, since they
# are structured differently between the lambda and the function
# call (in the lambda they appear in the args.args list and are
# indicated as * and ** by two bits in the lambda's flags, but
# in the function call they are omitted from the args list and
# are indicated by separate attributes on the function call node).
ordinary_args = list(node.args.args)
if node.args.kwarg:
if (not call.kwargs
or not isinstance(call.kwargs, astng.Name)
or node.args.kwarg != call.kwargs.name):
return
elif call.kwargs:
return
if node.args.vararg:
if (not call.starargs
or not isinstance(call.starargs, astng.Name)
or node.args.vararg != call.starargs.name):
return
elif call.starargs:
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(call.args):
return
for i in range(len(ordinary_args)):
if not isinstance(call.args[i], astng.Name):
return
if node.args.args[i].name != call.args[i].name:
return
self.add_message('W0108', line=node.fromlineno, node=node)
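    # Example of the pattern flagged above (illustrative only): the lambda
    # forwards its argument list unchanged, so W0108 suggests it is redundant:
    #   sorted(data, key=lambda x: keyfunc(x))   # could just be key=keyfunc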
def visit_function(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats[node.is_method() and 'method' or 'function'] += 1
# check for dangerous default values as arguments
for default in node.args.defaults:
try:
value = next(default.infer())
except astng.InferenceError:
continue
if (isinstance(value, astng.Instance) and
value.qname() in ('__builtin__.set', '__builtin__.dict', '__builtin__.list')):
if value is default:
msg = default.as_string()
elif type(value) is astng.Instance:
msg = '%s (%s)' % (default.as_string(), value.qname())
else:
msg = '%s (%s)' % (default.as_string(), value.as_string())
self.add_message('W0102', node=node, args=(msg,))
@check_messages('W0101', 'W0150')
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, 'return', (astng.Function,))
@check_messages('W0101')
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('W0101', 'W0150')
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
# 2 - Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, 'break', (astng.For, astng.While,))
@check_messages('W0101')
def visit_raise(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('W0122')
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message('W0122', node=node)
@check_messages('W0141', 'W0142')
def visit_callfunc(self, node):
"""visit a CallFunc node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
if isinstance(node.func, astng.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or
name in node.root()):
if name in self.config.bad_functions:
self.add_message('W0141', node=node, args=name)
if node.starargs or node.kwargs:
scope = node.scope()
if isinstance(scope, astng.Function):
toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
(node.kwargs, scope.args.kwarg)) if n]
if toprocess:
for cfnode, fargname in toprocess[:]:
if getattr(cfnode, 'name', None) == fargname:
toprocess.remove((cfnode, fargname))
if not toprocess:
return # W0142 can be skipped
self.add_message('W0142', node=node.func)
@check_messages('W0199')
def visit_assert(self, node):
"""check the use of an assert statement on a tuple."""
if node.fail is None and isinstance(node.test, astng.Tuple) and \
len(node.test.elts) == 2:
self.add_message('W0199', node=node)
@check_messages('W0109')
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astng.Const):
key = k.value
if key in keys:
self.add_message('W0109', node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message('W0101', node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If we found before a try...finally bloc a parent which its type is
in breaker_classes, we skip the whole check."""
# if self._tryfinallys is empty, we're not a in try...finally bloc
if not self._tryfinallys:
return
# the node could be a grand-grand...-children of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
self.add_message('W0150', node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
class NameChecker(_BasicChecker):
msgs = {
'C0102': ('Black listed name "%s"',
'blacklisted-name',
'Used when the name is listed in the black list (unauthorized \
names).'),
'C0103': ('Invalid name "%s" for type %s (should match %s)',
'invalid-name',
'Used when the name doesn\'t match the regular expression \
associated to its type (constant, variable, class...).'),
}
options = (('module-rgx',
{'default' : MOD_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module names'}
),
('const-rgx',
{'default' : CONST_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module level names'}
),
('class-rgx',
{'default' : CLASS_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'class names'}
),
('function-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'function names'}
),
('method-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'method names'}
),
('attr-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'instance attribute names'}
),
('argument-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'argument names'}),
('variable-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'variable names'}
),
('inlinevar-rgx',
{'default' : COMP_VAR_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'list comprehension / generator expression variable \
names'}
),
# XXX use set
('good-names',
{'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Good variable names which should always be accepted,'
' separated by a comma'}
),
('bad-names',
{'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Bad variable names which should always be refused, '
'separated by a comma'}
),
)
def open(self):
self.stats = self.linter.add_stats(badname_module=0,
badname_class=0, badname_function=0,
badname_method=0, badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0)
@check_messages('C0102', 'C0103')
def visit_module(self, node):
self._check_name('module', node.name.split('.')[-1], node)
@check_messages('C0102', 'C0103')
def visit_class(self, node):
self._check_name('class', node.name, node)
for attr, anodes in node.instance_attrs.items():
self._check_name('attr', attr, anodes[0])
@check_messages('C0102', 'C0103')
def visit_function(self, node):
self._check_name(node.is_method() and 'method' or 'function',
node.name, node)
# check arguments name
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
@check_messages('C0102', 'C0103')
def visit_assname(self, node):
"""check module level assigned names"""
frame = node.frame()
ass_type = node.ass_type()
        if isinstance(ass_type, astng.Comprehension):
self._check_name('inlinevar', node.name, node)
elif isinstance(frame, astng.Module):
if isinstance(ass_type, astng.Assign) and not in_loop(ass_type):
self._check_name('const', node.name, node)
elif isinstance(ass_type, astng.ExceptHandler):
self._check_name('variable', node.name, node)
elif isinstance(frame, astng.Function):
# global introduced variable aren't in the function locals
if node.name in frame:
self._check_name('variable', node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astng.AssName):
self._check_name('argument', arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _check_name(self, node_type, name, node):
"""check for a name using the type's regexp"""
if is_inside_except(node):
clobbering, _ = clobber_in_except(node)
if clobbering:
return
if name in self.config.good_names:
return
if name in self.config.bad_names:
self.stats['badname_' + node_type] += 1
self.add_message('C0102', node=node, args=name)
return
regexp = getattr(self.config, node_type + '_rgx')
if regexp.match(name) is None:
            type_label = {'inlinevar': 'inlined variable',
'const': 'constant',
'attr': 'attribute',
}.get(node_type, node_type)
self.add_message('C0103', node=node, args=(name, type_label, regexp.pattern))
self.stats['badname_' + node_type] += 1
class DocStringChecker(_BasicChecker):
msgs = {
'C0111': ('Missing docstring', # W0131
'missing-docstring',
'Used when a module, function, class or method has no docstring.\
Some special methods like __init__ doesn\'t necessary require a \
docstring.'),
'C0112': ('Empty docstring', # W0132
'empty-docstring',
'Used when a module, function, class or method has an empty \
docstring (it would be too easy ;).'),
}
options = (('no-docstring-rgx',
{'default' : NO_REQUIRED_DOC_RGX,
'type' : 'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match '
'functions or classes name which do not require a '
'docstring'}
),
)
def open(self):
self.stats = self.linter.add_stats(undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0)
def visit_module(self, node):
self._check_docstring('module', node)
def visit_class(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring('class', node)
def visit_function(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = node.is_method() and 'method' or 'function'
if isinstance(node.parent.frame(), astng.Class):
overridden = False
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and \
isinstance(ancestor[node.name], astng.Function):
overridden = True
break
if not overridden:
self._check_docstring(ftype, node)
else:
self._check_docstring(ftype, node)
def _check_docstring(self, node_type, node):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
self.stats['undocumented_'+node_type] += 1
self.add_message('C0111', node=node)
elif not docstring.strip():
self.stats['undocumented_'+node_type] += 1
self.add_message('C0112', node=node)
class PassChecker(_BasicChecker):
"""check is the pass statement is really necessary"""
msgs = {'W0107': ('Unnecessary pass statement',
'unnecessary-pass',
'Used when a "pass" statement that can be avoided is '
'encountered.'),
}
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1:
self.add_message('W0107', node=node)
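    # Illustrative trigger for W0107 (not from pylint's own docs): a "pass"
    # alongside other statements in the same block is redundant, e.g.
    #   def f():
    #       do_something()
    #       pass          # -> unnecessary-pass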
class LambdaForComprehensionChecker(_BasicChecker):
"""check for using a lambda where a comprehension would do.
See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196>
where GvR says comprehensions would be clearer.
"""
msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension',
'deprecated-lambda',
'Used when a lambda is the first argument to "map" or '
'"filter". It could be clearer as a list '
'comprehension or generator expression.'),
}
@check_messages('W0110')
def visit_callfunc(self, node):
"""visit a CallFunc node, check if map or filter are called with a
lambda
"""
if not node.args:
return
if not isinstance(node.args[0], astng.Lambda):
return
infered = safe_infer(node.func)
if (infered
and infered.parent.name == '__builtin__'
and infered.name in ['map', 'filter']):
self.add_message('W0110', node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(LambdaForComprehensionChecker(linter))
| mit | 1,372,308,262,561,143,800 | 41.033557 | 94 | 0.551599 | false | 4.217982 | false | false | false |
ideamonk/apt-offline | apt_offline_gui/Ui_AptOfflineQtAbout.py | 1 | 8800 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'AptOfflineQtAbout.ui'
#
# Created: Sun Nov 7 20:54:52 2010
# by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_AboutAptOffline(object):
def setupUi(self, AboutAptOffline):
AboutAptOffline.setObjectName("AboutAptOffline")
AboutAptOffline.setWindowModality(QtCore.Qt.ApplicationModal)
AboutAptOffline.resize(526, 378)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(AboutAptOffline.sizePolicy().hasHeightForWidth())
AboutAptOffline.setSizePolicy(sizePolicy)
AboutAptOffline.setMinimumSize(QtCore.QSize(526, 378))
AboutAptOffline.setMaximumSize(QtCore.QSize(526, 378))
self.label = QtGui.QLabel(AboutAptOffline)
self.label.setGeometry(QtCore.QRect(12, 30, 511, 21))
font = QtGui.QFont()
font.setPointSize(16)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.tabWidget = QtGui.QTabWidget(AboutAptOffline)
self.tabWidget.setGeometry(QtCore.QRect(7, 90, 510, 241))
self.tabWidget.setObjectName("tabWidget")
self.aboutTab = QtGui.QWidget()
self.aboutTab.setObjectName("aboutTab")
self.label_3 = QtGui.QLabel(self.aboutTab)
self.label_3.setGeometry(QtCore.QRect(10, 20, 491, 31))
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
self.label_14 = QtGui.QLabel(self.aboutTab)
self.label_14.setGeometry(QtCore.QRect(10, 46, 481, 61))
self.label_14.setWordWrap(True)
self.label_14.setObjectName("label_14")
self.tabWidget.addTab(self.aboutTab, "")
self.authorTab = QtGui.QWidget()
self.authorTab.setObjectName("authorTab")
self.label_4 = QtGui.QLabel(self.authorTab)
self.label_4.setGeometry(QtCore.QRect(10, 10, 111, 16))
self.label_4.setObjectName("label_4")
self.label_5 = QtGui.QLabel(self.authorTab)
self.label_5.setGeometry(QtCore.QRect(30, 30, 271, 16))
self.label_5.setObjectName("label_5")
self.label_6 = QtGui.QLabel(self.authorTab)
self.label_6.setGeometry(QtCore.QRect(10, 60, 131, 16))
self.label_6.setObjectName("label_6")
self.label_7 = QtGui.QLabel(self.authorTab)
self.label_7.setGeometry(QtCore.QRect(30, 80, 261, 16))
self.label_7.setObjectName("label_7")
self.label_8 = QtGui.QLabel(self.authorTab)
self.label_8.setGeometry(QtCore.QRect(30, 100, 271, 16))
self.label_8.setObjectName("label_8")
self.tabWidget.addTab(self.authorTab, "")
self.thanksTab = QtGui.QWidget()
self.thanksTab.setObjectName("thanksTab")
self.label_9 = QtGui.QLabel(self.thanksTab)
self.label_9.setGeometry(QtCore.QRect(10, 10, 221, 16))
self.label_9.setObjectName("label_9")
self.label_10 = QtGui.QLabel(self.thanksTab)
self.label_10.setGeometry(QtCore.QRect(10, 30, 141, 16))
self.label_10.setObjectName("label_10")
self.label_11 = QtGui.QLabel(self.thanksTab)
self.label_11.setGeometry(QtCore.QRect(10, 50, 161, 16))
self.label_11.setObjectName("label_11")
self.label_12 = QtGui.QLabel(self.thanksTab)
self.label_12.setGeometry(QtCore.QRect(10, 70, 161, 16))
self.label_12.setObjectName("label_12")
self.label_13 = QtGui.QLabel(self.thanksTab)
self.label_13.setGeometry(QtCore.QRect(10, 110, 301, 31))
self.label_13.setWordWrap(True)
self.label_13.setObjectName("label_13")
self.tabWidget.addTab(self.thanksTab, "")
self.licenseTab = QtGui.QWidget()
self.licenseTab.setObjectName("licenseTab")
self.licenseText = QtGui.QPlainTextEdit(self.licenseTab)
self.licenseText.setGeometry(QtCore.QRect(4, 4, 490, 203))
font = QtGui.QFont()
font.setPointSize(8)
self.licenseText.setFont(font)
self.licenseText.setObjectName("licenseText")
self.tabWidget.addTab(self.licenseTab, "")
self.label_2 = QtGui.QLabel(AboutAptOffline)
self.label_2.setGeometry(QtCore.QRect(10, 60, 511, 16))
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.pushButton = QtGui.QPushButton(AboutAptOffline)
self.pushButton.setGeometry(QtCore.QRect(416, 340, 101, 31))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/icons/dialog-cancel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton.setIcon(icon)
self.pushButton.setObjectName("pushButton")
self.retranslateUi(AboutAptOffline)
self.tabWidget.setCurrentIndex(3)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL("clicked()"), AboutAptOffline.close)
QtCore.QMetaObject.connectSlotsByName(AboutAptOffline)
def retranslateUi(self, AboutAptOffline):
AboutAptOffline.setWindowTitle(QtGui.QApplication.translate("AboutAptOffline", "About Apt-Offline", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("AboutAptOffline", "Apt-Offline", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("AboutAptOffline", "apt-offline is an Offline APT Package Manager for Debian and derivatives. ", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setText(QtGui.QApplication.translate("AboutAptOffline", "apt-offline can fully update/upgrade your disconnected Debian box without the need of connecting it to the network. \n"
"\n"
"This is a Graphical User Interface which exposes the functionality of apt-offline.", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.aboutTab), QtGui.QApplication.translate("AboutAptOffline", "About", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("AboutAptOffline", "Written by:", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("AboutAptOffline", "Ritesh Raj Sarraf <[email protected]>", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("AboutAptOffline", "GUI written by:", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("AboutAptOffline", "Manish Sinha <[email protected]>", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("AboutAptOffline", "Abhishek Mishra <[email protected]>", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.authorTab), QtGui.QApplication.translate("AboutAptOffline", "Author", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("AboutAptOffline", "Peter Otten", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("AboutAptOffline", "Duncan Booth", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("AboutAptOffline", "Simon Forman", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setText(QtGui.QApplication.translate("AboutAptOffline", "Dennis Lee Bieber", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setText(QtGui.QApplication.translate("AboutAptOffline", "The awesome Directi people for their office space required for the mini hackfests", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.thanksTab), QtGui.QApplication.translate("AboutAptOffline", "Thanks To", None, QtGui.QApplication.UnicodeUTF8))
self.licenseText.setPlainText(QtGui.QApplication.translate("AboutAptOffline", "LICENSE not found. Please contact the developers immediately.", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.licenseTab), QtGui.QApplication.translate("AboutAptOffline", "License", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("AboutAptOffline", "A GUI for apt-offline - an offline APT Package Manager", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("AboutAptOffline", "Close", None, QtGui.QApplication.UnicodeUTF8))
import resources_rc
| gpl-3.0 | 6,399,191,816,789,832,000 | 65.165414 | 201 | 0.716364 | false | 3.546957 | false | false | false |
gangadharkadam/tailorerp | erpnext/support/doctype/support_ticket/support_ticket.py | 1 | 8287 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.utilities.transaction_base import TransactionBase
from frappe.utils import now, extract_email_id
import json
import requests
STANDARD_USERS = ("Guest", "Administrator")
class SupportTicket(TransactionBase):
def get_sender(self, comm):
return frappe.db.get_value('Support Email Settings',None,'support_email')
def get_subject(self, comm):
return '[' + self.name + '] ' + (comm.subject or 'No Subject Specified')
def get_content(self, comm):
signature = frappe.db.get_value('Support Email Settings',None,'support_signature')
content = comm.content
if signature:
content += '<p>' + signature + '</p>'
return content
def get_portal_page(self):
return "ticket"
def on_update1(self):
from frappe.utils import get_url, cstr
frappe.errprint(get_url())
if get_url()=='http://tailorpad.com':
pass
else:
pr2 = frappe.db.sql("""select name from `tabSupport Ticket`""")
frappe.errprint(pr2)
frappe.errprint("is feed back saved")
if pr2:
# self.login()
frappe.errprint("in if for creation support ticket")
test = {}
support_ticket = self.get_ticket_details()
self.call_del_keys(support_ticket)
#test['communications'] = []
#self.call_del_keys(support_ticket.get('communications'), test)
self.login()
frappe.errprint("support_ticket")
frappe.errprint(support_ticket)
self.tenent_based_ticket_creation(support_ticket)
# def on_update(self):
# self.send_email()
def send_email(self):
frappe.errprint("in the sendmail")
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
if self.get("__islocal") and get_url()=='http://tailorpad.com':
# mail_titles = frappe.get_hooks().get("login_mail_title", [])
# title = frappe.db.get_default('company') or (mail_titles and mail_titles[0]) or ""
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
first_name = frappe.db.sql_list("""select first_name from `tabUser` where name='%s'"""%(self.raised_by))
frappe.errprint(first_name[0])
msg="Dear "+first_name[0]+"!<br><br>Support Ticket is created successfully for '"+self.subject+"'<br><br>Your Support Ticket Number is '"+self.name+"' <br><br>Please note for further information. <br><br>Regards, <br>Team TailorPad."
sender = frappe.session.user not in STANDARD_USERS and frappe.session.user or None
frappe.sendmail(recipients=self.raised_by, sender=sender, subject=self.subject,
message=msg)
def login(self):
login_details = {'usr': 'Administrator', 'pwd': 'admin'}
url = 'http://tailorpad.com/api/method/login'
headers = {'content-type': 'application/x-www-form-urlencoded'}
frappe.errprint([url, 'data='+json.dumps(login_details)])
response = requests.post(url, data='data='+json.dumps(login_details), headers=headers)
def get_ticket_details(self):
		# return frappe.get_doc('Support Ticket', self.name)
		from frappe.utils import get_url
		response = requests.get("""%(url)s/api/resource/Support Ticket/SUP-00001"""%{'url':get_url()})
# frappe.errprint(["""%(url)s/api/resource/Support Ticket/%(name)s"""%{'url':get_url(), 'name':self.name}])
frappe.errprint(response.text)
return eval(response.text).get('data')
def call_del_keys(self, support_ticket):
if support_ticket:
if isinstance(support_ticket, dict):
self.del_keys(support_ticket)
if isinstance(support_ticket, list):
for comm in support_ticket:
self.del_keys(comm)
def del_keys(self, support_ticket):
frappe.errprint(type(support_ticket))
del support_ticket['name']
del support_ticket['creation']
del support_ticket['modified']
del support_ticket['company']
def tenent_based_ticket_creation(self, support_ticket):
frappe.errprint(support_ticket)
url = 'http://tailorpad.com/api/resource/Support Ticket'
#url = 'http://192.168.5.12:7676/api/method/login'
headers = {'content-type': 'application/x-www-form-urlencoded'}
frappe.errprint('data='+json.dumps(support_ticket))
response = requests.post(url, data='data='+json.dumps(support_ticket), headers=headers)
frappe.errprint(response)
frappe.errprint(response.text)
def validate(self):
self.update_status()
self.set_lead_contact(self.raised_by)
if self.status == "Closed":
from frappe.widgets.form.assign_to import clear
clear(self.doctype, self.name)
#self.on_update1()
self.send_email()
def set_lead_contact(self, email_id):
import email.utils
email_id = email.utils.parseaddr(email_id)
if email_id:
if not self.lead:
self.lead = frappe.db.get_value("Lead", {"email_id": email_id})
if not self.contact:
self.contact = frappe.db.get_value("Contact", {"email_id": email_id})
if not self.company:
self.company = frappe.db.get_value("Lead", self.lead, "company") or \
frappe.db.get_default("company")
def update_status(self):
status = frappe.db.get_value("Support Ticket", self.name, "status")
if self.status!="Open" and status =="Open" and not self.first_responded_on:
self.first_responded_on = now()
if self.status=="Closed" and status !="Closed":
self.resolution_date = now()
if self.status=="Open" and status !="Open":
# if no date, it should be set as None and not a blank string "", as per mysql strict config
self.resolution_date = None
@frappe.whitelist()
def set_status(name, status):
st = frappe.get_doc("Support Ticket", name)
st.status = status
st.save()
@frappe.whitelist()
def get_admin(name):
admin = frappe.db.sql("select email_id_admin from tabUser where name='administrator'")
frappe.errprint(admin)
frappe.errprint(frappe.session.get('user'))
if admin:
frappe.errprint("if")
return admin[0][0]
else:
frappe.errprint("else")
return frappe.session.get('user')
@frappe.whitelist()
def assing_future(name, assign_in_future,raised_by,assign_to):
frappe.errprint("in assign future")
from frappe.utils import get_url, cstr
if get_url()=='http://tailorpad.com':
check_entry = frappe.db.sql("""select assign_to from `tabAssing Master` where name = %s """, raised_by)
frappe.errprint("in assign")
if check_entry :
frappe.errprint("chk")
if assign_in_future=='No':
frappe.errprint("no")
frappe.db.sql("""delete from `tabAssing Master` where name = %s """, raised_by)
else :
frappe.errprint("Yes")
frappe.db.sql("""update `tabAssing Master` set assign_to=%s where name = %s """,(assign_to,raised_by))
else :
frappe.errprint("not chk")
if assign_in_future=='Yes':
frappe.errprint("Yes")
am = frappe.new_doc("Assing Master")
am.update({
"name": raised_by,
"assign_to": assign_to,
"raised_by":raised_by
})
am.insert()
def auto_close_tickets():
frappe.db.sql("""update `tabSupport Ticket` set status = 'Closed'
where status = 'Replied'
and date_sub(curdate(),interval 15 Day) > modified""")
@frappe.whitelist()
def reenable(name):
frappe.errprint("calling superadmin")
from frappe.utils import get_url, cstr
frappe.errprint(get_url())
if get_url()!='http://tailorpad.com':
frappe.errprint("in reenable")
from frappe.utils import get_url, cstr,add_months
from frappe import msgprint, throw, _
res = frappe.db.sql("select validity from `tabUser` where name='Administrator' and no_of_users >0")
if res:
res1 = frappe.db.sql("select validity_end_date from `tabUser` where '"+cstr(name)+"' and validity_end_date <CURDATE()")
if res1:
bc="update `tabUser` set validity_end_date=DATE_ADD((nowdate(), INTERVAL "+cstr(res[0][0])+" MONTH) where name = '"+cstr(name)+"'"
frappe.db.sql(bc)
frappe.db.sql("update `tabUser`set no_of_users=no_of_users-1 where name='Administrator'")
else:
ab="update `tabUser` set validity_end_date=DATE_ADD(validity_end_date,INTERVAL "+cstr(res[0][0])+" MONTH) where name = '"+cstr(name)+"' "
frappe.errprint(ab)
frappe.db.sql(ab)
frappe.db.sql("update `tabUser`set no_of_users=no_of_users-1 where name='Administrator'")
else:
frappe.throw(_("Your subscription plan expired .Please purchase an subscription plan and enable user."))
| agpl-3.0 | -8,289,523,798,107,514,000 | 34.26383 | 237 | 0.68879 | false | 3.070396 | false | false | false |
afriestad/WikiLinks | kokekunster/settings_dev.py | 1 | 1482 | import os
from kokekunster.settings import BASE_DIR
# Settings for development environment
DEBUG = True
ALLOWED_HOSTS = ['*']
# "Secret" cryptographic key, only used during local development
SECRET_KEY = 'fc4_hb-wi32l^c&qpx6!m)o*xd(4ga$13(ese#pfj#pjxnmt0p'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# The development database requires postgresql to be installed on the machine.
# The following settings correspond to the default settings used by
# Postgres.app
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['USER'],
'USER': os.environ['USER'],
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
# For testing email
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'tmp', 'email')
ADMINS = (
('Test Testesen', '[email protected]'),
('Testinne Testesen', '[email protected]'),
)
# User uploaded files (MEDIA)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Settings for 'dbbackup' app such that it is easy to import production data
# to the dev environment
DBBACKUP_STORAGE = 'django.core.files.storage.FileSystemStorage'
DBBACKUP_STORAGE_OPTIONS = {
'location': os.path.join(BASE_DIR, 'tmp'),
}
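# Hedged example (assumes the django-dbbackup app is installed and wired up to
# the settings above): production data can be pulled into this dev environment
# with the package's management commands, e.g.
#   python manage.py dbbackup    # write a dump into BASE_DIR/tmp
#   python manage.py dbrestore   # load the most recent dump from there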
# Use the PK for the localhost Site model here
SITE_ID = 1
# IPs which have access to the Django debug toolbar
INTERNAL_IPS = ('127.0.0.1',)
| mit | 8,427,073,580,051,762,000 | 22.15625 | 78 | 0.687584 | false | 3.200864 | false | false | false |
pFernbach/hpp-rbprm-corba | script/tools/parse_bench_contactGen.py | 1 | 1861 | import sys
totalSuccess = 0.
totalTime = 0.
totalMuscodConverg = 0.
totalMuscodWarmStartConverg = 0.
totalCrocConverg = 0.
totalConf = 0.
totalIt = 0.
f = open("/local/fernbac/bench_iros18/bench_contactGeneration/walk_noCroc.log","r")
line = f.readline()
while line.startswith("new"):
totalIt += 1.
line = f.readline()
t = line.rstrip("\n").split(" ")
assert t[0].startswith("success")
success = t[1].startswith("True")
if success:
totalSuccess += 1.
line = f.readline()
t = line.rstrip("\n").split(" ")
assert t[0].startswith("muscodNoWarmStart")
if t[1].startswith("True"):
totalMuscodConverg += 1.
line = f.readline()
t = line.rstrip("\n").split(" ")
if t[0].startswith("crocConverged"):
if t[1].startswith("True"):
totalCrocConverg +=1.
elif t[0].startswith("muscodWarmStart"):
if t[1].startswith("True"):
totalMuscodWarmStartConverg += 1.
else :
print "badly formated log"
line = f.readline()
t = line.rstrip("\n").split(" ")
assert t[0].startswith("time")
if success :
totalTime += float(t[1])
line = f.readline()
t = line.rstrip("\n").split(" ")
assert t[0].startswith("configs")
if success :
totalConf += int(t[1])
line = f.readline()
print "For : "+str(totalIt)+" runs : "
print "success contact generation : "+str((totalSuccess/totalIt)*100.)+ " %"
print "success muscod : "+str((totalMuscodConverg/totalSuccess)*100.)+ " %"
print "success muscod Warm Start : "+str((totalMuscodWarmStartConverg/totalSuccess)*100.)+ " %"
print "success croc converged : : "+str((totalCrocConverg/totalSuccess)*100.)+ " %"
print "avg time : "+str(totalTime/totalSuccess)+ "s"
print "avg configs : "+str(totalConf/totalSuccess)
| lgpl-3.0 | 5,070,445,143,479,115,000 | 32.232143 | 97 | 0.602902 | false | 3.197595 | false | false | false |
LordDarkula/chess_py | chess_py/core/board.py | 1 | 13656 | # -*- coding: utf-8 -*-
"""
Constructs board object which stores the get_location of all the pieces.
Default Array
| [[0th row 0th item, 0th row 1st item, 0th row 2nd item],
| [1st row 0th item, 1st row 1st item, 1st row 2nd item],
| [2nd row 0th item, 2nd row 1st item, 2nd row 2nd item]]
| Default board
| 8 ║♜ ♞ ♝ ♛ ♚ ♝ ♞ ♜ Black pieces
| 7 ║♟ ♟ ♟ ♟ ♟ ♟ ♟ ♟ Black pawns
| 6 ║a6… … … … … …h6
| 5 ║… … … … … … … …
| 4 ║… … … … … … … …
| 3 ║a3… … … … … …h3 Algebraic
| 2 ║♙ ♙ ♙ ♙ ♙ ♙ ♙ ♙ White pawns
| 1 ║♖ ♘ ♗ ♕ ♔ ♗ ♘ ♖ White pieces
| -—╚═══════════════
| ——-a b c d e f g h
Pieces on the board are flipped in position array so white home row is at index 0
and black home row is at index 7
| Copyright © 2016 Aubhro Sengupta. All rights reserved.
"""
from __future__ import print_function
import inspect
from multiprocessing import Process
from copy import copy as cp
from math import fabs
from .color import white, black
from .algebraic import notation_const
from .algebraic.location import Location
from .algebraic.move import Move
from ..pieces.piece import Piece
from ..pieces.bishop import Bishop
from ..pieces.king import King
from ..pieces.pawn import Pawn
from ..pieces.queen import Queen
from ..pieces.rook import Rook
from ..pieces.knight import Knight
class Board:
"""
Standard starting position in a chess game.
Initialized upon startup and is used when init_default constructor is used
"""
def __init__(self, position):
"""
Creates a ``Board`` given an array of ``Piece`` and ``None``
objects to represent the given position of the board.
:type: position: list
"""
self.position = position
self.possible_moves = dict()
try:
self.king_loc_dict = {white: self.find_king(white),
black: self.find_king(black)}
except ValueError:
self.king_loc_dict = None
@classmethod
def init_default(cls):
"""
Creates a ``Board`` with the standard chess starting position.
:rtype: Board
"""
return cls([
# First rank
[Rook(white, Location(0, 0)), Knight(white, Location(0, 1)), Bishop(white, Location(0, 2)),
Queen(white, Location(0, 3)), King(white, Location(0, 4)), Bishop(white, Location(0, 5)),
Knight(white, Location(0, 6)), Rook(white, Location(0, 7))],
# Second rank
[Pawn(white, Location(1, file)) for file in range(8)],
# Third rank
[None for _ in range(8)],
# Fourth rank
[None for _ in range(8)],
# Fifth rank
[None for _ in range(8)],
# Sixth rank
[None for _ in range(8)],
# Seventh rank
[Pawn(black, Location(6, file)) for file in range(8)],
# Eighth rank
[Rook(black, Location(7, 0)), Knight(black, Location(7, 1)), Bishop(black, Location(7, 2)),
Queen(black, Location(7, 3)), King(black, Location(7, 4)), Bishop(black, Location(7, 5)),
Knight(black, Location(7, 6)), Rook(black, Location(7, 7))]
])
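    # Minimal usage sketch (illustrative, assumes the rest of the chess_py
    # package is importable):
    #   board = Board.init_default()
    #   print(board)                             # ASCII diagram as in __str__
    #   board.piece_at_square(Location(0, 4))    # white king on e1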
@property
def position_tuple(self):
return ((str(piece) for piece in self.position[index]) for index, row in enumerate(self.position))
def __key(self):
return self.position
def __hash__(self):
return hash(tuple([hash(piece) for piece in self]))
def __eq__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("Cannot compare other type to Board")
for i, row in enumerate(self.position):
for j, piece in enumerate(row):
if piece != other.position[i][j]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
board_string = ""
for i, row in enumerate(self.position):
board_string += str(8 - i) + " "
for j, square in enumerate(row):
piece = self.piece_at_square(Location(7 - i, j))
if isinstance(piece, Piece):
board_string += piece.symbol + " "
else:
board_string += "_ "
board_string += "\n"
board_string += " a b c d e f g h"
return board_string
def __iter__(self):
for row in self.position:
for square in row:
yield square
def __copy__(self):
"""
Copies the board faster than deepcopy
:rtype: Board
"""
return Board([[cp(piece) or None
for piece in self.position[index]]
for index, row in enumerate(self.position)])
def piece_at_square(self, location):
"""
Finds the chess piece at a square of the position.
:type: location: Location
:rtype: Piece
"""
return self.position[location.rank][location.file]
def is_square_empty(self, location):
"""
Finds whether a chess piece occupies a square of the position.
:type: location: Location
:rtype: bool
"""
return self.position[location.rank][location.file] is None
def material_advantage(self, input_color, val_scheme):
"""
Finds the advantage a particular side possesses given a value scheme.
:type: input_color: Color
:type: val_scheme: PieceValues
:rtype: double
"""
if self.get_king(input_color).in_check(self) and self.no_moves(input_color):
return -100
if self.get_king(-input_color).in_check(self) and self.no_moves(-input_color):
return 100
return sum([val_scheme.val(piece, input_color) for piece in self])
def advantage_as_result(self, move, val_scheme):
"""
Calculates advantage after move is played
:type: move: Move
:type: val_scheme: PieceValues
:rtype: double
"""
test_board = cp(self)
test_board.update(move)
return test_board.material_advantage(move.color, val_scheme)
def all_possible_moves(self, input_color):
"""
Checks if all the possible moves has already been calculated
and is stored in `possible_moves` dictionary. If not, it is calculated
with `_calc_all_possible_moves`.
:type: input_color: Color
:rtype: list
"""
position_tuple = self.position_tuple
if position_tuple not in self.possible_moves:
self.possible_moves[position_tuple] = tuple(self._calc_all_possible_moves(input_color))
return self.possible_moves[position_tuple]
def _calc_all_possible_moves(self, input_color):
"""
Returns list of all possible moves
:type: input_color: Color
:rtype: list
"""
for piece in self:
# Tests if square on the board is not empty
if piece is not None and piece.color == input_color:
for move in piece.possible_moves(self):
test = cp(self)
test_move = Move(end_loc=move.end_loc,
piece=test.piece_at_square(move.start_loc),
status=move.status,
start_loc=move.start_loc,
promoted_to_piece=move.promoted_to_piece)
test.update(test_move)
if self.king_loc_dict is None:
yield move
continue
my_king = test.piece_at_square(self.king_loc_dict[input_color])
if my_king is None or \
not isinstance(my_king, King) or \
my_king.color != input_color:
self.king_loc_dict[input_color] = test.find_king(input_color)
my_king = test.piece_at_square(self.king_loc_dict[input_color])
if not my_king.in_check(test):
yield move
def runInParallel(*fns):
"""
Runs multiple processes in parallel.
:type: fns: def
"""
proc = []
for fn in fns:
p = Process(target=fn)
p.start()
proc.append(p)
for p in proc:
p.join()
def no_moves(self, input_color):
# Loops through columns
for piece in self:
# Tests if square on the board is not empty
if piece is not None and piece.color == input_color:
for move in piece.possible_moves(self):
test = cp(self)
test.update(move)
if not test.get_king(input_color).in_check(test):
return False
return True
def find_piece(self, piece):
"""
Finds Location of the first piece that matches piece.
If none is found, Exception is raised.
:type: piece: Piece
:rtype: Location
"""
for i, _ in enumerate(self.position):
for j, _ in enumerate(self.position):
loc = Location(i, j)
if not self.is_square_empty(loc) and \
self.piece_at_square(loc) == piece:
return loc
raise ValueError("{} \nPiece not found: {}".format(self, piece))
def get_piece(self, piece_type, input_color):
"""
Gets location of a piece on the board given the type and color.
:type: piece_type: Piece
:type: input_color: Color
:rtype: Location
"""
        for i, _ in enumerate(self.position):
            for j, _ in enumerate(self.position):
                loc = Location(i, j)
                piece = self.piece_at_square(loc)
                if not self.is_square_empty(loc) and \
                        isinstance(piece, piece_type) and \
                        piece.color == input_color:
                    return loc
raise Exception("{} \nPiece not found: {}".format(self, piece_type))
def find_king(self, input_color):
"""
Finds the Location of the King of input_color
:type: input_color: Color
:rtype: Location
"""
return self.find_piece(King(input_color, Location(0, 0)))
def get_king(self, input_color):
"""
Returns King of input_color
:type: input_color: Color
:rtype: King
"""
return self.piece_at_square(self.find_king(input_color))
def remove_piece_at_square(self, location):
"""
Removes piece at square
:type: location: Location
"""
self.position[location.rank][location.file] = None
def place_piece_at_square(self, piece, location):
"""
Places piece at given get_location
:type: piece: Piece
:type: location: Location
"""
self.position[location.rank][location.file] = piece
piece.location = location
def move_piece(self, initial, final):
"""
Moves piece from one location to another
:type: initial: Location
:type: final: Location
"""
self.place_piece_at_square(self.piece_at_square(initial), final)
self.remove_piece_at_square(initial)
def update(self, move):
"""
Updates position by applying selected move
:type: move: Move
"""
if move is None:
raise TypeError("Move cannot be type None")
if self.king_loc_dict is not None and isinstance(move.piece, King):
self.king_loc_dict[move.color] = move.end_loc
# Invalidates en-passant
for square in self:
pawn = square
if isinstance(pawn, Pawn):
pawn.just_moved_two_steps = False
# Sets King and Rook has_moved property to True is piece has moved
if type(move.piece) is King or type(move.piece) is Rook:
move.piece.has_moved = True
elif move.status == notation_const.MOVEMENT and \
isinstance(move.piece, Pawn) and \
fabs(move.end_loc.rank - move.start_loc.rank) == 2:
move.piece.just_moved_two_steps = True
if move.status == notation_const.KING_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True
elif move.status == notation_const.QUEEN_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True
elif move.status == notation_const.EN_PASSANT:
self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file))
elif move.status == notation_const.PROMOTE or \
move.status == notation_const.CAPTURE_AND_PROMOTE:
try:
self.remove_piece_at_square(move.start_loc)
self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc), move.end_loc)
except TypeError as e:
raise ValueError("Promoted to piece cannot be None in Move {}\n{}".format(repr(move), e))
return
self.move_piece(move.piece.location, move.end_loc)
| mit | -1,198,334,598,218,724,400 | 30.571429 | 106 | 0.550108 | false | 3.810345 | true | false | false |
nke001/attention-lvcsr | lvsr/extensions.py | 1 | 3223 | """Nice small extensions that maybe will it make to Blocks at some point."""
from __future__ import print_function
import subprocess
import pkgutil
import math
from theano.scan_module.scan_op import Scan
from blocks.extensions import TrainingExtension, SimpleExtension
class CGStatistics(SimpleExtension):
def __init__(self, **kwargs):
kwargs.setdefault('before_first_epoch', True)
kwargs.setdefault('on_resumption', True)
super(CGStatistics, self).__init__(**kwargs)
def do(self, *args, **kwargs):
print("Computation graph statistics:")
scan_nodes = [
node for node in self.main_loop.algorithm._function.maker.fgraph.apply_nodes
if isinstance(node.op, Scan)]
print("\tnumber of scan nodes:", len(scan_nodes))
class CodeVersion(SimpleExtension):
def __init__(self, packages, **kwargs):
self.packages = packages
kwargs.setdefault('before_training', True)
super(CodeVersion, self).__init__(**kwargs)
def do(self, *args, **kwargs):
package_paths = {name: loader.path
for loader, name, _ in pkgutil.iter_modules()}
for package in self.packages:
path = package_paths[package]
last_commit_record = "_{}_last_commit".format(package)
git_diff_record = "_{}_git_diff".format(package)
self.main_loop.log.status[last_commit_record] = (
subprocess.check_output("git --no-pager log -1",
cwd=path, shell=True))
self.main_loop.log.status[git_diff_record] = (
subprocess.check_output("git diff",
cwd=path, shell=True))
class IPDB(SimpleExtension):
def do(self, *args, **kwargs):
import ipdb; ipdb.set_trace()
class AdaptiveClipping(TrainingExtension):
def __init__(self, log_record, clipping_rule,
initial_threshold, burnin_period=100, decay_rate=0.99):
self.log_record = log_record
self.clipping_rule = clipping_rule
self.initial_threshold = initial_threshold
self.burnin_period = burnin_period
self.decay_rate = decay_rate
self.mean_gradient_norm = self.mean_gradient_norm2 = .0
def after_batch(self, batch):
gradient_norm = math.log(self.main_loop.log.current_row[self.log_record])
self.mean_gradient_norm = (self.decay_rate * self.mean_gradient_norm
+ (1 - self.decay_rate) * gradient_norm)
self.mean_gradient_norm2 = (self.decay_rate * self.mean_gradient_norm2
+ (1 - self.decay_rate) * gradient_norm ** 2)
self.std_gradient_norm = (
(self.mean_gradient_norm2 - self.mean_gradient_norm ** 2) ** .5)
threshold = math.exp(self.mean_gradient_norm + 1 * self.std_gradient_norm)
confidence = (min(
self.burnin_period, self.main_loop.status['iterations_done']) /
float(self.burnin_period))
threshold = (confidence * threshold +
(1 - confidence) * self.initial_threshold)
self.clipping_rule.threshold.set_value(threshold)
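    # Note (added for clarity, not in the original): the clipping threshold is
    # an exponential moving estimate of the log gradient norm,
    #   threshold ~= exp(mean + std),
    # blended with `initial_threshold` until `burnin_period` batches have been
    # seen, so early noisy gradients do not dominate the estimate.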
| mit | -8,607,135,214,068,613,000 | 39.2875 | 88 | 0.603785 | false | 3.91616 | false | false | false |
DavidGSola/Basic-RESTful-Service-with-FLASK | practica1.py | 1 | 1251 | # -*- coding: utf-8 -*-
from flask import Flask, url_for, render_template, Response
import random
app = Flask(__name__)
@app.route('/')
def api_root():
mensaje = 'Welcome'
return Response(mensaje, status=200, mimetype='text/plain')
@app.route('/hola')
def api_home():
mensaje = 'Hola -cañón-'
return Response(mensaje, status=200, mimetype='text/plain')
@app.route('/imagen')
def api_imagen():
mensaje = '<img src=' + url_for('static',filename='img/imagen.jpg') + '>'
return Response(mensaje, status=200, mimetype='image/jpg')
@app.route('/hola_pepe')
def api_pepe():
mensaje = 'Hola <b> pepe</b>'
return Response(mensaje, status=200, mimetype='text/html')
@app.route('/pagina')
def api_pagina():
mensaje = render_template('pagina.html')
return Response(mensaje, status=200, mimetype='text/html')
@app.route('/circulos_varios')
def api_circulos():
randoms = [random.randrange(50,200) for i in range(9)]
mensaje = render_template( 'circulos.xml',
cx1=randoms[0], cy1=randoms[1], r1=randoms[2],
cx2=randoms[3], cy2=randoms[4], r2=randoms[5],
cx3=randoms[6], cy3=randoms[7], r3=randoms[8])
return Response(mensaje, status=200, mimetype='image/svg+xml')
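# Quick manual check (illustrative): with the server running locally on the
# default port,
#   curl http://localhost:5000/                 -> "Welcome"
#   curl http://localhost:5000/circulos_varios  -> SVG with three random circles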
if __name__ == '__main__':
app.run(host='0.0.0.0')
| apache-2.0 | -8,561,766,479,407,991,000 | 28.738095 | 74 | 0.665332 | false | 2.513078 | false | false | false |
a4a881d4/6FSK | utils.py | 1 | 1800 | import random
import numpy as np
def rsrcBin(L):
r = []
for k in range(L):
r.append(random.randint(0,1))
return r
def rsrc(L):
r = rsrcBin(L)
x = [1-2*x for x in r]
return x
def fftOnce(x):
W = len(x)
hw = np.hamming(W)
ss = np.fft.fft(x*hw)
return np.conj(ss)*ss
def spectrum(x):
W = 1024*32
r = fftOnce(x[:W])
for k in range(W/2,len(x)-W,W/2):
r = r + fftOnce(x[k:k+W])
return r
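# Usage sketch (illustrative, not in the original): `spectrum` accumulates
# Hamming-windowed |FFT|^2 blocks of 32768 samples with 50% overlap
# (a Welch-style estimate without the final averaging), e.g.
#   x = np.array(rsrc(1 << 18), dtype=float)
#   s = spectrum(x)   # length-32768 array of summed |FFT|^2 values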
def xorsum(k, order):
	r = 0
	for i in range(order):
		r = r^(k&1)
		k = k>>1
	return r&1
class mseq:
def __init__(self,poly):
self.p = poly
k=0
while poly!=0:
k = k+1
poly = poly>>1
self.order = k-1
print "M sequence order",k
self.length = (1<<self.order)-1
self.s = []
state = 1
for n in range(self.length):
state = state<<1
if state>self.length:
state = state^self.p
self.s.append(1)
else:
self.s.append(0)
def printSeq(self,x=None):
if x==None:
x = self.s
for k in x:
print k,
print ""
def sum(self):
ss = 0
for x in self.s:
ss = ss + x
return ss
def shift(self,l):
return self.s[l:]+self.s[:l]
class gold:
def __init__(self,p0,p1):
self.m0 = mseq(p0)
self.m1 = mseq(p1)
def seq(self,k0,k1):
s0 = self.m0.shift(k0)
s1 = self.m1.shift(k1)
r = [a^b for (a,b) in zip(s0,s1)]
return r
def toReal(self,s):
return np.array([1-2*x for x in s])
def xcorr(self,x,y):
return np.correlate(np.array(x),np.array(y),'full')
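	# Illustrative use (mirrors main() below): build two shifted m-sequences,
	# XOR them into a Gold code and inspect its autocorrelation:
	#   g = gold(0x409, 0x40f)
	#   s = g.toReal(g.seq(1, 3))    # +/-1 valued Gold sequence, length 1023
	#   peak = g.xcorr(s, s).max()   # equals len(s) at zero lag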
def main():
m = mseq(0x409)
m.printSeq()
y = m.shift(1)
print "shift 1"
m.printSeq(y)
print m.sum()
g = gold(0x409,0x40f)
s = g.toReal(g.seq(1,3))
x = g.xcorr(s,s)
import matplotlib.pyplot as plt
plt.plot(x)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 | 1,853,868,788,121,301,800 | 15.142857 | 53 | 0.545 | false | 2.179177 | false | false | false |
owers19856/PyLATO | TBelec.py | 1 | 39671 | """
Created on Thursday 16 April 2015
@author: Andrew Horsfield, Marc Coury and Max Boleininger
This module contains functions that are needed once the molecular orbitals are
populated by electrons.
"""
#
# Import the modules that will be needed
import numpy as np
import math
import TBH
import sys
import time
import myfunctions
from Verbosity import *
import random
# PyDQED module
from pydqed import DQED
class Electronic:
"""Initialise and build the density matrix."""
def __init__(self, JobClass):
"""Compute the total number of electrons in the system, allocate space for occupancies"""
# Save job reference as an attribute for internal use.
self.Job = JobClass
# Set up the core charges, and count the number of electrons
self.zcore = np.zeros(self.Job.NAtom, dtype='double')
for a in range(0, self.Job.NAtom):
self.zcore[a] = self.Job.Model.atomic[self.Job.AtomType[a]]['NElectrons']
self.NElectrons = np.sum(self.zcore)
#
# Allocate memory for the level occupancies and density matrix
self.occ = np.zeros( self.Job.Hamilton.HSOsize, dtype='double')
self.rho = np.matrix(np.zeros((self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex'))
self.rhotot = np.matrix(np.zeros((self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex'))
# setup for the Pulay mixing
self.inputrho = np.zeros((self.Job.Def['num_rho'], self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex')
self.outputrho = np.zeros((self.Job.Def['num_rho'], self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex')
self.residue = np.zeros((self.Job.Def['num_rho'], self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex')
if self.Job.Def['el_kT'] == 0.0:
self.fermi = myfunctions.fermi_0
else:
self.fermi = myfunctions.fermi_non0
if self.Job.Def['optimisation_routine'] == 1:
self.optimisation_routine = self.optimisation_routine1
elif self.Job.Def['optimisation_routine'] == 2:
self.optimisation_routine = self.optimisation_routine2
elif self.Job.Def['optimisation_routine'] == 3:
self.optimisation_routine = self.optimisation_routine3
self.optimisation_rho = optimisation_rho_Duff_Meloni
elif self.Job.Def['optimisation_routine'] == 4:
self.optimisation_routine = self.optimisation_routine4
self.optimisation_rho = optimisation_rho_total
else:
print "WARNING: No optimisation routine selected. Using optimisation_routine1."
self.optimisation_routine = self.optimisation_routine1
def occupy(self, s, kT, n_tol, max_loops):
"""Populate the eigenstates using the Fermi function.
This function uses binary section."""
#
# Find the lower bound to the chemical potential
mu_l = self.Job.e[0]
while np.sum(self.fermi(self.Job.e, mu_l, kT)) > self.NElectrons:
mu_l -= 10.0*kT
#
# Find the upper bound to the chemical potential
mu_u = self.Job.e[-1]
while np.sum(self.fermi(self.Job.e, mu_u, kT)) < self.NElectrons:
mu_u += 10.0*kT
#
# Find the correct chemical potential using binary section
mu = 0.5*(mu_l + mu_u)
n = np.sum(self.fermi(self.Job.e, mu, kT))
count = 0
while math.fabs(self.NElectrons-n) > n_tol*self.NElectrons:
count+=1
if count>max_loops:
print("ERROR: The chemical potential could not be found. The error became "+str(math.fabs(self.NElectrons-n)))
sys.exit()
if n > self.NElectrons:
mu_u = mu
elif n < self.NElectrons:
mu_l = mu
mu = 0.5*(mu_l + mu_u)
n = np.sum(self.fermi(self.Job.e, mu, kT))
self.occ = self.fermi(self.Job.e, mu, kT)
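    # Worked sketch of the bisection above (illustrative numbers): with levels
    # e = [-1.0, 0.0, 1.0] eV, two electrons and kT -> 0, any mu between the
    # HOMO (0.0) and LUMO (1.0) gives sum(fermi(e, mu, kT)) == 2, so the
    # bracket [mu_l, mu_u] is halved until |NElectrons - n| < n_tol * NElectrons.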
def densitymatrix(self):
"""Build the density matrix."""
self.rho = np.matrix(self.Job.psi)*np.diag(self.occ)*np.matrix(self.Job.psi).H
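    # Added note: the line above is the standard spectral construction
    #   rho = sum_n f_n |psi_n><psi_n|
    # with f_n the Fermi occupancies; in matrix form psi * diag(occ) * psi^H,
    # which is exactly what is evaluated here.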
def SCFerror(self):
"""
Calculate the self-consistent field error. We do this by comparing the
on-site elements of the new density matrix (self.rho) with the old
        density matrix, self.rhotot. The sum of differences is normalised by
        dividing by the square of the total number of electrons.
"""
return sum(abs(
self.rho[TBH.map_atomic_to_index(atom1, orbital1, spin1, self.Job.NAtom, self.Job.NOrb),TBH.map_atomic_to_index(atom1, orbital2, spin2, self.Job.NAtom, self.Job.NOrb)]
- self.rhotot[TBH.map_atomic_to_index(atom1, orbital1, spin1, self.Job.NAtom, self.Job.NOrb),TBH.map_atomic_to_index(atom1, orbital2, spin2, self.Job.NAtom, self.Job.NOrb)])
for atom1 in range(self.Job.NAtom) for orbital1 in range(self.Job.NOrb[atom1]) for spin1 in range(2)
for orbital2 in range(orbital1, self.Job.NOrb[atom1]) for spin2 in range(spin1, 2)
)/(self.Job.Electron.NElectrons**2)
def idempotency_error(self, rho):
"""
Determine how far from idempotency the density matrix is. If the
density matrix is idempotent then
rho*rho - rho = 0.
We normalise by the number of electrons.
"""
rho_err = np.linalg.norm((np.dot(rho, rho) - rho))/self.NElectrons
return rho_err
def McWeeny(self):
"""
Make the density matrix idempotent using the McWeeny transformation,
R.McWeeny, Rev. Mod. Phys. (1960):
rho_n+1 = 3*rho_n^3 - 2*rho_n^2
"""
if self.Job.Def['Hamiltonian'] in ('scase','pcase','dcase','vectorS'):
rho_temp = self.rhotot
else:
rho_temp = self.rho
# Make sure that it isn't already idempotent
err_orig = self.idempotency_error(rho_temp)
if err_orig < self.Job.Def['McWeeny_tol']:
# if already idempotent then don't do anything, just exit function
return
flag, iterations, err, rho_temp = self.McWeeny_iterations(rho_temp)
# if the flag is false it means that idempotency was reduced below the tolerance
if flag == False:
# if the iterations did not converge but the idempotency error has
# gotten smaller then print a warning but treat as a success.
if err < err_orig:
print "Max iterations, ", iterations, " reached. Idempotency error = ", err
flag = True
else:
print "McWeeny transformation unsuccessful. Proceeding using input density matrix."
# Turn off using the McWeeny transformation as once it doesn't work it seems to not work again.
self.Job.Def["McWeeny"] = 0
# if this is going to be treated like a success then reassign rho_temp.
if flag == True:
if self.Job.Def['Hamiltonian'] in ('scase','pcase','dcase','vectorS'):
self.rhotot = rho_temp
else:
self.rho = rho_temp
def McWeeny_iterations(self, rho):
"""
Iterations of the McWeeny scheme for the inputted rho.
Return a True/False flag that indicates convergence, the number of
iterations required to reach convergence, the error and the converged density
matrix.
"""
converge_flag = False
for ii in range(self.Job.Def['McWeeny_max_loops']):
# McWeeny transformation
            rho = 3*np.dot(rho, rho) - 2*np.dot(rho, np.dot(rho, rho))
err = self.idempotency_error(rho)
verboseprint(self.Job.Def['extraverbose'], "McWeeny iteration: ", ii, "; Idempotency error = ", err)
if err < self.Job.Def['McWeeny_tol']:
converge_flag = True
return converge_flag, ii, err, rho
# Check to make sure that the error hasn't become a nan.
elif np.isnan(err):
return converge_flag, ii, err, rho
# if it gets to this statement then it probably hasn't converged.
return converge_flag, ii, err, rho
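    # Hedged illustration (not part of the original class): McWeeny purification
    # leaves an exactly idempotent matrix unchanged and pushes eigenvalues close
    # to 0 or 1 back towards them; for the standard polynomial
    # f(x) = 3*x**2 - 2*x**3:
    #     f(0.0) == 0.0,  f(1.0) == 1.0,  f(0.9) == 0.972,  f(0.1) == 0.028
    # which is why the idempotency error shrinks from one iteration to the next.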
def linear_mixing(self):
"""
Mix the new and the old density matrix by linear mixing.
The form of this mixing is
rho_out = (1-A)*rho_old + A*rho_new
for which, using our notation, rho_new is self.rho, rho_old is
self.rhotot and we overwrite self.rhotot to make rho_out.
"""
A = self.Job.Def['A']
self.rhotot = (1-A)*self.rhotot + A*self.rho
def GR_Pulay(self, scf_iteration):
"""
This is the guaranteed reduction Pulay mixing scheme proposed by
Bowler and Gillan in 2008. If the number of density matrices to be
used, num_rho, is 1, it reduces to just linear mixing.
The scf_iteration is a required input because when scf_iteration is
less than num_rho then scf_iteration is the number of density matrices
that should be used.
The output is an updated self.rhotot to be used in the construction of
the Fock matrix. Also, self.inputrho, self.outputrho and self.residue
are updated for the next iteration.
"""
num_rho = self.Job.Def['num_rho']
# If the number of scf iterations is less than num_rho replace it by
# the number of scf iterations (as there will only be that number of
# density matrices).
if scf_iteration < num_rho:
num_rho = scf_iteration
# Shift along the density and residue matrices
for ii in range(num_rho-1):
self.inputrho[num_rho - 1 - ii] = np.copy(self.inputrho[num_rho - 2 - ii])
self.outputrho[num_rho - 1 - ii] = np.copy(self.outputrho[num_rho - 2 - ii])
self.residue[num_rho - 1 - ii] = np.copy(self.residue[num_rho - 2 - ii])
# Add in the new density and residue matrices
self.inputrho[0] = self.rhotot
self.outputrho[0] = self.rho
self.residue[0] = self.rho - self.rhotot
# Calculate the values of alpha to minimise the residue
alpha, igo = self.optimisation_routine(num_rho)
if igo == 1:
print "WARNING: Unable to optimise alpha for combining density matrices. Proceeding using guess."
# Guess for alpha is just 1.0 divided by the number of density matrices
alpha = np.zeros((num_rho), dtype='double')
alpha.fill(1.0/num_rho)
verboseprint(self.Job.Def['extraverbose'], "alpha: ", alpha)
# Create an optimised rhotot and an optimised rho and do linear mixing to make next input matrix
self.rhotot = sum(alpha[i]*self.inputrho[i] for i in range(num_rho))
self.rho = sum(alpha[i]*self.outputrho[i] for i in range(num_rho))
self.linear_mixing()
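    # Hedged sketch (not part of the original class): the optimised density is a
    # weighted average of the stored histories, e.g. with two stored matrices
    # and weights alpha = [0.75, 0.25]:
    #     histories = [np.eye(2), np.zeros((2, 2))]
    #     alpha = [0.75, 0.25]
    #     rho_mix = sum(alpha[i]*histories[i] for i in range(2))   # 0.75 * identity
    # and with num_rho == 1 the scheme reduces to plain linear mixing.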
def chargepersite(self):
"""Compute the net charge on each site."""
norb = np.diag(self.rho)
qsite = np.zeros(self.Job.NAtom, dtype='double')
jH = self.Job.Hamilton
for a in range(0, self.Job.NAtom):
qsite[a] = (self.zcore[a] -
(np.sum(norb[jH.Hindex[a]:jH.Hindex[a+1]].real) +
np.sum(norb[jH.H0size+jH.Hindex[a]:jH.H0size+jH.Hindex[a+1]].real)))
return qsite
def electrons_site_orbital_spin(self,site,orbital,spin):
"""Compute the number of electrons with specified spin, orbital and site. """
index = TBH.map_atomic_to_index(site, orbital, spin, self.Job.NAtom, self.Job.NOrb)
return self.rho[index,index].real
def electrons_orbital_occupation_vec(self):
""" Return a vector of the occupation of each spin orbital. """
occupation = []
# Just collect the real part of the diagonal of the density matrix.
for ii in range(self.Job.Hamilton.HSOsize):
occupation.append(self.rho[ii,ii].real)
return occupation
def electrons_site_orbital(self,site,orbital):
"""Compute the number of electrons in a particular orbital on the specified site. """
return self.electrons_site_orbital_spin(site,orbital,0)+self.electrons_site_orbital_spin(site,orbital,1).real
def electrons_site(self,site):
"""Compute the number of electrons on a specified site. """
return sum(self.electrons_site_orbital(site,ii) for ii in range(self.Job.Model.atomic[self.Job.AtomType[site]]['NOrbitals'])).real
def electronspersite(self):
""" Return a vector of the number of electrons on each site. """
esite = np.zeros(self.Job.NAtom, dtype='double')
for a in range(self.Job.NAtom):
esite[a] = self.electrons_site(a).real
return esite
def spinpersite(self):
"""Compute the net spin on each site."""
ssite = np.zeros((3, self.Job.NAtom), dtype='double')
jH = self.Job.Hamilton
for a in range(0, self.Job.NAtom):
srho = np.zeros((2, 2), dtype='complex')
for j in range(jH.Hindex[a], jH.Hindex[a+1]):
#
# Sum over orbitals on one site to produce a 2x2 spin density matrix for the site
srho[0, 0] += self.rho[ j, j]
srho[0, 1] += self.rho[ j, jH.H0size+j]
srho[1, 0] += self.rho[jH.H0size+j, j]
srho[1, 1] += self.rho[jH.H0size+j, jH.H0size+j]
#
# Now compute the net spin vector for the site
ssite[0, a] = (srho[0, 1] + srho[1, 0]).real
ssite[1, a] = (srho[0, 1] - srho[1, 0]).imag
ssite[2, a] = (srho[0, 0] - srho[1, 1]).real
#
return ssite
def magnetic_correlation(self, site1, site2):
"""
        Compute the direction averaged magnetic correlation between site 1
and site 2. This requires the two particle density matrix. As we are
using the mean field approximation the two particle density matrix is
expressible in terms of the one particle density matrix. The equation
below is the equation for the magnetic correlation using the single
particle density matrix.
C_avg = 1/3 sum_{absz}( 2(rho_{aa}^{zs} rho_{bb}^{sz} - rho_{ab}^{zz}rho_{ba}^{ss})
- rho_{aa}^{ss}rho_{bb}^{zz}+rho_{ab}^{sz}rho_{ba}^{zs})
where a are the spatial orbitals on site 1, b are the spatial orbitals
on site 2, s and z are spin indices.
"""
C_avg = np.float64(0.0)
norb_1 = self.Job.Model.atomic[self.Job.AtomType[site1]]['NOrbitals']
norb_2 = self.Job.Model.atomic[self.Job.AtomType[site2]]['NOrbitals']
for s in range(2):
for z in range(2):
for a in range(norb_1):
for b in range(norb_2):
index_az = TBH.map_atomic_to_index(site1,a,z,self.Job.NAtom, self.Job.NOrb)
                        index_bz = TBH.map_atomic_to_index(site2,b,z,self.Job.NAtom, self.Job.NOrb)
                        index_bs = TBH.map_atomic_to_index(site2,b,s,self.Job.NAtom, self.Job.NOrb)
index_as = TBH.map_atomic_to_index(site1,a,s,self.Job.NAtom, self.Job.NOrb)
# term 1: 2.0*rho_{aa}^{zs} rho_{bb}^{sz}
C_avg += 2.0*self.rho[index_az,index_as]*self.rho[index_bs,index_bz]
# term 2: -2.0*rho_{ab}^{zz}rho_{ba}^{ss})
C_avg -= 2.0*self.rho[index_az,index_bz]*self.rho[index_as,index_bs]
# term 3: -rho_{aa}^{ss}rho_{bb}^{zz}
C_avg -= self.rho[index_as,index_as]*self.rho[index_bz,index_bz]
# term 4: rho_{ab}^{sz}rho_{ba}^{zs}
C_avg += self.rho[index_as,index_bz]*self.rho[index_bz,index_as]
# remember to divide by 3
C_avg = C_avg/3.0
return C_avg
def optimisation_routine1(self, num_rho):
"""
Optimisation routine where we try to solve for the norm squared of the
optimal density matrix with the constraint that the sum of the
coefficients is equal to one. To include the constraint we set up the
problem:
minimise: alpha_i M_ij alpha_j - lambda (sum_i alpha_i - 1)
where M_ij = Tr(R_i^dag R_j). We then differentiate with respect to
alpha_k and set to zero to minimise:
2 M alpha = lambda
We solve this equation for lambda = 1. We then can simply scale alpha,
such that sum_i alpha_i = 1, which is equivalent to having solved for
a different lambda.
"""
verboseprint(self.Job.Def['extraverbose'], "optimisation_routine")
small = 1e-14
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho, dtype='double')
Mmat = np.matrix(np.zeros((num_rho, num_rho), dtype='complex'))
lamb = 0.5*np.ones(num_rho, dtype='double')
for i in range(num_rho):
Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
Mmat[j, i] = Mmat[i, j].conj()
# if np.linalg.det(Mmat) < small:
# return alpha, 1
alpha = np.linalg.solve(Mmat, lamb)
myscale = np.sum(alpha)
if myscale == 0:
print "ERROR: alpha summed to 0 in optimisation_routine. Cannot be scaled to 1."
print alpha
return alpha, 1
else:
alpha = alpha/myscale
return alpha, 0
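    # Hedged numeric check (not in the original code) of the solve-then-rescale
    # idea above: with two residues of equal norm and no overlap,
    #     M = np.array([[2.0, 0.0], [0.0, 2.0]])
    #     alpha = np.linalg.solve(M, 0.5*np.ones(2))   # -> [0.25, 0.25]
    #     alpha = alpha/np.sum(alpha)                  # -> [0.5, 0.5]
    # i.e. equally good histories end up equally weighted.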
def optimisation_routine2(self, num_rho):
"""
Optimisation routine where we try to solve for the norm squared of the
optimal density matrix with the constraint that the sum of the
coefficients is equal to one. To include the constraint we set up the
problem:
minimise: alpha_i M_ij alpha_j - lambda (sum_i alpha_i - 1)
where M_ij = Tr(R_i^dag R_j). We then differentiate with respect to
alpha_k and set to zero to minimise:
2 M alpha - lambda = 0
We solve this equation. We have to add a buffer row and column to
include lambda as well as the constraint that the sum of alpha is
equal to one. We absorb the 2 into lambda:
{M_11 M_12 ... -1 {alpha_1 {0
M_21 M_22 ... -1 alpha_2 0
. . . .
. . . = .
. . . .
-1 -1 ... 0} lambda} -1}
"""
small = 1e-10
verboseprint(self.Job.Def['extraverbose'], "optimisation_routine2")
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho+1, dtype='double')
Mmat = np.matrix(np.zeros((num_rho+1, num_rho+1), dtype='complex'))
# make all the elements -1
Mmat.fill(-1.0)
# replace the bottom right hand corner by 0
Mmat[-1,-1] = 0.0
# calculate the rest of the Mmat.
for i in range(num_rho):
Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
Mmat[j, i] = Mmat[i, j].conj()
# if abs(np.linalg.det(Mmat)) < small:
# return alpha, 1
RHS = np.zeros(num_rho+1, dtype = 'double')
RHS[-1] = -1.0
alpha = np.linalg.solve(Mmat, RHS)
myscale = abs(np.sum(alpha)-alpha[-1])
if abs(myscale-1.0) > small:
print "ERROR: optimisation_routine2 -- sum alpha = %f. alpha must sum to 1.0." % myscale
print alpha
return alpha, 1
# if successful then return result and no error code.
return alpha, 0
def optimisation_routine3(self, num_rho):
"""
Solve the matrix vector equation approximately such that the alpha lie
between 0 and 1 and the constraint that sum_i alpha_i = 1:
{M_11 M_12 ... M_1N {alpha_1 {0
M_21 M_22 ... M_2N alpha_2 0
. . . .
. . . = .
. . . .
M_N1 M_N2 M_NN} alpha_N} 0}
We use the library PyDQED to find a good solution with alpha_i bound
between 0 and 1. To ensure that alpha_i are bound between 0 and 1 we
replace alpha_i by sin^2(alpha_i). To ensure that sum_i alpha_i = 1
we replace sin^2(alpha_i) by sin^2(alpha_i)/sum_a, where
sum_a = sum_i sin^2(alpha_i).
"""
verboseprint(self.Job.Def['extraverbose'], "optimisation_routine3")
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho, dtype='double')
# initial guess for alpha:
alpha.fill(1.0/float(num_rho))
self.Mmat = np.zeros((num_rho, num_rho), dtype='complex')
# calculate Mmat.
for i in range(num_rho):
self.Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
self.Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
self.Mmat[j, i] = self.Mmat[i, j].conj()
# Initialise the PyDQED class
opt = self.optimisation_rho()
opt.Mmat = self.Mmat
# bounds for the x values
# mybounds = [(0,1) for kk in range(num_rho)]
mybounds = None
# No bounds for lambda
# mybounds += [(None, None)]
# Strict bounds for the constraint
# mybounds += [(-1e-12, 1e-12)]
opt.initialize(Nvars=num_rho, Ncons=0, Neq=num_rho, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-8, maxIter=100, verbose=False)
alpha, igo = opt.solve(alpha)
if igo > 1:
verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
        # replace alpha[i] by alpha[i]^2/sum_i alpha[i]^2
sum_alpha = sum(alpha[ii]*alpha[ii] for ii in range(num_rho))
alpha = np.array([alpha[ii]*alpha[ii]/sum_alpha for ii in range(num_rho)])
if abs(sum(alpha)-1.0) > 1e-8:
print "WARNING: sum_i alpha_i - 1.0 = " + str(sum_alpha-1.0) + ". It should be equal to 0.0. Proceeding using guess."
return alpha, 1
if self.Job.Def['random_seeds'] == 1:
# try the random seeds
trial_alpha, trial_cMc, trial_err = self.random_seeds_optimisation(num_rho, self.Job.Def['num_random_seeds'])
# Check to find out which solution is better, the guessed or the random seeds
cMc = alpha.conjugate().dot(self.Mmat.dot(alpha))
if cMc < trial_cMc:
return alpha, igo
else:
# print "Returning random seed answer. cMc = ", str(cMc), "; trial_cMc = ", str(trial_cMc)
return trial_alpha, trial_err
return alpha, igo
# def optimisation_routine4(self, num_rho):
# """
# Solve the matrix vector equation approximately such that the alpha lie
# between 0 and 1 and the constraint that sum_i alpha_i = 1:
# {M_11 M_12 ... M_1N {alpha_1 {0
# M_21 M_22 ... M_2N alpha_2 0
# . . . .
# . . . = .
# . . . .
# M_N1 M_N2 M_NN} alpha_N} 0}
# We use the library PyDQED to find a good solution with alpha_i. We use
# the following trick. We replace alpha[i] by y[i] = alpha[i]^2 to enforce
# that y[i] > 0. We also use the constraint that sum_i y[i] = 1. This
# ensures that y[i] are bound betweeen 0 and 1. This is all done in the
# class optimisation_rho_meloni. It will return alpha but we will have to
# replace it by alpha^2 -- for debugging purposes we should check to make
# sure that they sum to 1.
# """
# verboseprint(self.Job.Def['extraverbose'], "optimisation_routine4")
# # If there is only one density matrix the solution is simple.
# if num_rho == 1:
# return np.array([1.0], dtype='double'), 0
# alpha = np.zeros(num_rho, dtype='double')
# # initial guess for alpha:
# alpha.fill(1.0/float(num_rho))
# self.Mmat = np.zeros((num_rho, num_rho), dtype='complex')
# # calculate Mmat.
# for i in range(num_rho):
# self.Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
# for j in range(i+1, num_rho):
# self.Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# # if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# # print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
# self.Mmat[j, i] = self.Mmat[i, j].conj()
# # Initialise the PyDQED class
# opt = optimisation_rho_Meloni()
# opt.Mmat = self.Mmat
# # bounds for the x values
# mybounds = [(0,1) for kk in range(num_rho)]
# mybounds += [(-1.e-12, 1e-12)]
# opt.initialize(Nvars=num_rho, Ncons=1, Neq=num_rho, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-12, maxIter=100, verbose=False)
# alpha, igo = opt.solve(alpha)
# if igo > 1:
# verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
# # replace alpha[i] by alpha[i]^2
# alpha = np.array([alpha[ii]*alpha[ii] for ii in range(num_rho)])
# if abs(sum(alpha)-1.0) > 1e-8:
# print "WARNING: sum_i alpha_i - 1.0 = " + str(sum(alpha)-1.0) + ". It should be equal to 0.0. Proceeding using guess."
# return alpha, 1
# if self.Job.Def['random_seeds'] == 1:
# # try the random seeds
# trial_alpha, trial_cMc, trial_err = self.random_seeds_optimisation(num_rho, self.Job.Def['num_random_seeds'])
# # Check to find out which solution is better, the guessed or the random seeds
# cMc = alpha.conjugate().dot(self.Mmat.dot(alpha))
# if cMc < trial_cMc:
# return alpha, igo
# else:
# # print "Returning random seed answer. cMc = ", str(cMc), "; trial_cMc = ", str(trial_cMc)
# return trial_alpha, trial_err
# return alpha, igo
def optimisation_routine4(self, num_rho):
"""
Solve the matrix vector equation approximately such that the alpha lie
between 0 and 1 and the constraint that sum_i alpha_i = 1:
{M_11 M_12 ... M_1N {alpha_1 {0
M_21 M_22 ... M_2N alpha_2 0
. . . .
. . . = .
. . . .
M_N1 M_N2 M_NN} alpha_N} 0}
        We use the library PyDQED to find a good solution with alpha_i. Here
        the working variables are rescaled as y[i] = alpha[i]/sum_i alpha[i],
        and the requirement sum_i alpha[i] = 1 is added as an extra equation
        (see the class optimisation_rho_total). Because the problem is
        quadratic the y[i] stay positive and sum to 1, so they are bound
        between 0 and 1. The solver returns alpha, which we normalise by its
        sum -- for debugging purposes we check that the result sums to 1.
"""
verboseprint(self.Job.Def['extraverbose'], "optimisation_routine5")
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho, dtype='double')
# initial guess for alpha:
alpha.fill(1.0/float(num_rho))
self.Mmat = np.zeros((num_rho, num_rho), dtype='complex')
# calculate Mmat.
for i in range(num_rho):
self.Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
self.Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
self.Mmat[j, i] = self.Mmat[i, j].conj()
# Initialise the PyDQED class
opt = self.optimisation_rho()
opt.Mmat = self.Mmat
# bounds for the x values
mybounds = [(0, None) for kk in range(num_rho)]
# mybounds += [(None, None)]
# mybounds += [(-1.e-12, 1e-12)]
opt.initialize(Nvars=num_rho, Ncons=0, Neq=2, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-8, maxIter=100, verbose=False)
alpha, igo = opt.solve(alpha)
# remove lambda
sum_a = sum(alpha)
alpha = alpha/sum_a
if igo > 1:
verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
if abs(sum(alpha)-1.0) > 1e-8:
print "WARNING: sum_i alpha_i - 1.0 = " + str(sum(alpha)-1.0) + ". It should be equal to 0.0. Proceeding using guess."
return alpha, 1
if self.Job.Def['random_seeds'] == 1:
# try the random seeds
trial_alpha, trial_cMc, trial_err = self.random_seeds_optimisation(num_rho, self.Job.Def['num_random_seeds'])
# Check to find out which solution is better, the guessed or the random seeds
cMc = alpha.conjugate().dot(self.Mmat.dot(alpha))
if cMc < trial_cMc:
return alpha, igo
else:
# print "Returning random seed answer. cMc = ", str(cMc), "; trial_cMc = ", str(trial_cMc)
return trial_alpha, trial_err
return alpha, igo
def random_seeds_optimisation(self, num_rho, num_trials):
cMc_vec = []
cMc_val = []
cMc_err = []
random.seed()
# Initialise the PyDQED class
opt = self.optimisation_rho()
opt.Mmat = self.Mmat
mybounds = None
opt.initialize(Nvars=num_rho, Ncons=0, Neq=num_rho, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-8, maxIter=100, verbose=False)
# random starting seeds
for gg in range(num_trials):
alpha = np.array([random.random() for hh in range(num_rho)])
alpha, igo = opt.solve(alpha)
if igo > 1:
verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
# replace alpha[i] by sin^2(alpha[i])/sum_i sin^2(alpha[i])
sum_alpha = sum(np.sin(alpha[ii])*np.sin(alpha[ii]) for ii in range(num_rho))
alpha = np.array([np.sin(alpha[ii])*np.sin(alpha[ii])/sum_alpha for ii in range(num_rho)])
cMc_vec.append(alpha)
cMc_val.append(alpha.conjugate().dot(self.Mmat.dot(alpha)))
cMc_err.append(igo)
# print "Trial values of cMc are: ", cMc_val
val, idx = min((val, idx) for (idx, val) in enumerate(cMc_val))
# print "chosen index = ", idx
return cMc_vec[idx], cMc_val[idx], cMc_err[idx]
# class optimisation_rho_Duff(DQED):
# """
# A DQED class containing the functions to optimise.
# It requires the small Mmat matrix to work.
# {M_11 M_12 ... M_1N
# M_21 M_22 ... M_2N
# . .
# . .
# . .
# M_N1 M_N2 M_NN}
# It implements the constraint that the sum_i x[i] = 1, and the bounds
# 0 < x[i] < 1 by replacing x[i] by sin^2(x[i])/sum_i sin^2(x[i]).
# """
# def evaluate(self, x):
# Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
# f = np.zeros((Neq), np.float64)
# J = np.zeros((Neq, Nvars), np.float64)
# fcons = np.zeros((Ncons), np.float64)
# Jcons = np.zeros((Ncons, Nvars), np.float64)
# # Replace x[i] by sin^2(x[i])/sum_i sin^2(x[i])
# y = []
# sum_x = sum(np.sin(x[ii])*np.sin(x[ii]) for ii in range(Nvars))
# for ii in range(Nvars):
# y.append(np.sin(x[ii])*np.sin(x[ii])/sum_x)
# for pp in range(Neq):
# f[pp] = sum((self.Mmat[pp, ii])*y[ii] for ii in range(Nvars))
# for kk in range(Nvars):
# # find this equation by differentiating f[pp] w.r.t. x[kk]
# J[pp, kk] = 2*np.sin(x[kk])*np.cos(x[kk])*(f[pp] - self.Mmat[pp, kk])/sum_x
# return f, J, fcons, Jcons
class optimisation_rho_Duff_Meloni(DQED):
"""
A DQED class containing the functions to optimise.
It requires the small Mmat matrix to work.
{M_11 M_12 ... M_1N
M_21 M_22 ... M_2N
. .
. .
. .
M_N1 M_N2 M_NN}
It implements the constraint that the sum_i x[i] = 1, and the bounds
0 <= x[i] <= 1 by replacing x[i] by y[i] = x[i]^2/sum_i x[i]^2. As the y[i]
must be positive and they must sum to 1, they must also lie between 0 and
1.
"""
def evaluate(self, x):
Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
f = np.zeros((Neq), np.float64)
J = np.zeros((Neq, Nvars), np.float64)
fcons = np.zeros((Ncons), np.float64)
Jcons = np.zeros((Ncons, Nvars), np.float64)
        # Replace x[i] by x[i]^2/sum_i x[i]^2
y = []
sum_x = sum(x[ii]*x[ii] for ii in range(Nvars))
for ii in range(Nvars):
y.append(x[ii]*x[ii]/sum_x)
for pp in range(Neq):
f[pp] = sum((self.Mmat[pp, ii])*y[ii] for ii in range(Nvars))
for kk in range(Nvars):
# find this equation by differentiating f[pp] w.r.t. x[kk]
J[pp, kk] = 2*x[kk]*(self.Mmat[pp, kk] - f[pp])/sum_x
return f, J, fcons, Jcons
# class optimisation_rho_Meloni(DQED):
# """
# A DQED class containing the functions to optimise.
# It requires the small Mmat matrix to work.
# {M_11 M_12 ... M_1N
# M_21 M_22 ... M_2N
# . .
# . .
# . .
# M_N1 M_N2 M_NN}
# We replace x[i] by y[i] = x[i]^2. This enforces that y[i] >= 0. We also use
# the constraint that the sum_i y[i] = 1, therefore they must also lie between 0 and
# 1.
# """
# def evaluate(self, x):
# Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
# f = np.zeros((Neq), np.float64)
# J = np.zeros((Neq, Nvars), np.float64)
# fcons = np.zeros((Ncons), np.float64)
# Jcons = np.zeros((Ncons, Nvars), np.float64)
# # Replace x[i] by sin^2(x[i])/sum_i sin^2(x[i])
# y = np.array((Nvars), np.float64)
# for ii in range(Nvars):
# y[ii] = x[ii]*x[ii]
# for pp in range(Neq):
# f[pp] = self.Mmat[pp].dot(y)
# for kk in range(Nvars):
# # find this equation by differentiating f[pp] w.r.t. x[kk]
# J[pp, kk] = 2*x[kk]*self.Mmat[pp, kk]
# fcons[0] = sum(y) - 1.0
# for kk in range(Nvars):
# Jcons[0, kk] = 2*x[kk]
# return f, J, fcons, Jcons
class optimisation_rho_total(DQED):
"""
A DQED class containing the functions to optimise.
It finds the values of alpha that best minimise:
{alpha_1 {M_11 M_12 ... M_1N {alpha_1
alpha_2 M_21 M_22 ... M_2N alpha_2
. . . .
. . . .
. . . .
alpha_N} M_N1 M_N2 M_NN} alpha_N}
It implements the constraint that the sum_i alpha[i] = 1, and the bounds
0 <= alpha[i] <= 1 by replacing x[i] by y[i] = x[i]/sum_i x[i]. As the y[i]
must be positive (because this problem is quadratic) and they must sum to
    1, so they must also lie between 0 and 1.
"""
def evaluate(self, x):
Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
f = np.zeros((Neq), np.float64)
J = np.zeros((Neq, Nvars), np.float64)
fcons = np.zeros((Ncons), np.float64)
Jcons = np.zeros((Ncons, Nvars), np.float64)
sum_x = sum(x)
y = x/sum_x
f[0] = y.dot(self.Mmat.dot(y))
f[1] = sum_x - 1.0
        # obtained by differentiating f[0] w.r.t. x[kk], using the fact that Mmat is Hermitian
J[0] = 2.0/sum_x*(self.Mmat.dot(y)-f[0])
J[1] = np.ones((Nvars), np.float64)
return f, J, fcons, Jcons
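# Worked note (added for clarity, not in the original code): with y = x/sum(x)
# and f0 = y.M.y, differentiating with respect to x_k and using the Hermiticity
# of Mmat gives
#     d f0 / d x_k = (2/sum(x)) * ((M y)_k - f0)
# which is exactly the expression assigned to J[0] in evaluate() above.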
dqed_err_dict={}
dqed_err_dict[2] = "The norm of the residual is zero; the solution vector is a root of the system."
dqed_err_dict[3] = "The bounds on the trust region are being encountered on each step; the solution vector may or may not be a local minimum."
dqed_err_dict[4] = "The solution vector is a local minimum."
dqed_err_dict[5] = "A significant amount of noise or uncertainty has been observed in the residual; the solution may or may not be a local minimum."
dqed_err_dict[6] = "The solution vector is only changing by small absolute amounts; the solution may or may not be a local minimum."
dqed_err_dict[7] = "The solution vector is only changing by small relative amounts; the solution may or may not be a local minimum."
dqed_err_dict[8] = "The maximum number of iterations has been reached; the solution is the best found, but may or may not be a local minimum."
for ii in range(9, 19):
dqed_err_dict[ii] = "An error occurred during the solve operation; the solution is not a local minimum."
| gpl-2.0 | 8,748,730,909,583,750,000 | 43.624297 | 184 | 0.558468 | false | 3.173934 | false | false | false |
CS4098Group/Project | app/peos.py | 1 | 4950 | from subprocess import Popen, PIPE
import os.path
import os
import xml.etree.ElementTree as ET
PEOS_COMMAND = './peos/os/kernel/peos'
class PEOSException(Exception):
pass
def create(path_to_model_file):
if not os.path.isfile(path_to_model_file):
raise IOError("File does not exist")
proc = Popen('{} -c {}'.format(PEOS_COMMAND, path_to_model_file),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('{} could not be found or started from {}'
.format(path_to_model_file, os.getcwd()))
pid = int(output.split("=")[1][1:])
return pid
def status(pid):
if not os.path.isfile("proc_table.dat.xml"):
# list command creates the xml file
list()
tree = ET.parse("proc_table.dat.xml")
root = tree.getroot()
process = root.find(".//process[@pid='" + str(pid) + "']")
if process is None:
raise NameError("PID not found")
actions = process.findall("action")
actions_dict = []
for action in actions:
inner_list = []
for i in range(len(action)):
inner_list.append({action[i].tag: action[i].attrib})
actions_dict.append({"details": inner_list})
        actions_dict[-1].update(action.attrib)
return actions_dict
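# Hedged illustration (assumed shape of proc_table.dat.xml, inferred only from
# the XPath queries above; any tags or attributes beyond those queried are
# guesses):
#     <process_table>
#       <process pid="1" model="example">
#         <action name="step_1" state="READY">
#           <requires>...</requires>
#         </action>
#       </process>
#     </process_table>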
def bind_resource_file(resource_file):
if not os.path.isfile(resource_file):
raise IOError("File Not Found")
proc = Popen('{} -b {}'.format(PEOS_COMMAND, resource_file),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('{} could not be imported'
.format(resource_file))
return
def start(pid, action, event):
if event not in ['start', 'finish', 'abort', 'suspend']:
raise PEOSException('Unknown event: {}'.format(event))
proc = Popen('{} -n {} {} {}'.format(PEOS_COMMAND, pid, action, event),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('{} action could not be started for pid: {}'
.format(action, pid))
return
def update_all():
proc = Popen('{} -u'.format(PEOS_COMMAND),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('Unknown error')
return
def bind_resource(pid, resource_name, resource_value):
proc = Popen('{} -r {} {} {}'
.format(PEOS_COMMAND, pid, resource_name, resource_value),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException("Could not bind {} to {} in {}"
.format(pid, resource_name, resource_value))
return
def list():
proc = Popen('{} -i'.format(PEOS_COMMAND),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('Unknown error. Process returned: {}'
.format(proc.returncode))
out = [p.split(" ") for p in output.split("\n")]
p_list = [{"pid": elm[0], "model":os.path.basename(os.path.splitext(elm[1])[0])} for elm in out
if len(elm) > 1 and elm[1] != ""]
return p_list
def delete(pid):
proc = Popen('{} -d {}'.format(PEOS_COMMAND, pid),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('Failed to delete process {}. Does it exist?'
.format(proc.returncode))
return
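# Hedged usage sketch (assumes a compiled peos kernel at PEOS_COMMAND; the model
# file name, resource and action names below are illustrative only):
#     pid = create("models/example.pml")
#     bind_resource(pid, "user", "alice")
#     start(pid, "some_action", "start")
#     print(status(pid))
#     delete(pid)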
| mit | -4,090,355,685,830,059,500 | 27.285714 | 99 | 0.55596 | false | 4.047424 | false | false | false |
mnestis/provglish | provglish/nl/templates/generation_template.py | 1 | 3100 | from provglish import transform, prov
from provglish.lexicalisation import urn_from_uri as lex
from provglish.lexicalisation import plural_p
from provglish.prov import PROV
from provglish.nl.tools import SETTINGS, realise_sentence
import rdflib
from rdflib.plugins import sparql
from rdflib import RDF
import urllib2
_generation_query = sparql.prepareQuery(
"""
SELECT ?entity ?generation ?time ?activity WHERE {
GRAPH <prov_graph> {
{
?entity a prov:Entity .
?entity prov:qualifiedGeneration ?generation .
?generation a prov:Generation .
OPTIONAL { ?generation prov:atTime ?time } .
OPTIONAL { ?generation prov:activity ?activity } .
FILTER ( bound(?time) || bound(?activity))
} UNION {
?entity a prov:Entity .
?entity prov:wasGeneratedBy ?activity .
?activity a prov:Activity
}
}
}
""",
initNs={"prov":PROV})
def _generation_binding(graph):
results = graph.query(_generation_query)
return results.bindings
def _generation_coverage(bindings, graph):
if "?generation" in bindings:
# Qualified
coverage = [(bindings["?entity"], RDF.type, PROV.Entity),
(bindings["?entity"], PROV.qualifiedGeneration, bindings["?generation"]),
(bindings["?generation"], RDF.type, PROV.Generation)]
if "?time" in bindings:
coverage.append((bindings["?generation"], PROV.atTime, bindings["?time"]))
if "?activity" in bindings:
coverage.extend([(bindings["?generation"], PROV.activity, bindings["?activity"]),
(bindings["?activity"], RDF.type, PROV.Activity)])
return coverage
else:
# Unqualified
return [(bindings["?entity"], RDF.type, PROV.Entity),
(bindings["?entity"], PROV.wasGeneratedBy, bindings["?activity"]),
(bindings["?activity"], RDF.type, PROV.Activity)]
def _generation_string(bindings, history):
sentence = {}
sentence["object"] = {"type": "noun_phrase",
"head": lex(bindings["?entity"]),
"features": {"number": "plural" if plural_p(bindings["?entity"]) else "singular"}}
sentence["verb"] = "generate"
sentence["features"] = {"tense": "past",
"passive": "true"}
sentence["modifiers"] = []
if "?time" in bindings:
sentence["modifiers"].append({"type":"preposition_phrase",
"preposition": "at",
"noun": bindings["?time"]})
if "?activity" in bindings:
sentence["modifiers"].append({"type":"preposition_phrase",
"preposition":"by",
"noun": lex(bindings["?activity"])})
return realise_sentence({"sentence":sentence})
generation = transform.Template("Generation", _generation_binding, _generation_coverage, _generation_string)
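# Hedged usage sketch (assumes an rdflib store in which the <prov_graph> named
# graph has already been loaded; variable names are illustrative only):
#     bindings = _generation_binding(graph)
#     for b in bindings:
#         print(_generation_string(b, history=None))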
| mit | -5,202,516,395,004,116,000 | 35.046512 | 109 | 0.569677 | false | 4.397163 | false | false | false |
ledusledus/lidarheightcellclassifyscripts | cliffs.py | 1 | 3730 | import unittest
from itertools import imap
from operator import add
NO_CLIFF=1
HAS_CLIFF=2
class TestBuildCliff(unittest.TestCase):
def setUp(self):
pass
def testBuild_Cliff(self):
altitudes={(10,10):2,(10,11):4,(11,11):4,(11,10):2,(12,10):2,(12,11):4}
key=(10,10)
start_cliff=1
end_cliff=99
cliff=build_cliff(altitudes,key, start_cliff,end_cliff)
self.assertEqual(cliff,[(10.0,10.5),(11.0,10.5)])
key=(11,10)
cliff=build_cliff(altitudes,key, start_cliff,end_cliff)
self.assertEqual(cliff,[(11.0,10.5),(12.0,10.5)])
def testBuildFullCliff(self):
altitudes={(10,10):2,(10,11):4,(11,11):4,(11,10):2,}
key=(10,10)
start_cliff=1
end_cliff=99
cliff=build_cliff(altitudes,key, start_cliff,end_cliff)
self.assertEqual(cliff,[(10.0,10.5),(11.0,10.5)])
cells={}
res=build_full_cliff(cells,altitudes,key, start_cliff,end_cliff)
self.assertEqual(res, [[(10.0,10.5),(11.0,10.5)]])
cells={}
altitudes={(10,10):2,(10,11):4,(11,11):4,(11,10):2,(12,10):2,(12,11):4}
res=build_full_cliff(cells,altitudes,key, start_cliff,end_cliff)
self.assertEqual(res, [[(10.0,10.5),(11.0,10.5)],[(11.0,10.5),(12.0,10.5)]])
def divide_by_scalar(vector,scalar):
return tuple(i/scalar for i in vector)
def build_cliff( altitudes, key, start_cliff, end_cliff ):
keys = [key, (key[0]+1,key[1]), (key[0]+1,key[1]+1), (key[0],key[1]+1)]
alts = []
for k in keys:
# we need to have a full cell and there is none
if not altitudes.has_key(k):
return None
alts.append(altitudes[k])
deltas=[(abs(alts[(i+1)%4]-alts[i]),i) for i in range(len(alts))]
good_deltas = filter(lambda x: x[0]>=start_cliff and x[0]<end_cliff, deltas)
if len(good_deltas)>2:
print "special case good deltas"
if len(good_deltas) < 2: # no cliffs found
# 1 means we are at the end. In that case it should be found from another cliff.
return None
good_deltas.sort(reverse=True)
idx1=good_deltas[0][1]
idx2=good_deltas[1][1]
if alts[idx1]<alts[(idx1+1)%4]:
idx1,idx2=idx2,idx1
cliff_line=[divide_by_scalar(imap(add, keys[idx1],keys[(idx1+1)%4]),2.0),
divide_by_scalar(imap(add, keys[idx2],keys[(idx2+1)%4]),2.0),]
return cliff_line
def next_key(key, point):
if point[0]==key[0]:
return (key[0]-1,key[1])
if point[0]==key[0]+1:
return (key[0]+1,key[1])
if point[1]==key[1]:
return (key[0],key[1]-1)
return key[0],key[1]+1
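# Hedged illustration (derived from the branches above): for cell key (10, 10)
# the shared-edge midpoints map to the neighbouring cells
#     next_key((10, 10), (10.0, 10.5)) == (9, 10)     # left edge
#     next_key((10, 10), (11.0, 10.5)) == (11, 10)    # right edge
#     next_key((10, 10), (10.5, 10.0)) == (10, 9)     # bottom edge
#     next_key((10, 10), (10.5, 11.0)) == (10, 11)    # top edge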
def build_full_cliff(cells, altitudes, key, start_cliff, end_cliff ):
cliff_line = build_cliff(altitudes, key, start_cliff, end_cliff )
if cliff_line == None:
cells[key]=NO_CLIFF
return
else:
cells[key]=HAS_CLIFF
curkey=key
full_cliff_lines=[cliff_line]
curpoint=full_cliff_lines[-1][1]
# now we need to grow right:
while True:
curkey=next_key(curkey, curpoint)
if cells.has_key(curkey):
# has been visited already
break
print curkey
cliff_line=build_cliff(altitudes, curkey, start_cliff, end_cliff )
print cliff_line
if cliff_line==None:
cells[curkey]=NO_CLIFF
break
if cliff_line[0]!=full_cliff_lines[-1][1]:
# this is not our cliff
break
cells[curkey]=HAS_CLIFF
full_cliff_lines.append(cliff_line)
print full_cliff_lines
curpoint=full_cliff_lines[-1][1]
# todo: then we need to grow left
return full_cliff_lines
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,035,802,107,887,874,000 | 33.537037 | 88 | 0.584718 | false | 2.773234 | true | false | false |
BrunoTh/ETS2Autopilot | UI/ui_updater.py | 1 | 2676 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer\updater.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(400, 131)
MainWindow.setLocale(QtCore.QLocale(QtCore.QLocale.German, QtCore.QLocale.Germany))
self.pb_progress = QtWidgets.QProgressBar(MainWindow)
self.pb_progress.setGeometry(QtCore.QRect(10, 60, 381, 23))
self.pb_progress.setProperty("value", 0)
self.pb_progress.setAlignment(QtCore.Qt.AlignCenter)
self.pb_progress.setOrientation(QtCore.Qt.Horizontal)
self.pb_progress.setObjectName("pb_progress")
self.b_run = QtWidgets.QPushButton(MainWindow)
self.b_run.setEnabled(False)
self.b_run.setGeometry(QtCore.QRect(210, 90, 181, 30))
self.b_run.setCheckable(False)
self.b_run.setChecked(False)
self.b_run.setObjectName("b_run")
self.label = QtWidgets.QLabel(MainWindow)
self.label.setGeometry(QtCore.QRect(10, 10, 81, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(MainWindow)
self.label_2.setGeometry(QtCore.QRect(10, 30, 81, 16))
self.label_2.setObjectName("label_2")
self.b_check = QtWidgets.QPushButton(MainWindow)
self.b_check.setGeometry(QtCore.QRect(10, 90, 181, 30))
self.b_check.setObjectName("b_check")
self.l_newVersion = QtWidgets.QLabel(MainWindow)
self.l_newVersion.setGeometry(QtCore.QRect(100, 30, 81, 16))
self.l_newVersion.setText("")
self.l_newVersion.setObjectName("l_newVersion")
self.l_currentVersion = QtWidgets.QLabel(MainWindow)
self.l_currentVersion.setGeometry(QtCore.QRect(100, 10, 81, 16))
self.l_currentVersion.setText("")
self.l_currentVersion.setObjectName("l_currentVersion")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Updater"))
self.pb_progress.setFormat(_translate("MainWindow", "%p%"))
self.b_run.setText(_translate("MainWindow", "Run Update"))
self.label.setText(_translate("MainWindow", "Current version:"))
self.label_2.setText(_translate("MainWindow", "New version:"))
self.b_check.setText(_translate("MainWindow", "Check for Update"))
| mit | -8,680,544,128,683,672,000 | 45.947368 | 91 | 0.681988 | false | 3.73743 | false | false | false |
bgribble/mfp | mfp/gui/modes/global_mode.py | 1 | 12471 | #! /usr/bin/env python
'''
global_mode.py: Global input mode bindings
Copyright (c) 2012 Bill Gribble <[email protected]>
'''
from ..input_mode import InputMode
from .label_edit import LabelEditMode
from .transient import TransientMessageEditMode
from .enum_control import EnumEditMode
from ..message_element import TransientMessageElement
from ..patch_element import PatchElement
from mfp import MFPGUI
class GlobalMode (InputMode):
def __init__(self, window):
self.manager = window.input_mgr
self.window = window
self.allow_selection_drag = True
self.selection_drag_started = False
self.drag_started = False
self.selbox_started = False
self.selbox_changed = []
self.drag_start_x = None
self.drag_start_y = None
self.drag_last_x = None
self.drag_last_y = None
self.drag_target = None
self.next_console_position = 1
self.next_tree_position = 1
InputMode.__init__(self, "Global input bindings")
# global keybindings
self.bind("!", self.transient_msg, "Send message to selection")
self.bind("~", self.toggle_console, "Show/hide log and console")
self.bind("`", self.toggle_tree, "Show/hide left side info")
self.bind("PGUP", self.window.layer_select_up, "Select higher layer")
self.bind("PGDN", self.window.layer_select_down, "Select lower layer")
self.bind("C-PGUP", self.window.patch_select_prev, "Select higher patch")
self.bind("C-PGDN", self.window.patch_select_next, "Select lower patch")
self.bind('C-f', self.window.patch_new, "Create a new patch")
self.bind('C-o', self.open_file, "Load file into new patch")
self.bind('C-s', self.save_file, "Save patch to file")
self.bind('C-p', self.save_as_lv2, "Save patch as LV2 plugin")
self.bind('C-w', self.patch_close, "Close current patch")
self.bind('C-q', self.quit, "Quit")
self.bind('C-A-.', self.toggle_pause, "Pause/unpause execution")
self.bind("M1DOWN", lambda: self.selbox_start(None), "Start selection box")
self.bind("M1-MOTION", lambda: self.selbox_motion(True), "Drag selection box")
self.bind("M1UP", self.selbox_end, "End selection box")
self.bind("S-M1DOWN", lambda: self.selbox_start(True), "Start add-to-selection box")
self.bind("S-M1-MOTION", lambda: self.selbox_motion(True), "Drag add-to-selection box")
self.bind("S-M1UP", self.selbox_end, "End selection box")
self.bind("C-M1DOWN", lambda: self.selbox_start(False),
"Start toggle-selection box")
self.bind("C-M1-MOTION", lambda: self.selbox_motion(False),
"Drag toggle-selection box")
self.bind("C-M1UP", self.selbox_end, "End toggle-selection box")
self.bind("S-C-M1DOWN", self.drag_start, "Begin dragging viewport")
self.bind("S-C-M1-MOTION", self.drag_motion, "Drag viewport")
self.bind("S-C-M1UP", self.drag_end, "End drag viewport")
self.bind('+', lambda: self.window.zoom_in(1.25), "Zoom view in")
self.bind('=', lambda: self.window.zoom_in(1.25), "Zoom view in")
self.bind('-', lambda: self.window.zoom_out(0.8), "Zoom view out")
self.bind('SCROLLUP', lambda: self.window.zoom_in(1.06), "Zoom view in")
self.bind('SCROLLDOWN', lambda: self.window.zoom_in(0.95), "Zoom view out")
self.bind('SCROLLSMOOTHUP', lambda: self.window.zoom_in(1.015), "Zoom view in")
self.bind('SCROLLSMOOTHDOWN', lambda: self.window.zoom_in(0.985), "Zoom view out")
self.bind('C-0', self.window.reset_zoom, "Reset view position and zoom")
self.bind("HOVER", lambda: self.hover(False))
self.bind("S-HOVER", lambda: self.hover(True))
def toggle_console(self):
from gi.repository import Gdk
alloc = self.window.content_console_pane.get_allocation()
oldpos = self.window.content_console_pane.get_position()
self.window.content_console_pane.set_position(
alloc.height - self.next_console_position)
self.next_console_position = alloc.height - oldpos
# KLUDGE!
MFPGUI().clutter_do_later(100, self._refresh)
return False
def toggle_tree(self):
alloc = self.window.tree_canvas_pane.get_allocation()
oldpos = self.window.tree_canvas_pane.get_position()
self.window.tree_canvas_pane.set_position(self.next_tree_position)
self.next_tree_position = oldpos
# KLUDGE!
MFPGUI().clutter_do_later(100, self._refresh)
return False
def _refresh(self):
oldpos = self.window.content_console_pane.get_position()
self.window.content_console_pane.set_position(oldpos - 1)
return False
def transient_msg(self):
if self.window.selected:
return self.window.add_element(TransientMessageElement)
else:
return False
def hover(self, details):
for m in self.manager.minor_modes:
if m.enabled and isinstance(m, (TransientMessageEditMode, LabelEditMode,
EnumEditMode)):
details = False
o = self.manager.pointer_obj
try:
if o is not None and o.obj_state == PatchElement.OBJ_COMPLETE:
o.show_tip(self.manager.pointer_x, self.manager.pointer_y, details)
        except Exception:
            print("oops! exception in hover")
            import traceback
            traceback.print_exc()
return False
def save_file(self):
import os.path
patch = self.window.selected_patch
if patch.last_filename is None:
default_filename = patch.obj_name + '.mfp'
else:
default_filename = patch.last_filename
def cb(fname):
if fname:
patch.last_filename = fname
if fname != default_filename:
basefile = os.path.basename(fname)
parts = os.path.splitext(basefile)
newname = parts[0]
patch.obj_name = newname
MFPGUI().mfp.rename_obj(patch.obj_id, newname)
patch.send_params()
self.window.refresh(patch)
MFPGUI().mfp.save_file(patch.obj_name, fname)
self.window.get_prompted_input("File name to save: ", cb, default_filename)
def save_as_lv2(self):
patch = self.window.selected_patch
default_plugname = 'mfp_' + patch.obj_name
def cb(plugname):
if plugname:
MFPGUI().mfp.save_lv2(patch.obj_name, plugname)
self.window.get_prompted_input("Plugin name to save: ", cb, default_plugname)
def open_file(self):
def cb(fname):
MFPGUI().mfp.open_file(fname)
self.window.get_prompted_input("File name to load: ", cb)
def drag_start(self):
self.drag_started = True
px = self.manager.pointer_ev_x
py = self.manager.pointer_ev_y
self.drag_last_x = px
self.drag_last_y = py
return True
def drag_motion(self):
if self.drag_started is False:
return False
px = self.manager.pointer_ev_x
py = self.manager.pointer_ev_y
dx = px - self.drag_last_x
dy = py - self.drag_last_y
self.drag_last_x = px
self.drag_last_y = py
self.window.move_view(dx, dy)
return True
def drag_end(self):
self.drag_started = False
return True
def selbox_start(self, select_mode):
if select_mode is None:
if self.manager.pointer_obj is not None:
if self.manager.pointer_obj not in self.window.selected:
self.window.unselect_all()
self.window.select(self.manager.pointer_obj)
raise self.manager.InputNeedsRequeue()
if self.allow_selection_drag:
self.selection_drag_started = True
else:
self.window.unselect_all()
self.selbox_started = True
elif select_mode is True:
if (self.manager.pointer_obj
and self.manager.pointer_obj not in self.window.selected):
self.window.select(self.manager.pointer_obj)
self.selbox_started = True
else:
if self.manager.pointer_obj in self.window.selected:
self.window.unselect(self.manager.pointer_obj)
self.selbox_started = True
px = self.manager.pointer_x
py = self.manager.pointer_y
self.drag_start_x = px
self.drag_start_y = py
self.drag_last_x = px
self.drag_last_y = py
return True
def selbox_motion(self, select_mode):
if not (self.selbox_started or self.selection_drag_started):
return False
px = self.manager.pointer_x
py = self.manager.pointer_y
dx = px - self.drag_last_x
dy = py - self.drag_last_y
self.drag_last_x = px
self.drag_last_y = py
if self.selection_drag_started:
for obj in self.window.selected:
if obj.editable and obj.display_type != 'connection':
obj.drag(dx, dy)
return True
enclosed = self.window.show_selection_box(self.drag_start_x, self.drag_start_y,
self.drag_last_x, self.drag_last_y)
for obj in enclosed:
if select_mode:
if obj not in self.window.selected:
if obj not in self.selbox_changed:
self.selbox_changed.append(obj)
self.window.select(obj)
else:
if obj not in self.selbox_changed:
self.selbox_changed.append(obj)
if obj in self.window.selected:
self.window.unselect(obj)
else:
self.window.select(obj)
new_changed = []
for obj in self.selbox_changed:
if obj not in enclosed:
if obj in self.window.selected:
self.window.unselect(obj)
else:
self.window.select(obj)
else:
new_changed.append(obj)
self.selbox_changed = new_changed
return True
def selbox_end(self):
if self.selection_drag_started:
for obj in self.window.selected:
obj.send_params()
self.selbox_started = False
self.selection_drag_started = False
self.selbox_changed = []
self.window.hide_selection_box()
return True
def patch_close(self):
def close_confirm(answer):
if answer is not None:
aa = answer.strip().lower()
if aa in ['y', 'yes']:
self.window.patch_close()
from mfp import log
p = self.window.selected_patch
log.debug("patch_close: checking for unsaved changes")
if MFPGUI().mfp.has_unsaved_changes(p.obj_id):
self.window.get_prompted_input("Patch has unsaved changes. Close anyway? [yN]",
close_confirm, '')
else:
self.window.patch_close()
def quit(self):
def quit_confirm(answer):
if answer is not None:
aa = answer.strip().lower()
if aa in ['y', 'yes']:
self.window.quit()
allpatches = MFPGUI().mfp.open_patches()
clean = True
for p in allpatches:
if MFPGUI().mfp.has_unsaved_changes(p):
clean = False
if not clean:
self.window.get_prompted_input(
"There are patches with unsaved changes. Quit anyway? [yN]",
quit_confirm, '')
else:
self.window.quit()
def toggle_pause(self):
from mfp import log
try:
paused = MFPGUI().mfp.toggle_pause()
if paused:
log.warning("Execution of all patches paused")
else:
log.warning("Execution of all patches resumed")
except Exception as e:
print("Caught exception", e)
| gpl-2.0 | -2,904,285,902,620,587,000 | 35.89645 | 95 | 0.571646 | false | 3.783677 | false | false | false |
winguru/graphite-api | graphite_api/render/glyph.py | 1 | 84736 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import cairocffi as cairo
import itertools
import json
import math
import pytz
import re
import six
from datetime import datetime, timedelta
from io import BytesIO
from six.moves import range
from six.moves.urllib.parse import unquote_plus
from .datalib import TimeSeries
from ..utils import to_seconds
INFINITY = float('inf')
colorAliases = {
'black': (0, 0, 0),
'white': (255, 255, 255),
'blue': (100, 100, 255),
'green': (0, 200, 0),
'red': (255, 0, 0),
'yellow': (255, 255, 0),
'orange': (255, 165, 0),
'purple': (200, 100, 255),
'brown': (150, 100, 50),
'cyan': (0, 255, 255),
'aqua': (0, 150, 150),
'gray': (175, 175, 175),
'grey': (175, 175, 175),
'magenta': (255, 0, 255),
'pink': (255, 100, 100),
'gold': (200, 200, 0),
'rose': (200, 150, 200),
'darkblue': (0, 0, 255),
'darkgreen': (0, 255, 0),
'darkred': (200, 00, 50),
'darkgray': (111, 111, 111),
'darkgrey': (111, 111, 111),
}
# This gets overridden by graphTemplates.conf
defaultGraphOptions = dict(
background='white',
foreground='black',
majorline='rose',
minorline='grey',
lineColors=("blue", "green", "red", "purple", "brown", "yellow", "aqua",
"grey", "magenta", "pink", "gold", "rose"),
fontname='Sans',
fontsize=10,
fontbold='false',
fontitalic='false',
)
# X-axis configurations (copied from rrdtool, this technique is evil & ugly
# but effective)
SEC = 1
MIN = 60
HOUR = MIN * 60
DAY = HOUR * 24
WEEK = DAY * 7
MONTH = DAY * 31
YEAR = DAY * 365
xAxisConfigs = (
dict(seconds=0.00,
minorGridUnit=SEC,
minorGridStep=5,
majorGridUnit=MIN,
majorGridStep=1,
labelUnit=SEC,
labelStep=5,
format="%H:%M:%S",
maxInterval=10*MIN),
dict(seconds=0.07,
minorGridUnit=SEC,
minorGridStep=10,
majorGridUnit=MIN,
majorGridStep=1,
labelUnit=SEC,
labelStep=10,
format="%H:%M:%S",
maxInterval=20*MIN),
dict(seconds=0.14,
minorGridUnit=SEC,
minorGridStep=15,
majorGridUnit=MIN,
majorGridStep=1,
labelUnit=SEC,
labelStep=15,
format="%H:%M:%S",
maxInterval=30*MIN),
dict(seconds=0.27,
minorGridUnit=SEC,
minorGridStep=30,
majorGridUnit=MIN,
majorGridStep=2,
labelUnit=MIN,
labelStep=1,
format="%H:%M",
maxInterval=2*HOUR),
dict(seconds=0.5,
minorGridUnit=MIN,
minorGridStep=1,
majorGridUnit=MIN,
majorGridStep=2,
labelUnit=MIN,
labelStep=1,
format="%H:%M",
maxInterval=2*HOUR),
dict(seconds=1.2,
minorGridUnit=MIN,
minorGridStep=1,
majorGridUnit=MIN,
majorGridStep=4,
labelUnit=MIN,
labelStep=2,
format="%H:%M",
maxInterval=3*HOUR),
dict(seconds=2,
minorGridUnit=MIN,
minorGridStep=1,
majorGridUnit=MIN,
majorGridStep=10,
labelUnit=MIN,
labelStep=5,
format="%H:%M",
maxInterval=6*HOUR),
dict(seconds=5,
minorGridUnit=MIN,
minorGridStep=2,
majorGridUnit=MIN,
majorGridStep=10,
labelUnit=MIN,
labelStep=10,
format="%H:%M",
maxInterval=12*HOUR),
dict(seconds=10,
minorGridUnit=MIN,
minorGridStep=5,
majorGridUnit=MIN,
majorGridStep=20,
labelUnit=MIN,
labelStep=20,
format="%H:%M",
maxInterval=1*DAY),
dict(seconds=30,
minorGridUnit=MIN,
minorGridStep=10,
majorGridUnit=HOUR,
majorGridStep=1,
labelUnit=HOUR,
labelStep=1,
format="%H:%M",
maxInterval=2*DAY),
dict(seconds=60,
minorGridUnit=MIN,
minorGridStep=30,
majorGridUnit=HOUR,
majorGridStep=2,
labelUnit=HOUR,
labelStep=2,
format="%H:%M",
maxInterval=2*DAY),
dict(seconds=100,
minorGridUnit=HOUR,
minorGridStep=2,
majorGridUnit=HOUR,
majorGridStep=4,
labelUnit=HOUR,
labelStep=4,
format="%a %H:%M",
maxInterval=6*DAY),
dict(seconds=255,
minorGridUnit=HOUR,
minorGridStep=6,
majorGridUnit=HOUR,
majorGridStep=12,
labelUnit=HOUR,
labelStep=12,
format="%m/%d %H:%M",
maxInterval=10*DAY),
dict(seconds=600,
minorGridUnit=HOUR,
minorGridStep=6,
majorGridUnit=DAY,
majorGridStep=1,
labelUnit=DAY,
labelStep=1,
format="%m/%d",
maxInterval=14*DAY),
dict(seconds=1000,
minorGridUnit=HOUR,
minorGridStep=12,
majorGridUnit=DAY,
majorGridStep=1,
labelUnit=DAY,
labelStep=1,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=2000,
minorGridUnit=DAY,
minorGridStep=1,
majorGridUnit=DAY,
majorGridStep=2,
labelUnit=DAY,
labelStep=2,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=4000,
minorGridUnit=DAY,
minorGridStep=2,
majorGridUnit=DAY,
majorGridStep=4,
labelUnit=DAY,
labelStep=4,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=8000,
minorGridUnit=DAY,
minorGridStep=3.5,
majorGridUnit=DAY,
majorGridStep=7,
labelUnit=DAY,
labelStep=7,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=16000,
minorGridUnit=DAY,
minorGridStep=7,
majorGridUnit=DAY,
majorGridStep=14,
labelUnit=DAY,
labelStep=14,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=32000,
minorGridUnit=DAY,
minorGridStep=15,
majorGridUnit=DAY,
majorGridStep=30,
labelUnit=DAY,
labelStep=30,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=64000,
minorGridUnit=DAY,
minorGridStep=30,
majorGridUnit=DAY,
majorGridStep=60,
labelUnit=DAY,
labelStep=60,
format="%m/%d %Y"),
dict(seconds=100000,
minorGridUnit=DAY,
minorGridStep=60,
majorGridUnit=DAY,
majorGridStep=120,
labelUnit=DAY,
labelStep=120,
format="%m/%d %Y"),
dict(seconds=120000,
minorGridUnit=DAY,
minorGridStep=120,
majorGridUnit=DAY,
majorGridStep=240,
labelUnit=DAY,
labelStep=240,
format="%m/%d %Y"),
)
UnitSystems = {
'binary': (
('Pi', 1024.0**5),
('Ti', 1024.0**4),
('Gi', 1024.0**3),
('Mi', 1024.0**2),
('Ki', 1024.0)),
'si': (
('P', 1000.0**5),
('T', 1000.0**4),
('G', 1000.0**3),
('M', 1000.0**2),
('K', 1000.0)),
'sec': (
('Y', 60*60*24*365),
('M', 60*60*24*30),
('D', 60*60*24),
('H', 60*60),
('m', 60)),
'msec': (
('Y', 60*60*24*365*1000),
('M', 60*60*24*30*1000),
('D', 60*60*24*1000),
('H', 60*60*1000),
('m', 60*1000),
('s', 1000)),
'none': [],
}
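# Hedged illustration (assumes the format_units() helper defined later in this
# module walks each table from the largest factor down): a raw value of
# 3 * 1024**2 rendered with the 'binary' system comes out as roughly "3 Mi",
# the same number under 'si' is roughly "3.1 M", and 'none' leaves the value
# unscaled.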
def force_text(value):
if not isinstance(value, six.string_types):
value = six.text_type(value)
return value
# We accept values fractionally outside of nominal limits, so that
# rounding errors don't cause weird effects. Since our goal is to
# create plots, and the maximum resolution of the plots is likely to
# be less than 10000 pixels, errors smaller than this size shouldn't
# create any visible effects.
EPSILON = 0.0001
class GraphError(Exception):
pass
class _AxisTics:
def __init__(self, minValue, maxValue, unitSystem=None):
self.minValue = self.checkFinite(minValue, "data value")
self.minValueSource = 'data'
self.maxValue = self.checkFinite(maxValue, "data value")
self.maxValueSource = 'data'
self.unitSystem = unitSystem
@staticmethod
def checkFinite(value, name='value'):
"""Check that value is a finite number.
If it is, return it. If not, raise GraphError describing the
problem, using name in the error message.
"""
if math.isnan(value):
raise GraphError('Encountered NaN %s' % (name,))
elif math.isinf(value):
raise GraphError('Encountered infinite %s' % (name,))
return value
@staticmethod
def chooseDelta(x):
"""Choose a reasonable axis range given that one limit is x.
Given that end of the axis range (i.e., minValue or maxValue) is
x, choose a reasonable distance to the other limit.
"""
if abs(x) < 1.0e-9:
return 1.0
else:
return 0.1 * abs(x)
def reconcileLimits(self):
"""If self.minValue is not less than self.maxValue, fix the problem.
If self.minValue is not less than self.maxValue, adjust
self.minValue and/or self.maxValue (depending on which was not
specified explicitly by the user) to make self.minValue <
self.maxValue. If the user specified both limits explicitly, then
raise GraphError.
"""
if self.minValue < self.maxValue:
# The limits are already OK.
return
minFixed = (self.minValueSource in ['min'])
maxFixed = (self.maxValueSource in ['max', 'limit'])
if minFixed and maxFixed:
raise GraphError('The %s must be less than the %s' %
(self.minValueSource, self.maxValueSource))
elif minFixed:
self.maxValue = self.minValue + self.chooseDelta(self.minValue)
elif maxFixed:
self.minValue = self.maxValue - self.chooseDelta(self.maxValue)
else:
delta = self.chooseDelta(max(abs(self.minValue),
abs(self.maxValue)))
average = (self.minValue + self.maxValue) / 2.0
self.minValue = average - delta
self.maxValue = average + delta
def applySettings(self, axisMin=None, axisMax=None, axisLimit=None):
"""Apply the specified settings to this axis.
Set self.minValue, self.minValueSource, self.maxValue,
self.maxValueSource, and self.axisLimit reasonably based on the
parameters provided.
Arguments:
axisMin -- a finite number, or None to choose a round minimum
limit that includes all of the data.
axisMax -- a finite number, 'max' to use the maximum value
contained in the data, or None to choose a round maximum limit
that includes all of the data.
axisLimit -- a finite number to use as an upper limit on maxValue,
or None to impose no upper limit.
"""
if axisMin is not None and not math.isnan(axisMin):
self.minValueSource = 'min'
self.minValue = self.checkFinite(axisMin, 'axis min')
if axisMax == 'max':
self.maxValueSource = 'extremum'
elif axisMax is not None and not math.isnan(axisMax):
self.maxValueSource = 'max'
self.maxValue = self.checkFinite(axisMax, 'axis max')
if axisLimit is None or math.isnan(axisLimit):
self.axisLimit = None
elif axisLimit < self.maxValue:
self.maxValue = self.checkFinite(axisLimit, 'axis limit')
self.maxValueSource = 'limit'
# The limit has already been imposed, so there is no need to
# remember it:
self.axisLimit = None
elif math.isinf(axisLimit):
# It must be positive infinity, which is the same as no limit:
self.axisLimit = None
else:
# We still need to remember axisLimit to avoid rounding top to
# a value larger than axisLimit:
self.axisLimit = axisLimit
self.reconcileLimits()
def makeLabel(self, value):
"""Create a label for the specified value.
Create a label string containing the value and its units (if any),
based on the values of self.step, self.span, and self.unitSystem.
"""
value, prefix = format_units(value, self.step,
system=self.unitSystem)
span, spanPrefix = format_units(self.span, self.step,
system=self.unitSystem)
if prefix:
prefix += " "
if value < 0.1:
return "%g %s" % (float(value), prefix)
elif value < 1.0:
return "%.2f %s" % (float(value), prefix)
if span > 10 or spanPrefix != prefix:
if type(value) is float:
return "%.1f %s" % (value, prefix)
else:
return "%d %s" % (int(value), prefix)
elif span > 3:
return "%.1f %s" % (float(value), prefix)
elif span > 0.1:
return "%.2f %s" % (float(value), prefix)
else:
return "%g %s" % (float(value), prefix)
class _LinearAxisTics(_AxisTics):
"""Axis ticmarks with uniform spacing."""
def __init__(self, minValue, maxValue, unitSystem=None):
_AxisTics.__init__(self, minValue, maxValue, unitSystem=unitSystem)
self.step = None
self.span = None
self.binary = None
def setStep(self, step):
"""Set the size of steps between ticmarks."""
self.step = self.checkFinite(float(step), 'axis step')
def generateSteps(self, minStep):
"""Generate allowed steps with step >= minStep in increasing order."""
self.checkFinite(minStep)
if self.binary:
base = 2.0
mantissas = [1.0]
exponent = math.floor(math.log(minStep, 2) - EPSILON)
else:
base = 10.0
mantissas = [1.0, 2.0, 5.0]
exponent = math.floor(math.log10(minStep) - EPSILON)
while True:
multiplier = base ** exponent
for mantissa in mantissas:
value = mantissa * multiplier
if value >= minStep * (1.0 - EPSILON):
yield value
exponent += 1
def computeSlop(self, step, divisor):
"""Compute the slop that would result from step and divisor.
Return the slop, or None if this combination can't cover the full
range. See chooseStep() for the definition of "slop".
"""
bottom = step * math.floor(self.minValue / float(step) + EPSILON)
top = bottom + step * divisor
if top >= self.maxValue - EPSILON * step:
return max(top - self.maxValue, self.minValue - bottom)
else:
return None
def chooseStep(self, divisors=None, binary=False):
"""Choose a nice, pretty size for the steps between axis labels.
Our main constraint is that the number of divisions must be taken
from the divisors list. We pick a number of divisions and a step
size that minimizes the amount of whitespace ("slop") that would
need to be included outside of the range [self.minValue,
self.maxValue] if we were to push out the axis values to the next
larger multiples of the step size.
The minimum step that could possibly cover the variance satisfies
minStep * max(divisors) >= variance
or
minStep = variance / max(divisors)
It's not necessarily possible to cover the variance with a step
that size, but we know that any smaller step definitely *cannot*
cover it. So we can start there.
For a sufficiently large step size, it is definitely possible to
cover the variance, but at some point the slop will start growing.
Let's define the slop to be
slop = max(minValue - bottom, top - maxValue)
Then for a given, step size, we know that
slop >= (1/2) * (step * min(divisors) - variance)
(the factor of 1/2 is for the best-case scenario that the slop is
distributed equally on the two sides of the range). So suppose we
already have a choice that yields bestSlop. Then there is no need
to choose steps so large that the slop is guaranteed to be larger
than bestSlop. Therefore, the maximum step size that we need to
consider is
maxStep = (2 * bestSlop + variance) / min(divisors)
"""
self.binary = binary
if divisors is None:
divisors = [4, 5, 6]
else:
for divisor in divisors:
self.checkFinite(divisor, 'divisor')
if divisor < 1:
raise GraphError('Divisors must be greater than or equal '
'to one')
if self.minValue == self.maxValue:
if self.minValue == 0.0:
self.maxValue = 1.0
elif self.minValue < 0.0:
self.minValue *= 1.1
self.maxValue *= 0.9
else:
self.minValue *= 0.9
self.maxValue *= 1.1
variance = self.maxValue - self.minValue
bestSlop = None
bestStep = None
for step in self.generateSteps(variance / float(max(divisors))):
if (
bestSlop is not None and
step * min(divisors) >= 2 * bestSlop + variance
):
break
for divisor in divisors:
slop = self.computeSlop(step, divisor)
if slop is not None and (bestSlop is None or slop < bestSlop):
bestSlop = slop
bestStep = step
self.step = bestStep
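    # Illustrative example (not part of the original module): for a data
    # range of [0, 7] with the default divisors [4, 5, 6], the smallest
    # candidate step is 7/6 ~= 1.17, so generateSteps() yields 2, 5, 10, ...
    # Step 2 with divisor 4 covers [0, 8] with a slop of 1, and the search
    # terminates before any larger step, so chooseStep() settles on step 2.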
def chooseLimits(self):
if self.minValueSource == 'data':
# Start labels at the greatest multiple of step <= minValue:
self.bottom = self.step * math.floor(
(self.minValue / self.step + EPSILON))
else:
self.bottom = self.minValue
if self.maxValueSource == 'data':
# Extend the top of our graph to the lowest
# step multiple >= maxValue:
self.top = self.step * math.ceil(
(self.maxValue / self.step - EPSILON))
# ...but never exceed a user-specified limit:
if (
self.axisLimit is not None and
self.top > self.axisLimit + EPSILON * self.step
):
self.top = self.axisLimit
else:
self.top = self.maxValue
self.span = self.top - self.bottom
if self.span == 0:
self.top += 1
self.span += 1
def getLabelValues(self):
if self.step <= 0.0:
raise GraphError('The step size must be positive')
if self.span > 1000.0 * self.step:
# This is insane. Pick something that won't cause trouble:
self.chooseStep()
values = []
start = self.step * math.ceil(self.bottom / self.step - EPSILON)
i = 0
while True:
value = start + i * self.step
if value > self.top + EPSILON * self.step:
break
values.append(value)
i += 1
return values
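    # Sketch of the intended call sequence (assumed from how LineGraph uses
    # this class further below, not an exhaustive API reference):
    #     tics = _LinearAxisTics(3, 47, unitSystem='si')
    #     tics.applySettings(axisMin=None, axisMax=None, axisLimit=None)
    #     tics.chooseStep()        # -> step 10
    #     tics.chooseLimits()      # -> bottom 0, top 50
    #     tics.getLabelValues()    # -> [0.0, 10.0, 20.0, 30.0, 40.0, 50.0]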
class _LogAxisTics(_AxisTics):
def __init__(self, minValue, maxValue, unitSystem=None, base=10.0):
_AxisTics.__init__(self, minValue, maxValue, unitSystem=unitSystem)
if base <= 1.0:
raise GraphError('Logarithmic base must be greater than one')
self.base = self.checkFinite(base, 'log base')
self.step = None
self.span = None
def setStep(self, step):
# step is ignored for Logarithmic tics:
self.step = None
def chooseStep(self, divisors=None, binary=False):
# step is ignored for Logarithmic tics:
self.step = None
def chooseLimits(self):
if self.minValue <= 0:
raise GraphError('Logarithmic scale specified with a dataset with '
'a minimum value less than or equal to zero')
self.bottom = math.pow(self.base,
math.floor(math.log(self.minValue, self.base)))
self.top = math.pow(self.base,
math.ceil(math.log(self.maxValue, self.base)))
self.span = self.top - self.bottom
if self.span == 0:
self.top *= self.base
self.span = self.top - self.bottom
def getLabelValues(self):
values = []
value = math.pow(self.base,
math.ceil(math.log(self.bottom, self.base) - EPSILON))
while value < self.top * (1.0 + EPSILON):
values.append(value)
value *= self.base
return values
class Graph(object):
customizable = ('width', 'height', 'margin', 'bgcolor', 'fgcolor',
'fontName', 'fontSize', 'fontBold', 'fontItalic',
'colorList', 'template', 'yAxisSide', 'outputFormat')
def __init__(self, **params):
self.params = params
self.data = params['data']
self.dataLeft = []
self.dataRight = []
self.secondYAxis = False
self.width = int(params.get('width', 200))
self.height = int(params.get('height', 200))
self.margin = int(params.get('margin', 10))
self.userTimeZone = params.get('tz')
self.logBase = params.get('logBase', None)
self.minorY = int(params.get('minorY', 1))
if self.logBase:
if self.logBase == 'e':
self.logBase = math.e
elif self.logBase <= 1:
self.logBase = None
params['logBase'] = None
else:
self.logBase = float(self.logBase)
if self.margin < 0:
self.margin = 10
self.setupCairo(params.get('outputFormat', 'png').lower())
self.area = {
'xmin': self.margin + 10, # Need extra room when the time is
# near the left edge
'xmax': self.width - self.margin,
'ymin': self.margin,
'ymax': self.height - self.margin,
}
self.loadTemplate(params.get('template', 'default'))
opts = self.ctx.get_font_options()
opts.set_antialias(cairo.ANTIALIAS_NONE)
self.ctx.set_font_options(opts)
self.foregroundColor = params.get('fgcolor', self.defaultForeground)
self.backgroundColor = params.get('bgcolor', self.defaultBackground)
self.setColor(self.backgroundColor)
self.drawRectangle(0, 0, self.width, self.height)
if 'colorList' in params:
colorList = unquote_plus(str(params['colorList'])).split(',')
else:
colorList = self.defaultColorList
self.colors = itertools.cycle(colorList)
self.drawGraph(**params)
def setupCairo(self, outputFormat='png'):
self.outputFormat = outputFormat
if outputFormat == 'png':
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.width, self.height)
elif outputFormat == 'svg':
self.surfaceData = BytesIO()
self.surface = cairo.SVGSurface(self.surfaceData,
self.width, self.height)
elif outputFormat == 'pdf':
self.surfaceData = BytesIO()
self.surface = cairo.PDFSurface(self.surfaceData,
self.width, self.height)
res_x, res_y = self.surface.get_fallback_resolution()
self.width = float(self.width / res_x) * 72
self.height = float(self.height / res_y) * 72
self.surface.set_size(self.width, self.height)
self.ctx = cairo.Context(self.surface)
def setColor(self, value, alpha=1.0, forceAlpha=False):
if isinstance(value, tuple) and len(value) == 3:
r, g, b = value
elif value in colorAliases:
r, g, b = colorAliases[value]
elif isinstance(value, six.string_types) and len(value) >= 6:
s = value
if s.startswith('#'):
s = s[1:]
if s.startswith('%23'):
s = s[3:]
r, g, b = (int(s[0:2], base=16), int(s[2:4], base=16),
int(s[4:6], base=16))
if len(s) == 8 and not forceAlpha:
alpha = int(s[6:8], base=16) / 255.0
elif isinstance(value, int) and len(str(value)) == 6:
s = str(value)
r, g, b = (int(s[0:2], base=16), int(s[2:4], base=16),
int(s[4:6], base=16))
else:
raise ValueError("Must specify an RGB 3-tuple, an html color "
"string, or a known color alias!")
r, g, b = [float(c) / 255.0 for c in (r, g, b)]
self.ctx.set_source_rgba(r, g, b, alpha)
def setFont(self, **params):
p = self.defaultFontParams.copy()
p.update(params)
self.ctx.select_font_face(p['name'], p['italic'], p['bold'])
self.ctx.set_font_size(float(p['size']))
def getExtents(self, text=None):
F = self.ctx.font_extents()
extents = {'maxHeight': F[2], 'maxAscent': F[0], 'maxDescent': F[1]}
if text is not None:
T = self.ctx.text_extents(text)
extents['width'] = T[4]
extents['height'] = T[3]
return extents
def drawRectangle(self, x, y, w, h, fill=True):
if not fill:
# offset for borders so they are drawn as lines would be
o = self.ctx.get_line_width() / 2.0
x += o
y += o
w -= o
h -= o
self.ctx.rectangle(x, y, w, h)
if fill:
self.ctx.fill()
else:
self.ctx.set_dash([], 0)
self.ctx.stroke()
def drawText(self, text, x, y, align='left', valign='top', rotate=0):
extents = self.getExtents(text)
angle = math.radians(rotate)
origMatrix = self.ctx.get_matrix()
horizontal = {
'left': 0,
'center': extents['width'] / 2,
'right': extents['width'],
}[align.lower()]
vertical = {
'top': extents['maxAscent'],
'middle': extents['maxHeight'] / 2 - extents['maxDescent'],
'bottom': -extents['maxDescent'],
'baseline': 0,
}[valign.lower()]
self.ctx.move_to(x, y)
self.ctx.rel_move_to(math.sin(angle) * -vertical,
math.cos(angle) * vertical)
self.ctx.rotate(angle)
self.ctx.rel_move_to(-horizontal, 0)
bx, by = self.ctx.get_current_point()
by -= extents['maxAscent']
self.ctx.text_path(text)
self.ctx.fill()
self.ctx.set_matrix(origMatrix)
def drawTitle(self, text):
self.encodeHeader('title')
y = self.area['ymin']
x = self.width / 2
lineHeight = self.getExtents()['maxHeight']
for line in text.split('\n'):
self.drawText(line, x, y, align='center')
y += lineHeight
if self.params.get('yAxisSide') == 'right':
self.area['ymin'] = y
else:
self.area['ymin'] = y + self.margin
def drawLegend(self, elements, unique=False):
# elements is [ (name,color,rightSide), (name,color,rightSide), ... ]
self.encodeHeader('legend')
if unique:
# remove duplicate names
namesSeen = []
newElements = []
for e in elements:
if e[0] not in namesSeen:
namesSeen.append(e[0])
newElements.append(e)
elements = newElements
# Check if there's enough room to use two columns.
rightSideLabels = False
padding = 5
longestName = sorted([e[0] for e in elements], key=len)[-1]
# Double it to check if there's enough room for 2 columns
testSizeName = longestName + " " + longestName
testExt = self.getExtents(testSizeName)
testBoxSize = testExt['maxHeight'] - 1
testWidth = testExt['width'] + 2 * (testBoxSize + padding)
if testWidth + 50 < self.width:
rightSideLabels = True
if self.secondYAxis and rightSideLabels:
extents = self.getExtents(longestName)
padding = 5
boxSize = extents['maxHeight'] - 1
lineHeight = extents['maxHeight'] + 1
labelWidth = extents['width'] + 2 * (boxSize + padding)
columns = max(1, math.floor(
(self.width - self.area['xmin']) / labelWidth))
numRight = len([name for (name, color, rightSide) in elements
if rightSide])
numberOfLines = max(len(elements) - numRight, numRight)
columns = math.floor(columns / 2.0)
columns = max(columns, 1)
legendHeight = max(
1, (numberOfLines / columns)) * (lineHeight + padding)
# scoot the drawing area up to fit the legend
self.area['ymax'] -= legendHeight
self.ctx.set_line_width(1.0)
x = self.area['xmin']
y = self.area['ymax'] + (2 * padding)
n = 0
xRight = self.area['xmax'] - self.area['xmin']
yRight = y
nRight = 0
for name, color, rightSide in elements:
self.setColor(color)
if rightSide:
nRight += 1
self.drawRectangle(xRight - padding, yRight,
boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(xRight - padding, yRight,
boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, xRight - boxSize, yRight,
align='right')
xRight -= labelWidth
if nRight % columns == 0:
xRight = self.area['xmax'] - self.area['xmin']
yRight += lineHeight
else:
n += 1
self.drawRectangle(x, y, boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(x, y, boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, x + boxSize + padding, y, align='left')
x += labelWidth
if n % columns == 0:
x = self.area['xmin']
y += lineHeight
else:
extents = self.getExtents(longestName)
boxSize = extents['maxHeight'] - 1
lineHeight = extents['maxHeight'] + 1
labelWidth = extents['width'] + 2 * (boxSize + padding)
columns = math.floor(self.width / labelWidth)
columns = max(columns, 1)
numberOfLines = math.ceil(float(len(elements)) / columns)
legendHeight = numberOfLines * (lineHeight + padding)
# scoot the drawing area up to fit the legend
self.area['ymax'] -= legendHeight
self.ctx.set_line_width(1.0)
x = self.area['xmin']
y = self.area['ymax'] + (2 * padding)
for i, (name, color, rightSide) in enumerate(elements):
if rightSide:
self.setColor(color)
self.drawRectangle(x + labelWidth + padding, y,
boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(x + labelWidth + padding, y,
boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, x + labelWidth, y, align='right')
x += labelWidth
else:
self.setColor(color)
self.drawRectangle(x, y, boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(x, y, boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, x + boxSize + padding, y, align='left')
x += labelWidth
if (i + 1) % columns == 0:
x = self.area['xmin']
y += lineHeight
def encodeHeader(self, text):
self.ctx.save()
self.setColor(self.backgroundColor)
self.ctx.move_to(-88, -88) # identifier
for i, char in enumerate(text):
self.ctx.line_to(-ord(char), -i-1)
self.ctx.stroke()
self.ctx.restore()
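    # Note (added for clarity, not in the original source): encodeHeader()
    # hides a marker in the drawing itself.  The path starts at (-88, -88)
    # and encodes each character c of `text` as a segment to (-ord(c), -i-1),
    # so encodeHeader('axes') produces roughly
    #     M -88 -88 L -97 -1 L -120 -2 L -101 -3 L -115 -4
    # Because it is stroked in the background colour it is invisible in the
    # rendered image; for SVG output, output() later finds these paths and
    # rewrites them into <g class="axes">-style wrappers (see onHeaderPath
    # inside output() below).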
def loadTemplate(self, template):
from ..app import app
conf = app.config.get('templates', {})
opts = defaults = defaultGraphOptions
defaults.update(conf.get('defaults', {}))
opts.update(conf.get(template, {}))
self.defaultBackground = opts.get('background', defaults['background'])
self.defaultForeground = opts.get('foreground', defaults['foreground'])
self.defaultMajorGridLineColor = opts.get('majorline',
defaults['majorline'])
self.defaultMinorGridLineColor = opts.get('minorline',
defaults['minorline'])
self.defaultColorList = [
c.strip() for c in opts.get('lineColors',
defaults['lineColors'])]
fontName = opts.get('fontname', defaults['fontname'])
fontSize = float(opts.get('fontsize', defaults['fontsize']))
fontBold = opts.get('fontbold', defaults['fontbold']).lower() == 'true'
fontItalic = opts.get('fontitalic',
defaults['fontitalic']).lower() == 'true'
self.defaultFontParams = {
'name': self.params.get('fontName', fontName),
'size': int(self.params.get('fontSize', fontSize)),
'bold': self.params.get('fontBold', fontBold),
'italic': self.params.get('fontItalic', fontItalic),
}
def output(self, fileObj):
if self.outputFormat == 'png':
self.surface.write_to_png(fileObj)
elif self.outputFormat == 'pdf':
self.surface.finish()
pdfData = self.surfaceData.getvalue()
self.surfaceData.close()
fileObj.write(pdfData)
else:
if hasattr(self, 'startTime'):
has_data = True
metaData = {
'x': {
'start': self.startTime,
'end': self.endTime
},
'options': {
'lineWidth': self.lineWidth
},
'font': self.defaultFontParams,
'area': self.area,
'series': []
}
if not self.secondYAxis:
metaData['y'] = {
'top': self.yTop,
'bottom': self.yBottom,
'step': self.yStep,
'labels': self.yLabels,
'labelValues': self.yLabelValues
}
for series in self.data:
if 'stacked' not in series.options:
metaData['series'].append({
'name': series.name,
'start': series.start,
'end': series.end,
'step': series.step,
'valuesPerPoint': series.valuesPerPoint,
'color': series.color,
'data': series,
'options': series.options
})
else:
has_data = False
metaData = {}
self.surface.finish()
svgData = self.surfaceData.getvalue()
self.surfaceData.close()
# we expect height/width in pixels, not points
svgData = svgData.decode().replace('pt"', 'px"', 2)
svgData = svgData.replace('</svg>\n', '', 1)
svgData = svgData.replace('</defs>\n<g',
'</defs>\n<g class="graphite"', 1)
if has_data:
# We encode headers using special paths with d^="M -88 -88"
# Find these, and turn them into <g> wrappers instead
def onHeaderPath(match):
name = ''
for char in re.findall(r'L -(\d+) -\d+', match.group(1)):
name += chr(int(char))
return '</g><g data-header="true" class="%s">' % name
(svgData, subsMade) = re.subn(r'<path.+?d="M -88 -88 (.+?)"/>',
onHeaderPath, svgData)
# Replace the first </g><g> with <g>, and close out the
# last </g> at the end
svgData = svgData.replace('</g><g data-header',
'<g data-header', 1)
if subsMade > 0:
svgData += "</g>"
svgData = svgData.replace(' data-header="true"', '')
fileObj.write(svgData.encode())
fileObj.write(("""<script>
<![CDATA[
metadata = %s
]]>
</script>
</svg>""" % json.dumps(metaData)).encode())
class LineGraph(Graph):
customizable = Graph.customizable + (
'title', 'vtitle', 'lineMode', 'lineWidth', 'hideLegend', 'hideAxes',
'minXStep', 'hideGrid', 'majorGridLineColor', 'minorGridLineColor',
'thickness', 'min', 'max', 'graphOnly', 'yMin', 'yMax', 'yLimit',
'yStep', 'areaMode', 'areaAlpha', 'drawNullAsZero', 'tz', 'yAxisSide',
'pieMode', 'yUnitSystem', 'logBase', 'yMinLeft', 'yMinRight',
'yMaxLeft', 'yMaxRight', 'yLimitLeft', 'yLimitRight', 'yStepLeft',
'yStepRight', 'rightWidth', 'rightColor', 'rightDashed', 'leftWidth',
'leftColor', 'leftDashed', 'xFormat', 'minorY', 'hideYAxis',
'hideXAxis', 'uniqueLegend', 'vtitleRight', 'yDivisors',
'connectedLimit', 'hideNullFromLegend')
validLineModes = ('staircase', 'slope', 'connected')
validAreaModes = ('none', 'first', 'all', 'stacked')
validPieModes = ('maximum', 'minimum', 'average')
def drawGraph(self, **params):
# Make sure we've got datapoints to draw
if self.data:
startTime = min([series.start for series in self.data])
endTime = max([series.end for series in self.data])
timeRange = endTime - startTime
else:
timeRange = None
if not timeRange:
x = self.width / 2
y = self.height / 2
self.setColor('red')
self.setFont(size=math.log(self.width * self.height))
self.drawText("No Data", x, y, align='center')
return
# Determine if we're doing a 2 y-axis graph.
for series in self.data:
if 'secondYAxis' in series.options:
self.dataRight.append(series)
else:
self.dataLeft.append(series)
if len(self.dataRight) > 0:
self.secondYAxis = True
        # API compatibility hacks
if params.get('graphOnly', False):
params['hideLegend'] = True
params['hideGrid'] = True
params['hideAxes'] = True
params['hideXAxis'] = False
params['hideYAxis'] = False
params['yAxisSide'] = 'left'
params['title'] = ''
params['vtitle'] = ''
params['margin'] = 0
params['tz'] = ''
self.margin = 0
self.area['xmin'] = 0
self.area['xmax'] = self.width
self.area['ymin'] = 0
self.area['ymax'] = self.height
if 'yMin' not in params and 'min' in params:
params['yMin'] = params['min']
if 'yMax' not in params and 'max' in params:
params['yMax'] = params['max']
if 'lineWidth' not in params and 'thickness' in params:
params['lineWidth'] = params['thickness']
if 'yAxisSide' not in params:
params['yAxisSide'] = 'left'
if 'yUnitSystem' not in params:
params['yUnitSystem'] = 'si'
else:
params['yUnitSystem'] = force_text(params['yUnitSystem']).lower()
if params['yUnitSystem'] not in UnitSystems.keys():
params['yUnitSystem'] = 'si'
self.params = params
# Don't do any of the special right y-axis stuff if we're drawing 2
# y-axes.
if self.secondYAxis:
params['yAxisSide'] = 'left'
# When Y Axis is labeled on the right, we subtract x-axis positions
# from the max, instead of adding to the minimum
if self.params.get('yAxisSide') == 'right':
self.margin = self.width
# Now to setup our LineGraph specific options
self.lineWidth = float(params.get('lineWidth', 1.2))
self.lineMode = params.get('lineMode', 'slope').lower()
self.connectedLimit = params.get("connectedLimit", INFINITY)
assert self.lineMode in self.validLineModes, "Invalid line mode!"
self.areaMode = params.get('areaMode', 'none').lower()
assert self.areaMode in self.validAreaModes, "Invalid area mode!"
self.pieMode = params.get('pieMode', 'maximum').lower()
assert self.pieMode in self.validPieModes, "Invalid pie mode!"
# Line mode slope does not work (or even make sense) for series that
# have only one datapoint. So if any series have one datapoint we
# force staircase mode.
if self.lineMode == 'slope':
for series in self.data:
if len(series) == 1:
self.lineMode = 'staircase'
break
if self.secondYAxis:
for series in self.data:
if 'secondYAxis' in series.options:
if 'rightWidth' in params:
series.options['lineWidth'] = params['rightWidth']
if 'rightDashed' in params:
series.options['dashed'] = params['rightDashed']
if 'rightColor' in params:
series.color = params['rightColor']
else:
if 'leftWidth' in params:
series.options['lineWidth'] = params['leftWidth']
if 'leftDashed' in params:
series.options['dashed'] = params['leftDashed']
if 'leftColor' in params:
series.color = params['leftColor']
for series in self.data:
if not hasattr(series, 'color'):
series.color = next(self.colors)
titleSize = self.defaultFontParams['size'] + math.floor(
math.log(self.defaultFontParams['size']))
self.setFont(size=titleSize)
self.setColor(self.foregroundColor)
if params.get('title'):
self.drawTitle(force_text(params['title']))
if params.get('vtitle'):
self.drawVTitle(force_text(params['vtitle']))
if self.secondYAxis and params.get('vtitleRight'):
self.drawVTitle(force_text(params['vtitleRight']), rightAlign=True)
self.setFont()
if not params.get('hideLegend', len(self.data) > 10):
elements = []
hideNull = params.get('hideNullFromLegend', False)
for series in self.data:
if series.name:
if not(hideNull and all(v is None for v in list(series))):
elements.append((
unquote_plus(series.name),
series.color,
series.options.get('secondYAxis')))
if len(elements) > 0:
self.drawLegend(elements, params.get('uniqueLegend', False))
# Setup axes, labels, and grid
# First we adjust the drawing area size to fit X-axis labels
if (
not self.params.get('hideAxes', False) and
not self.params.get('hideXAxis', False)
):
self.area['ymax'] -= self.getExtents()['maxAscent'] * 2
self.startTime = min([series.start for series in self.data])
if (
self.lineMode == 'staircase' or
set([len(series) for series in self.data]) == set([2])
):
self.endTime = max([series.end for series in self.data])
else:
self.endTime = max([
(series.end - series.step) for series in self.data])
self.timeRange = self.endTime - self.startTime
# Now we consolidate our data points to fit in the currently estimated
# drawing area
self.consolidateDataPoints()
self.encodeHeader('axes')
# Now its time to fully configure the Y-axis and determine the space
# required for Y-axis labels. Since we'll probably have to squeeze the
# drawing area to fit the Y labels, we may need to reconsolidate our
# data points, which in turn means re-scaling the Y axis, this process
# will repeat until we have accurate Y labels and enough space to fit
# our data points
currentXMin = self.area['xmin']
currentXMax = self.area['xmax']
if self.secondYAxis:
self.setupTwoYAxes()
else:
self.setupYAxis()
while (
currentXMin != self.area['xmin'] or
currentXMax != self.area['xmax']
): # see if the Y-labels require more space
# this can cause the Y values to change
self.consolidateDataPoints()
# so let's keep track of the previous Y-label space requirements
currentXMin = self.area['xmin']
currentXMax = self.area['xmax']
if self.secondYAxis: # and recalculate their new requirements
self.setupTwoYAxes()
else:
self.setupYAxis()
# Now that our Y-axis is finalized, let's determine our X labels (this
# won't affect the drawing area)
self.setupXAxis()
if not self.params.get('hideAxes', False):
self.drawLabels()
if not self.params.get('hideGrid', False):
# hideAxes implies hideGrid
self.encodeHeader('grid')
self.drawGridLines()
# Finally, draw the graph lines
self.encodeHeader('lines')
self.drawLines()
def drawVTitle(self, text, rightAlign=False):
lineHeight = self.getExtents()['maxHeight']
if rightAlign:
self.encodeHeader('vtitleRight')
x = self.area['xmax'] - lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline',
rotate=90)
x -= lineHeight
self.area['xmax'] = x - self.margin - lineHeight
else:
self.encodeHeader('vtitle')
x = self.area['xmin'] + lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline',
rotate=270)
x += lineHeight
self.area['xmin'] = x + self.margin + lineHeight
def getYCoord(self, value, side=None):
if "left" == side:
yLabelValues = self.yLabelValuesL
yTop = self.yTopL
yBottom = self.yBottomL
elif "right" == side:
yLabelValues = self.yLabelValuesR
yTop = self.yTopR
yBottom = self.yBottomR
else:
yLabelValues = self.yLabelValues
yTop = self.yTop
yBottom = self.yBottom
try:
highestValue = max(yLabelValues)
lowestValue = min(yLabelValues)
except ValueError:
highestValue = yTop
lowestValue = yBottom
pixelRange = self.area['ymax'] - self.area['ymin']
relativeValue = value - lowestValue
valueRange = highestValue - lowestValue
if self.logBase:
if value <= 0:
return None
relativeValue = (
math.log(value, self.logBase) -
math.log(lowestValue, self.logBase))
valueRange = math.log(highestValue, self.logBase) - math.log(
lowestValue, self.logBase)
pixelToValueRatio = pixelRange / valueRange
valueInPixels = pixelToValueRatio * relativeValue
return self.area['ymax'] - valueInPixels
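    # Rough illustration (assumed numbers, not from the original code): with
    # y-labels spanning 0..100 and a drawing area from ymin=20 to ymax=180
    # pixels, pixelToValueRatio is 160/100 = 1.6, so getYCoord(25) returns
    # 180 - 1.6 * 25 = 140, i.e. larger values map to smaller y coordinates.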
def drawLines(self, width=None, dash=None, linecap='butt',
linejoin='miter'):
if not width:
width = self.lineWidth
self.ctx.set_line_width(width)
originalWidth = width
width = float(int(width) % 2) / 2
if dash:
self.ctx.set_dash(dash, 1)
else:
self.ctx.set_dash([], 0)
self.ctx.set_line_cap({
'butt': cairo.LINE_CAP_BUTT,
'round': cairo.LINE_CAP_ROUND,
'square': cairo.LINE_CAP_SQUARE,
}[linecap])
self.ctx.set_line_join({
'miter': cairo.LINE_JOIN_MITER,
'round': cairo.LINE_JOIN_ROUND,
'bevel': cairo.LINE_JOIN_BEVEL,
}[linejoin])
# check whether there is an stacked metric
singleStacked = False
for series in self.data:
if 'stacked' in series.options:
singleStacked = True
if singleStacked:
self.data = sort_stacked(self.data)
# stack the values
if self.areaMode == 'stacked' and not self.secondYAxis:
# TODO Allow stacked area mode with secondYAxis
total = []
for series in self.data:
if 'drawAsInfinite' in series.options:
continue
series.options['stacked'] = True
for i in range(len(series)):
if len(total) <= i:
total.append(0)
if series[i] is not None:
original = series[i]
series[i] += total[i]
total[i] += original
elif self.areaMode == 'first':
self.data[0].options['stacked'] = True
elif self.areaMode == 'all':
for series in self.data:
if 'drawAsInfinite' not in series.options:
series.options['stacked'] = True
# apply alpha channel and create separate stroke series
if self.params.get('areaAlpha'):
try:
alpha = float(self.params['areaAlpha'])
except ValueError:
alpha = 0.5
strokeSeries = []
for series in self.data:
if 'stacked' in series.options:
series.options['alpha'] = alpha
newSeries = TimeSeries(
series.name, series.start, series.end,
series.step * series.valuesPerPoint,
[x for x in series])
newSeries.xStep = series.xStep
newSeries.color = series.color
if 'secondYAxis' in series.options:
newSeries.options['secondYAxis'] = True
strokeSeries.append(newSeries)
self.data += strokeSeries
# setup the clip region
self.ctx.set_line_width(1.0)
self.ctx.rectangle(self.area['xmin'], self.area['ymin'],
self.area['xmax'] - self.area['xmin'],
self.area['ymax'] - self.area['ymin'])
self.ctx.clip()
self.ctx.set_line_width(originalWidth)
# save clip to restore once stacked areas are drawn
self.ctx.save()
clipRestored = False
for series in self.data:
if 'stacked' not in series.options:
# stacked areas are always drawn first. if this series is not
# stacked, we finished stacking. reset the clip region so
# lines can show up on top of the stacked areas.
if not clipRestored:
clipRestored = True
self.ctx.restore()
if 'lineWidth' in series.options:
self.ctx.set_line_width(series.options['lineWidth'])
if 'dashed' in series.options:
self.ctx.set_dash([series.options['dashed']], 1)
else:
self.ctx.set_dash([], 0)
# Shift the beginning of drawing area to the start of the series
# if the graph itself has a larger range
missingPoints = (series.start - self.startTime) / series.step
startShift = series.xStep * (missingPoints / series.valuesPerPoint)
x = float(self.area['xmin']) + startShift + (self.lineWidth / 2.0)
y = float(self.area['ymin'])
startX = x
if series.options.get('invisible'):
self.setColor(series.color, 0, True)
else:
self.setColor(series.color,
series.options.get('alpha') or 1.0)
# The number of preceding datapoints that had a None value.
consecutiveNones = 0
for index, value in enumerate(series):
if value != value: # convert NaN to None
value = None
if value is None and self.params.get('drawNullAsZero'):
value = 0.0
if value is None:
if consecutiveNones == 0:
self.ctx.line_to(x, y)
if 'stacked' in series.options:
# Close off and fill area before unknown interval
if self.secondYAxis:
if 'secondYAxis' in series.options:
self.fillAreaAndClip(
x, y, startX,
self.getYCoord(0, "right"))
else:
self.fillAreaAndClip(
x, y, startX,
self.getYCoord(0, "left"))
else:
self.fillAreaAndClip(x, y, startX,
self.getYCoord(0))
x += series.xStep
consecutiveNones += 1
else:
if self.secondYAxis:
if 'secondYAxis' in series.options:
y = self.getYCoord(value, "right")
else:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None:
value = None
elif y < 0:
y = 0
if 'drawAsInfinite' in series.options and value > 0:
self.ctx.move_to(x, self.area['ymax'])
self.ctx.line_to(x, self.area['ymin'])
self.ctx.stroke()
x += series.xStep
continue
if consecutiveNones > 0:
startX = x
if self.lineMode == 'staircase':
if consecutiveNones > 0:
self.ctx.move_to(x, y)
else:
self.ctx.line_to(x, y)
x += series.xStep
self.ctx.line_to(x, y)
elif self.lineMode == 'slope':
if consecutiveNones > 0:
self.ctx.move_to(x, y)
self.ctx.line_to(x, y)
x += series.xStep
elif self.lineMode == 'connected':
                        # If the gap is larger than the connectedLimit or
# if this is the first non-None datapoint in the
# series, start drawing from that datapoint.
if (
consecutiveNones > self.connectedLimit or
consecutiveNones == index
):
self.ctx.move_to(x, y)
self.ctx.line_to(x, y)
x += series.xStep
consecutiveNones = 0
if 'stacked' in series.options:
if self.lineMode == 'staircase':
xPos = x
else:
xPos = x-series.xStep
if self.secondYAxis:
if 'secondYAxis' in series.options:
areaYFrom = self.getYCoord(0, "right")
else:
areaYFrom = self.getYCoord(0, "left")
else:
areaYFrom = self.getYCoord(0)
self.fillAreaAndClip(xPos, y, startX, areaYFrom)
else:
self.ctx.stroke()
# return to the original line width
self.ctx.set_line_width(originalWidth)
if 'dashed' in series.options:
# if we changed the dash setting before, change it back now
if dash:
self.ctx.set_dash(dash, 1)
else:
self.ctx.set_dash([], 0)
def fillAreaAndClip(self, x, y, startX=None, areaYFrom=None):
startX = (startX or self.area['xmin'])
areaYFrom = (areaYFrom or self.area['ymax'])
pattern = self.ctx.copy_path()
# fill
self.ctx.line_to(x, areaYFrom) # bottom endX
self.ctx.line_to(startX, areaYFrom) # bottom startX
self.ctx.close_path()
if self.areaMode == 'all':
self.ctx.fill_preserve()
else:
self.ctx.fill()
# clip above y axis
self.ctx.append_path(pattern)
self.ctx.line_to(x, areaYFrom) # yZero endX
self.ctx.line_to(self.area['xmax'], areaYFrom) # yZero right
self.ctx.line_to(self.area['xmax'], self.area['ymin']) # top right
self.ctx.line_to(self.area['xmin'], self.area['ymin']) # top left
self.ctx.line_to(self.area['xmin'], areaYFrom) # yZero left
self.ctx.line_to(startX, areaYFrom) # yZero startX
# clip below y axis
self.ctx.line_to(x, areaYFrom) # yZero endX
self.ctx.line_to(self.area['xmax'], areaYFrom) # yZero right
self.ctx.line_to(self.area['xmax'], self.area['ymax']) # bottom right
self.ctx.line_to(self.area['xmin'], self.area['ymax']) # bottom left
self.ctx.line_to(self.area['xmin'], areaYFrom) # yZero left
self.ctx.line_to(startX, areaYFrom) # yZero startX
self.ctx.close_path()
self.ctx.clip()
def consolidateDataPoints(self):
numberOfPixels = self.graphWidth = (
self.area['xmax'] - self.area['xmin'] - (self.lineWidth + 1))
for series in self.data:
numberOfDataPoints = self.timeRange / series.step
minXStep = float(self.params.get('minXStep', 1.0))
divisor = self.timeRange / series.step or 1
bestXStep = numberOfPixels / divisor
if bestXStep < minXStep:
drawableDataPoints = int(numberOfPixels / minXStep)
pointsPerPixel = math.ceil(
float(numberOfDataPoints) / float(drawableDataPoints))
series.consolidate(pointsPerPixel)
series.xStep = (
numberOfPixels * pointsPerPixel) / numberOfDataPoints
else:
series.xStep = bestXStep
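    # Worked example (hypothetical figures, added for clarity): with roughly
    # 600 drawable pixels, a 24h time range and a 60s series step there are
    # 1440 datapoints, so bestXStep is 600/1440 ~= 0.42px.  With the default
    # minXStep of 1.0 that is too dense, so the series is consolidated with
    # pointsPerPixel = ceil(1440/600) = 3 and drawn with xStep = 1.25px.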
def setupYAxis(self):
drawNullAsZero = self.params.get('drawNullAsZero')
stacked = (self.areaMode == 'stacked')
(yMinValue, yMaxValue) = dataLimits(self.data, drawNullAsZero,
stacked)
if self.logBase:
yTics = _LogAxisTics(yMinValue, yMaxValue,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
else:
yTics = _LinearAxisTics(yMinValue, yMaxValue,
unitSystem=self.params.get('yUnitSystem'))
yTics.applySettings(axisMin=self.params.get('yMin'),
axisMax=self.params.get('yMax'),
axisLimit=self.params.get('yLimit'))
if 'yStep' in self.params:
yTics.setStep(self.params['yStep'])
else:
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
binary = self.params.get('yUnitSystem') == 'binary'
yTics.chooseStep(divisors=yDivisors, binary=binary)
yTics.chooseLimits()
# Copy the values we need back out of the yTics object:
self.yStep = yTics.step
self.yBottom = yTics.bottom
self.yTop = yTics.top
self.ySpan = yTics.span
if not self.params.get('hideAxes', False):
# Create and measure the Y-labels
self.yLabelValues = yTics.getLabelValues()
self.yLabels = [yTics.makeLabel(value)
for value in self.yLabelValues]
self.yLabelWidth = max([
self.getExtents(label)['width'] for label in self.yLabels])
if not self.params.get('hideYAxis'):
if self.params.get('yAxisSide') == 'left':
# Scoot the graph over to the left just enough to fit the
# y-labels:
xMin = self.margin + (self.yLabelWidth * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
else:
# Scoot the graph over to the right just enough to fit
                    # the y-labels:
xMin = 0
xMax = self.margin - (self.yLabelWidth * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
else:
self.yLabelValues = []
self.yLabels = []
self.yLabelWidth = 0.0
def setupTwoYAxes(self):
drawNullAsZero = self.params.get('drawNullAsZero')
stacked = (self.areaMode == 'stacked')
(yMinValueL, yMaxValueL) = dataLimits(self.dataLeft, drawNullAsZero,
stacked)
(yMinValueR, yMaxValueR) = dataLimits(self.dataRight, drawNullAsZero,
stacked)
# TODO: Allow separate bases for L & R Axes.
if self.logBase:
yTicsL = _LogAxisTics(yMinValueL, yMaxValueL,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
yTicsR = _LogAxisTics(yMinValueR, yMaxValueR,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
else:
yTicsL = _LinearAxisTics(yMinValueL, yMaxValueL,
unitSystem=self.params.get('yUnitSystem'))
yTicsR = _LinearAxisTics(yMinValueR, yMaxValueR,
unitSystem=self.params.get('yUnitSystem'))
yTicsL.applySettings(axisMin=self.params.get('yMinLeft'),
axisMax=self.params.get('yMaxLeft'),
axisLimit=self.params.get('yLimitLeft'))
yTicsR.applySettings(axisMin=self.params.get('yMinRight'),
axisMax=self.params.get('yMaxRight'),
axisLimit=self.params.get('yLimitRight'))
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
binary = self.params.get('yUnitSystem') == 'binary'
if 'yStepLeft' in self.params:
yTicsL.setStep(self.params['yStepLeft'])
else:
yTicsL.chooseStep(divisors=yDivisors, binary=binary)
if 'yStepRight' in self.params:
yTicsR.setStep(self.params['yStepRight'])
else:
yTicsR.chooseStep(divisors=yDivisors, binary=binary)
yTicsL.chooseLimits()
yTicsR.chooseLimits()
# Copy the values we need back out of the yTics objects:
self.yStepL = yTicsL.step
self.yBottomL = yTicsL.bottom
self.yTopL = yTicsL.top
self.ySpanL = yTicsL.span
self.yStepR = yTicsR.step
self.yBottomR = yTicsR.bottom
self.yTopR = yTicsR.top
self.ySpanR = yTicsR.span
# Create and measure the Y-labels
self.yLabelValuesL = yTicsL.getLabelValues()
self.yLabelValuesR = yTicsR.getLabelValues()
self.yLabelsL = [yTicsL.makeLabel(value)
for value in self.yLabelValuesL]
self.yLabelsR = [yTicsR.makeLabel(value)
for value in self.yLabelValuesR]
self.yLabelWidthL = max([
self.getExtents(label)['width'] for label in self.yLabelsL])
self.yLabelWidthR = max([
self.getExtents(label)['width'] for label in self.yLabelsR])
# scoot the graph over to the left just enough to fit the y-labels
# xMin = self.margin + self.margin + (self.yLabelWidthL * 1.02)
xMin = self.margin + (self.yLabelWidthL * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
# scoot the graph over to the right just enough to fit the y-labels
xMax = self.width - (self.yLabelWidthR * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
def setupXAxis(self):
from ..app import app
if self.userTimeZone:
tzinfo = pytz.timezone(self.userTimeZone)
else:
tzinfo = pytz.timezone(app.config['TIME_ZONE'])
self.start_dt = datetime.fromtimestamp(self.startTime, tzinfo)
self.end_dt = datetime.fromtimestamp(self.endTime, tzinfo)
secondsPerPixel = float(self.timeRange) / float(self.graphWidth)
# pixels per second
self.xScaleFactor = float(self.graphWidth) / float(self.timeRange)
potential = [
c for c in xAxisConfigs if c['seconds'] <= secondsPerPixel and
c.get('maxInterval', self.timeRange + 1) >= self.timeRange]
if potential:
self.xConf = potential[-1]
else:
self.xConf = xAxisConfigs[-1]
self.xLabelStep = self.xConf['labelUnit'] * self.xConf['labelStep']
self.xMinorGridStep = (
self.xConf['minorGridUnit'] * self.xConf['minorGridStep'])
self.xMajorGridStep = (
self.xConf['majorGridUnit'] * self.xConf['majorGridStep'])
def drawLabels(self):
# Draw the Y-labels
if not self.params.get('hideYAxis'):
if not self.secondYAxis:
for value, label in zip(self.yLabelValues, self.yLabels):
if self.params.get('yAxisSide') == 'left':
x = self.area['xmin'] - (self.yLabelWidth * 0.02)
else:
# Inverted for right side Y Axis
x = self.area['xmax'] + (self.yLabelWidth * 0.02)
y = self.getYCoord(value)
if y is None:
value = None
elif y < 0:
y = 0
if self.params.get('yAxisSide') == 'left':
self.drawText(label, x, y, align='right',
valign='middle')
else:
# Inverted for right side Y Axis
self.drawText(label, x, y, align='left',
valign='middle')
else: # Draws a right side and a Left side axis
for valueL, labelL in zip(self.yLabelValuesL, self.yLabelsL):
xL = self.area['xmin'] - (self.yLabelWidthL * 0.02)
yL = self.getYCoord(valueL, "left")
if yL is None:
value = None
elif yL < 0:
yL = 0
self.drawText(labelL, xL, yL, align='right',
valign='middle')
# Right Side
for valueR, labelR in zip(self.yLabelValuesR, self.yLabelsR):
# Inverted for right side Y Axis
xR = self.area['xmax'] + (self.yLabelWidthR * 0.02) + 3
yR = self.getYCoord(valueR, "right")
if yR is None:
valueR = None
elif yR < 0:
yR = 0
# Inverted for right side Y Axis
self.drawText(labelR, xR, yR, align='left',
valign='middle')
if not self.params.get('hideXAxis'):
dt, x_label_delta = find_x_times(self.start_dt,
self.xConf['labelUnit'],
self.xConf['labelStep'])
# Draw the X-labels
xFormat = self.params.get('xFormat', self.xConf['format'])
while dt < self.end_dt:
label = dt.strftime(xFormat)
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
y = self.area['ymax'] + self.getExtents()['maxAscent']
self.drawText(label, x, y, align='center', valign='top')
dt += x_label_delta
def drawGridLines(self):
# Not sure how to handle this for 2 y-axes
# Just using the left side info for the grid.
# Horizontal grid lines
leftSide = self.area['xmin']
rightSide = self.area['xmax']
labels = []
if self.secondYAxis:
labels = self.yLabelValuesL
else:
labels = self.yLabelValues
for i, value in enumerate(labels):
self.ctx.set_line_width(0.4)
self.setColor(self.params.get('majorGridLineColor',
self.defaultMajorGridLineColor))
if self.secondYAxis:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
# draw minor gridlines if this isn't the last label
if self.minorY >= 1 and i < (len(labels) - 1):
# in case graphite supports inverted Y axis now or someday
valueLower, valueUpper = sorted((value, labels[i+1]))
# each minor gridline is 1/minorY apart from the nearby
# gridlines. we calculate that distance, for adding to the
# value in the loop.
distance = ((valueUpper - valueLower) / float(1 + self.minorY))
# starting from the initial valueLower, we add the minor
# distance for each minor gridline that we wish to draw, and
# then draw it.
for minor in range(self.minorY):
self.ctx.set_line_width(0.3)
self.setColor(
self.params.get('minorGridLineColor',
self.defaultMinorGridLineColor))
                    # each minor gridline value is valueLower plus one or
                    # more steps of the minor spacing computed above
value = valueLower + ((1+minor) * distance)
if self.logBase:
yTopFactor = self.logBase * self.logBase
else:
yTopFactor = 1
if self.secondYAxis:
if value >= (yTopFactor * self.yTopL):
continue
else:
if value >= (yTopFactor * self.yTop):
continue
if self.secondYAxis:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
# Vertical grid lines
top = self.area['ymin']
bottom = self.area['ymax']
# First we do the minor grid lines (majors will paint over them)
self.ctx.set_line_width(0.25)
self.setColor(self.params.get('minorGridLineColor',
self.defaultMinorGridLineColor))
dt, x_minor_delta = find_x_times(
self.start_dt, self.xConf['minorGridUnit'],
self.xConf['minorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_minor_delta
# Now we do the major grid lines
self.ctx.set_line_width(0.33)
self.setColor(self.params.get('majorGridLineColor',
self.defaultMajorGridLineColor))
dt, x_major_delta = find_x_times(self.start_dt,
self.xConf['majorGridUnit'],
self.xConf['majorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_major_delta
# Draw side borders for our graph area
self.ctx.set_line_width(0.5)
self.ctx.move_to(self.area['xmax'], bottom)
self.ctx.line_to(self.area['xmax'], top)
self.ctx.move_to(self.area['xmin'], bottom)
self.ctx.line_to(self.area['xmin'], top)
self.ctx.stroke()
class PieGraph(Graph):
customizable = Graph.customizable + (
'title', 'valueLabels', 'valueLabelsMin', 'hideLegend', 'pieLabels',
'areaAlpha', 'valueLabelsColor',
)
validValueLabels = ('none', 'number', 'percent')
def drawGraph(self, **params):
self.pieLabels = params.get('pieLabels', 'horizontal')
self.total = sum([t[1] for t in self.data])
if self.params.get('areaAlpha'):
try:
self.alpha = float(self.params['areaAlpha'])
except ValueError:
self.alpha = 1.0
else:
self.alpha = 1.0
self.slices = []
for name, value in self.data:
self.slices.append({
'name': name,
'value': value,
'percent': value / self.total,
'color': next(self.colors),
'alpha': self.alpha,
})
titleSize = self.defaultFontParams['size'] + math.floor(
math.log(self.defaultFontParams['size']))
self.setFont(size=titleSize)
self.setColor(self.foregroundColor)
if params.get('title'):
self.drawTitle(params['title'])
self.setFont()
if not params.get('hideLegend', False):
elements = [
(slice['name'], slice['color'], None) for slice in self.slices]
self.drawLegend(elements)
self.drawSlices()
if params.get('valueLabelsColor'):
self.valueLabelsColor = params.get('valueLabelsColor')
else:
self.valueLabelsColor = 'black'
self.valueLabelsMin = float(params.get('valueLabelsMin', 5))
self.valueLabels = params.get('valueLabels', 'percent')
assert self.valueLabels in self.validValueLabels, (
"valueLabels=%s must be one of %s" % (
self.valueLabels, self.validValueLabels))
if self.valueLabels != 'none':
self.drawLabels()
def drawSlices(self):
theta = 3.0 * math.pi / 2.0
halfX = (self.area['xmax'] - self.area['xmin']) / 2.0
halfY = (self.area['ymax'] - self.area['ymin']) / 2.0
self.x0 = x0 = self.area['xmin'] + halfX
self.y0 = y0 = self.area['ymin'] + halfY
self.radius = radius = min(halfX, halfY) * 0.95
for slice in self.slices:
self.setColor(slice['color'], slice['alpha'])
self.ctx.move_to(x0, y0)
phi = theta + (2 * math.pi) * slice['percent']
self.ctx.arc(x0, y0, radius, theta, phi)
self.ctx.line_to(x0, y0)
self.ctx.fill()
slice['midAngle'] = (theta + phi) / 2.0
slice['midAngle'] %= 2.0 * math.pi
theta = phi
def drawLabels(self):
self.setFont()
self.setColor(self.valueLabelsColor)
for slice in self.slices:
if self.valueLabels == 'percent':
if slice['percent'] * 100.0 < self.valueLabelsMin:
continue
label = "%%%.2f" % (slice['percent'] * 100.0)
elif self.valueLabels == 'number':
if slice['value'] < self.valueLabelsMin:
continue
if (
slice['value'] < 10 and
slice['value'] != int(slice['value'])
):
label = "%.2f" % slice['value']
else:
label = force_text(int(slice['value']))
theta = slice['midAngle']
x = self.x0 + (self.radius / 2.0 * math.cos(theta))
y = self.y0 + (self.radius / 2.0 * math.sin(theta))
if self.pieLabels == 'rotated':
if theta > (math.pi / 2.0) and theta <= (3.0 * math.pi / 2.0):
theta -= math.pi
self.drawText(label, x, y, align='center', valign='middle',
rotate=math.degrees(theta))
else:
self.drawText(label, x, y, align='center', valign='middle')
GraphTypes = {
'line': LineGraph,
'pie': PieGraph,
}
# Convenience functions
def safeArgs(args):
"""Iterate over valid, finite values in an iterable.
Skip any items that are None, NaN, or infinite.
"""
return (arg for arg in args
if arg is not None and not math.isnan(arg) and not math.isinf(arg))
def safeMin(args):
args = list(safeArgs(args))
if args:
return min(args)
def safeMax(args):
args = list(safeArgs(args))
if args:
return max(args)
def safeSum(values):
return sum(safeArgs(values))
def dataLimits(data, drawNullAsZero=False, stacked=False):
"""Return the range of values in data as (yMinValue, yMaxValue).
data is an array of TimeSeries objects.
"""
missingValues = any(None in series for series in data)
finiteData = [series for series in data
if not series.options.get('drawAsInfinite')]
yMinValue = safeMin(safeMin(series) for series in finiteData)
if yMinValue is None:
# This can only happen if there are no valid, non-infinite data.
return (0.0, 1.0)
if yMinValue > 0.0 and drawNullAsZero and missingValues:
yMinValue = 0.0
if stacked:
length = safeMin(len(series) for series in finiteData)
sumSeries = []
for i in range(0, length):
sumSeries.append(safeSum(series[i] for series in finiteData))
yMaxValue = safeMax(sumSeries)
else:
yMaxValue = safeMax(safeMax(series) for series in finiteData)
if yMaxValue < 0.0 and drawNullAsZero and missingValues:
yMaxValue = 0.0
return (yMinValue, yMaxValue)
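# Example (not part of the original module): for two plain series with values
# [1, 3, 2] and [2, 2, 4], dataLimits(...) returns (1, 4); with stacked=True
# the per-index sums [3, 5, 6] are used instead, giving (1, 6).  A None in
# either series only pulls the minimum down to 0.0 when drawNullAsZero is set.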
def sort_stacked(series_list):
stacked = [s for s in series_list if 'stacked' in s.options]
not_stacked = [s for s in series_list if 'stacked' not in s.options]
return stacked + not_stacked
def condition(value, size, step):
if step is None:
return abs(value) >= size
else:
return abs(value) >= size and step >= size
def format_units(v, step=None, system="si", units=None):
"""Format the given value in standardized units.
``system`` is either 'binary' or 'si'
For more info, see:
http://en.wikipedia.org/wiki/SI_prefix
http://en.wikipedia.org/wiki/Binary_prefix
"""
if v is None:
return 0, ''
for prefix, size in UnitSystems[system]:
if condition(v, size, step):
v2 = v / size
if v2 - math.floor(v2) < 0.00000000001 and v > 1:
v2 = float(math.floor(v2))
if units:
prefix = "%s%s" % (prefix, units)
return v2, prefix
if v - math.floor(v) < 0.00000000001 and v > 1:
v = float(math.floor(v))
if units:
prefix = units
else:
prefix = ''
return v, prefix
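# Illustrative calls (added for clarity; values checked against the logic
# above, but treat them as a sketch rather than a reference):
#     format_units(1500)                      -> (1.5, 'K')
#     format_units(1048576, system='binary')  -> (1.0, 'Mi')
#     format_units(1500, step=100)            -> (1500.0, '')  # small step blocks scaling
#     format_units(0.5)                       -> (0.5, '')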
def find_x_times(start_dt, unit, step):
if not isinstance(start_dt, datetime):
raise ValueError("Invalid start_dt: %s" % start_dt)
if not isinstance(step, int) or not step > 0:
if not isinstance(step, float) or unit != DAY or not step > 0.0:
raise ValueError("Invalid step value: %s" % step)
if unit == SEC:
dt = start_dt.replace(
second=start_dt.second - (start_dt.second % step))
x_delta = timedelta(seconds=step)
elif unit == MIN:
dt = start_dt.replace(
second=0, minute=start_dt.minute - (start_dt.minute % step))
x_delta = timedelta(minutes=step)
elif unit == HOUR:
dt = start_dt.replace(
second=0, minute=0, hour=start_dt.hour - (start_dt.hour % step))
x_delta = timedelta(hours=step)
elif unit == DAY:
dt = start_dt.replace(second=0, minute=0, hour=0)
x_delta = timedelta(days=step)
else:
raise ValueError("Invalid unit: %s" % unit)
while dt < start_dt:
dt += x_delta
return (dt, x_delta)
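# Example (hypothetical, for illustration only): with a start of
# datetime(2015, 6, 1, 12, 34, 56), unit=MIN and step=10, the start is first
# rounded down to 12:30:00 and then advanced past start_dt, so the function
# returns (datetime(2015, 6, 1, 12, 40), timedelta(minutes=10)).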
| apache-2.0 | -1,222,734,711,232,685,800 | 36.067367 | 79 | 0.520098 | false | 4.041013 | false | false | false |
viroulep/RebirthItemTracker | src/game_objects/state.py | 1 | 5925 | """This module handles anything related to the item tracker's state"""
import logging
import json
from game_objects.item import Item, ItemInfo
from game_objects.floor import Floor
from game_objects.serializable import Serializable
class TrackerState(Serializable):
"""This class represents a tracker state, and handle the logic to
modify it while keeping it coherent
"""
serialize = [('seed', basestring), ('floor_list', list),
('item_list', list), ('bosses', list), ('tracker_version', basestring), ('game_version', basestring)]
def __init__(self, seed, tracker_version, game_version):
self.reset(seed, game_version)
self.tracker_version = tracker_version
def reset(self, seed, game_version):
"""
        Reset the tracker state for a new run with the given seed.
        This should be enough to let the GC clean up everything from the previous run.
"""
# When the tracker state has been restarted, put this to True
# The view can then put it to false once it's been rendered
self.modified = True
self.seed = seed
self.game_version = game_version
self.floor_list = []
self.item_list = []
self.bosses = []
self.player_stats = {}
self.player_transforms = {}
for stat in ItemInfo.stat_list:
self.player_stats[stat] = 0.0
for transform in ItemInfo.transform_list:
self.player_transforms[transform] = set()
def add_floor(self, floor):
""" Add a floor to the current run """
self.floor_list.append(floor)
self.modified = True
@property
def last_floor(self):
"""
Get current floor
If no floor is in the floor list, create a default one
"""
if len(self.floor_list) == 0:
self.add_floor(Floor("f1"))
return self.floor_list[-1]
def add_item(self, item):
"""
Add an item to the current run, and update player's stats accordingly
        Return True if the item has been added, False otherwise.
"""
# Ignore repeated pickups of space bar items
if not (item.info.space and item in self.item_list):
self.item_list.append(item)
self.__add_stats_for_item(item)
self.modified = True
return True
else:
return False
@property
def last_item(self):
"""
Get last item picked up
Can return None !
"""
if len(self.item_list) > 0:
return self.item_list[-1]
else:
return None
def contains_item(self, item_id):
""" Looks for the given item_id in our item_list """
return len([x for x in self.item_list if x.item_id == item_id]) >= 1
def reroll(self):
""" Tag every (non-spacebar) items as rerolled """
[item.rerolled() for item in self.item_list]
# Add curse to last floor
def add_curse(self, curse):
""" Add a curse to current floor """
self.last_floor.add_curse(curse)
def add_boss(self, bossid):
""" Add boss to seen boss """
if bossid not in self.bosses:
self.bosses.append(bossid)
nbosses = len(self.bosses)
if 11 <= nbosses <= 13:
suffix = 'th'
else:
suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(nbosses % 10, 'th')
logging.getLogger("tracker").debug("Defeated %s%s boss %s",
len(self.bosses),
suffix,
bossid)
@property
def last_boss(self):
"""
Get last boss encountered
Can return None !
"""
if len(self.bosses) > 0:
return self.bosses[-1]
else:
return None
def drawn(self):
""" Tag this state as rendered """
self.modified = False
@staticmethod
def from_valid_json(json_dic, *args):
""" Create a state from a type-checked dic """
state = TrackerState(json_dic['seed'], json_dic['tracker_version'], json_dic['game_version'])
# The order is important, we want a list of legal floors the item can
# be picked up on before parsing items
for floor_dic in json_dic['floor_list']:
floor = Floor.from_json(floor_dic)
if not floor:
return None
state.add_floor(floor)
for bossstr in json_dic['bosses']:
# TODO create a serializable boss class that would create
# a boss object with description from a bossid
# In any case it's sufficient to (de)serialize only bossids
if not isinstance(bossstr, basestring):
return None
state.add_boss(bossstr)
for item_dic in json_dic['item_list']:
item = Item.from_json(item_dic, state.floor_list)
if not item:
return None
state.add_item(item)
return state
def __add_stats_for_item(self, item):
"""
Update player's stats with the given item.
"""
item_info = item.info
for stat in ItemInfo.stat_list:
if not item_info[stat]:
continue
change = float(item_info[stat])
self.player_stats[stat] += change
for transform in ItemInfo.transform_list:
if not item_info[transform]:
continue
self.player_transforms[transform].add(item)
class TrackerStateEncoder(json.JSONEncoder):
""" An encoder to provide to the json.load method, which handle game objects """
def default(self, obj):
if isinstance(obj, Serializable):
return obj.to_json()
return obj.__dict__
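# Usage sketch (an assumption based on the classes above, not taken from the
# original file): dumping a run to JSON goes through this encoder so that
# Serializable objects (floors, items, the state itself) are emitted via
# their to_json() representation:
#     json.dumps(state, cls=TrackerStateEncoder)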
| bsd-2-clause | -7,840,911,215,102,460,000 | 33.447674 | 118 | 0.560169 | false | 4.134682 | false | false | false |
tommo/gii | lib/gii/DeviceManager/DeviceManager.py | 1 | 2811 | import os
import stat
from gii.core import *
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import QEventLoop, QEvent, QObject
from gii.qt.IconCache import getIcon
from gii.qt.controls.Window import MainWindow
from gii.qt.controls.Menu import MenuManager
from gii.qt.QtEditorModule import QtEditorModule
from gii.SearchView import requestSearchView, registerSearchEnumerator
import Device
##----------------------------------------------------------------##
# def getIOSDeviceName( dev ):
# name = u''
# try:
# name = dev.get_value(name=u'DeviceName')
# except:
# pass
# print( u'%s - "%s"' % ( dev.get_deviceid(), name.decode(u'utf-8') ) )
##----------------------------------------------------------------##
signals.register( 'device.connected' )
signals.register( 'device.disconnected' )
signals.register( 'device.activated' )
signals.register( 'device.deactivated' )
##----------------------------------------------------------------##
class DeviceManager( EditorModule ):
def __init__( self ):
pass
def getName( self ):
return 'device_manager'
def getDependency( self ):
return []
def onLoad( self ):
self.deviceTypes = {}
self.containers = {}
self.devices = {}
self.activeDevice = None
registerSearchEnumerator( deviceSearchEnumerator )
#load device history
signals.connect( 'project.done_deploy', self.onDoneDeploy )
def onDeviceEvent( self, ev, device ):
if ev == 'connected':
signals.emit( 'device.connected', device )
self.devices[ device ] = True
device.setActive( False )
if not self.activeDevice:
self.setActiveDevice( device )
elif ev == 'disconnected':
signals.emit( 'device.disconnected', device )
self.devices[ device ] = False
if device == self.activeDevice:
self.activeDevice = None
def setActiveDevice( self, device ):
if self.activeDevice:
self.activeDevice.setActive( False )
signals.emit( 'device.deactivated', self.activeDevice )
self.activeDevice = device
if device:
self.activeDevice.setActive( True )
signals.emit( 'device.activated', device )
def onDoneDeploy( self, context ):
if not self.devices: return
		activeDevice = self.activeDevice or next( iter( self.devices ) )
print u'deploy on device:'
r = repr( activeDevice )
print r
activeDevice.deploy( context )
print 'deploy done!'
DeviceManager().register()
##----------------------------------------------------------------##
def deviceSearchEnumerator( typeId, context, option ):
if not context in [ 'device' ]: return
result = []
dm = app.getModule( 'device_manager' )
for device in dm.enumerateDevice():
entry = ( device, device.getName(), device.getType(), None )
result.append( entry )
return result
##----------------------------------------------------------------##
| mit | -5,803,916,313,980,087,000 | 27.393939 | 76 | 0.617574 | false | 3.627097 | false | false | false |
google/struct2tensor | struct2tensor/expression_impl/__init__.py | 1 | 1826 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import all modules in expression_impl.
The modules in this file should be accessed like the following:
```
import struct2tensor as s2t
from struct2tensor import expression_impl
s2t.expression_impl.apply_schema
```
"""
from struct2tensor.expression_impl import apply_schema
from struct2tensor.expression_impl import broadcast
from struct2tensor.expression_impl import depth_limit
from struct2tensor.expression_impl import filter_expression
from struct2tensor.expression_impl import index
from struct2tensor.expression_impl import map_prensor
from struct2tensor.expression_impl import map_prensor_to_prensor
from struct2tensor.expression_impl import map_values
from struct2tensor.expression_impl import parquet
from struct2tensor.expression_impl import placeholder
from struct2tensor.expression_impl import project
from struct2tensor.expression_impl import promote
from struct2tensor.expression_impl import promote_and_broadcast
from struct2tensor.expression_impl import proto
from struct2tensor.expression_impl import reroot
from struct2tensor.expression_impl import size
from struct2tensor.expression_impl import slice_expression
| apache-2.0 | -8,786,078,613,776,037,000 | 41.465116 | 80 | 0.787514 | false | 4.316785 | false | false | false |
floringrigoriu/Algorthitms | Leetcode2021/Monthly/January/Jan23.py | 1 | 1312 | # https://leetcode.com/explore/challenge/card/january-leetcoding-challenge-2021/582/week-4-january-22nd-january-28th/3614/
# A matrix diagonal is a diagonal line of cells starting from some cell in either the topmost row or leftmost column and
# going in the bottom-right direction until reaching the matrix's end.
# For example, the matrix diagonal starting from mat[2][0], where mat is a 6 x 3 matrix, includes cells mat[2][0], mat[3][1], and mat[4][2].
# Given an m x n matrix mat of integers, sort each matrix diagonal in ascending order and return the resulting matrix.
from typing import List


class Solution:
def diagonalSort(self, mat: List[List[int]]) -> List[List[int]]:
if not mat or not mat[0]:
return mat
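        # Each diagonal is identified by d = column - row, ranging from
        # 1 - rows (bottom-left corner) to columns - 1 (top-right corner):
        # collect its values, sort them, then write them back.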
        for d in range(1 - len(mat), len(mat[0])):
            delta = -min(d, 0)
            y = delta
            x = d + delta
            diag = []
while y<len(mat) and x< len(mat[0]):
diag.append(mat[y][x])
y = y+1
x = x+1
diag.sort()
            y = delta
            x = d + delta
while y<len(mat) and x< len(mat[0]):
mat[y][x]=diag[y-delta]
y = y+1
x = x+1
return mat
s = Solution()
print(s.diagonalSort([[3,3,1,1],[2,2,1,2],[1,1,1,2]]))
| gpl-2.0 | -4,631,008,891,928,721,000 | 36.485714 | 141 | 0.560213 | false | 3.372751 | false | false | false |
jianghuaw/nova | nova/api/openstack/compute/extended_volumes.py | 1 | 3796 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
from nova.api.openstack import api_version_request
from nova.api.openstack import wsgi
from nova import context
from nova import objects
from nova.policies import extended_volumes as ev_policies
class ExtendedVolumesController(wsgi.Controller):
def _extend_server(self, context, server, req, bdms):
volumes_attached = []
for bdm in bdms:
if bdm.get('volume_id'):
volume_attached = {'id': bdm['volume_id']}
if api_version_request.is_supported(req, min_version='2.3'):
volume_attached['delete_on_termination'] = (
bdm['delete_on_termination'])
volumes_attached.append(volume_attached)
# NOTE(mriedem): The os-extended-volumes prefix should not be used for
# new attributes after v2.1. They are only in v2.1 for backward compat
# with v2.0.
key = "os-extended-volumes:volumes_attached"
server[key] = volumes_attached
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if context.can(ev_policies.BASE_POLICY_NAME, fatal=False):
server = resp_obj.obj['server']
bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
context, [server['id']])
instance_bdms = self._get_instance_bdms(bdms, server)
self._extend_server(context, server, req, instance_bdms)
@staticmethod
def _get_instance_bdms_in_multiple_cells(ctxt, servers):
instance_uuids = [server['id'] for server in servers]
inst_maps = objects.InstanceMappingList.get_by_instance_uuids(
ctxt, instance_uuids)
cell_mappings = {}
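        # Collect the distinct cells these instances live in, so each cell
        # is queried only once for all of its block device mappings.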
for inst_map in inst_maps:
if (inst_map.cell_mapping is not None and
inst_map.cell_mapping.uuid not in cell_mappings):
cell_mappings.update(
{inst_map.cell_mapping.uuid: inst_map.cell_mapping})
bdms = {}
for cell_mapping in cell_mappings.values():
with context.target_cell(ctxt, cell_mapping) as cctxt:
bdms.update(
objects.BlockDeviceMappingList.bdms_by_instance_uuid(
cctxt, instance_uuids))
return bdms
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if context.can(ev_policies.BASE_POLICY_NAME, fatal=False):
servers = list(resp_obj.obj['servers'])
bdms = self._get_instance_bdms_in_multiple_cells(context, servers)
for server in servers:
instance_bdms = self._get_instance_bdms(bdms, server)
self._extend_server(context, server, req, instance_bdms)
def _get_instance_bdms(self, bdms, server):
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in the 'detail' or 'show' method.
# If that instance has since been deleted, it won't be in the
# 'bdms' dictionary though, so use 'get' to avoid KeyErrors.
return bdms.get(server['id'], [])
| apache-2.0 | 4,793,651,557,385,615,000 | 43.139535 | 78 | 0.629874 | false | 3.991588 | false | false | false |
sailfish-sdk/sailfish-qtcreator | share/qtcreator/debugger/stdtypes.py | 1 | 37164 | ############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
from dumper import *
def qform__std__array():
return arrayForms()
def qdump__std__array(d, value):
size = value.type[1]
d.putItemCount(size)
if d.isExpanded():
d.putPlotData(value.address(), size, value.type[0])
def qform__std____1__array():
return arrayForms()
def qdump__std____1__array(d, value):
qdump__std__array(d, value)
def qdump__std__function(d, value):
(ptr, dummy1, manager, invoker) = value.split('pppp')
if manager:
if ptr > 2:
d.putSymbolValue(ptr)
else:
d.putEmptyValue()
d.putBetterType(value.type)
else:
d.putValue('(null)')
d.putPlainChildren(value)
def qdump__std__complex(d, value):
innerType = value.type[0]
(real, imag) = value.split('{%s}{%s}' % (innerType.name, innerType.name))
d.putValue("(%s, %s)" % (real.display(), imag.display()))
d.putNumChild(2)
if d.isExpanded():
with Children(d, 2, childType=innerType):
d.putSubItem("real", real)
d.putSubItem("imag", imag)
def qdump__std____1__complex(d, value):
qdump__std__complex(d, value)
def qdump__std__deque(d, value):
if d.isQnxTarget():
qdump__std__deque__QNX(d, value)
return
if d.isMsvcTarget():
qdump__std__deque__MSVC(d, value)
return
innerType = value.type[0]
innerSize = innerType.size()
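    # libstdc++ deques store elements in 512-byte buffer nodes, so each node
    # holds 512 // sizeof(T) elements (at least one for large element types).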
bufsize = 1
if innerSize < 512:
bufsize = 512 // innerSize
(mapptr, mapsize, startCur, startFirst, startLast, startNode,
finishCur, finishFirst, finishLast, finishNode) = value.split("pppppppppp")
size = bufsize * ((finishNode - startNode) // d.ptrSize() - 1)
size += (finishCur - finishFirst) // innerSize
size += (startLast - startCur) // innerSize
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
if d.isExpanded():
with Children(d, size, maxNumChild=2000, childType=innerType):
pcur = startCur
plast = startLast
pnode = startNode
for i in d.childRange():
d.putSubItem(i, d.createValue(pcur, innerType))
pcur += innerSize
if pcur == plast:
newnode = pnode + d.ptrSize()
pfirst = d.extractPointer(newnode)
plast = pfirst + bufsize * d.ptrSize()
pcur = pfirst
pnode = newnode
def qdump__std____1__deque(d, value):
mptr, mfirst, mbegin, mend, start, size = value.split("pppptt")
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
if d.isExpanded():
innerType = value.type[0]
innerSize = innerType.size()
ptrSize = d.ptrSize()
bufsize = (4096 // innerSize) if innerSize < 256 else 16
with Children(d, size, maxNumChild=2000, childType=innerType):
for i in d.childRange():
k, j = divmod(start + i, bufsize)
base = d.extractPointer(mfirst + k * ptrSize)
d.putSubItem(i, d.createValue(base + j * innerSize, innerType))
def qdump__std__deque__QNX(d, value):
innerType = value.type[0]
innerSize = innerType.size()
if innerSize <= 1:
bufsize = 16
elif innerSize <= 2:
bufsize = 8
elif innerSize <= 4:
bufsize = 4
elif innerSize <= 8:
bufsize = 2
else:
bufsize = 1
try:
val = value['_Mypair']['_Myval2']
except:
val = value
myoff = val['_Myoff'].integer()
mysize = val['_Mysize'].integer()
mapsize = val['_Mapsize'].integer()
d.check(0 <= mapsize and mapsize <= 1000 * 1000 * 1000)
d.putItemCount(mysize)
if d.isExpanded():
with Children(d, mysize, maxNumChild=2000, childType=innerType):
map = val['_Map']
for i in d.childRange():
block = myoff / bufsize
offset = myoff - (block * bufsize)
if mapsize <= block:
block -= mapsize
d.putSubItem(i, map[block][offset])
                myoff += 1
def qdump__std__deque__MSVC(d, value):
innerType = value.type[0]
innerSize = innerType.size()
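    # MSVC deques use blocks of roughly 16 bytes, so the number of elements
    # per block shrinks as the element size grows (down to one per block).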
if innerSize <= 1:
bufsize = 16
elif innerSize <= 2:
bufsize = 8
elif innerSize <= 4:
bufsize = 4
elif innerSize <= 8:
bufsize = 2
else:
bufsize = 1
(proxy, map, mapsize, myoff, mysize) = value.split("ppppp")
d.check(0 <= mapsize and mapsize <= 1000 * 1000 * 1000)
d.putItemCount(mysize)
if d.isExpanded():
with Children(d, mysize, maxNumChild=2000, childType=innerType):
for i in d.childRange():
if myoff >= bufsize * mapsize:
myoff = 0
buf = map + ((myoff // bufsize) * d.ptrSize())
address = d.extractPointer(buf) + ((myoff % bufsize) * innerSize)
d.putSubItem(i, d.createValue(address, innerType))
myoff += 1
def qdump__std____debug__deque(d, value):
qdump__std__deque(d, value)
def qdump__std__list(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__list__QNX(d, value)
return
if value.type.size() == 3 * d.ptrSize():
# C++11 only.
(dummy1, dummy2, size) = value.split("ppp")
d.putItemCount(size)
else:
# Need to count manually.
p = d.extractPointer(value)
head = value.address()
size = 0
while head != p and size < 1001:
size += 1
p = d.extractPointer(p)
d.putItemCount(size, 1000)
if d.isExpanded():
p = d.extractPointer(value)
innerType = value.type[0]
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
d.putSubItem(i, d.createValue(p + 2 * d.ptrSize(), innerType))
p = d.extractPointer(p)
def qdump__std__list__QNX(d, value):
(proxy, head, size) = value.split("ppp")
d.putItemCount(size, 1000)
if d.isExpanded():
p = d.extractPointer(head)
innerType = value.type[0]
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
d.putSubItem(i, d.createValue(p + 2 * d.ptrSize(), innerType))
p = d.extractPointer(p)
def qdump__std____debug__list(d, value):
qdump__std__list(d, value)
def qdump__std____cxx11__list(d, value):
qdump__std__list(d, value)
def qdump__std____1__list(d, value):
if value.type.size() == 3 * d.ptrSize():
# C++11 only.
(dummy1, dummy2, size) = value.split("ppp")
d.putItemCount(size)
else:
# Need to count manually.
p = d.extractPointer(value)
head = value.address()
size = 0
while head != p and size < 1001:
size += 1
p = d.extractPointer(p)
d.putItemCount(size, 1000)
if d.isExpanded():
(prev, p) = value.split("pp")
innerType = value.type[0]
typeCode = "pp{%s}" % innerType.name
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
(prev, p, val) = d.split(typeCode, p)
d.putSubItem(i, val)
def qform__std__map():
return mapForms()
def qdump__std__map(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump_std__map__helper(d, value)
return
# stuff is actually (color, pad) with 'I@', but we can save cycles/
(compare, stuff, parent, left, right, size) = value.split('pppppp')
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
valueType = value.type[1]
with Children(d, size, maxNumChild=1000):
node = value["_M_t"]["_M_impl"]["_M_header"]["_M_left"]
nodeSize = node.dereference().type.size()
typeCode = "@{%s}@{%s}" % (keyType.name, valueType.name)
for i in d.childRange():
(pad1, key, pad2, value) = d.split(typeCode, node.pointer() + nodeSize)
d.putPairItem(i, (key, value))
if node["_M_right"].pointer() == 0:
parent = node["_M_parent"]
while True:
if node.pointer() != parent["_M_right"].pointer():
break
node = parent
parent = parent["_M_parent"]
if node["_M_right"] != parent:
node = parent
else:
node = node["_M_right"]
while True:
if node["_M_left"].pointer() == 0:
break
node = node["_M_left"]
def qdump_std__map__helper(d, value):
(proxy, head, size) = value.split("ppp")
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
valueType = value.type[1]
pairType = value.type[3][0]
def helper(node):
(left, parent, right, color, isnil, pad, pair) = d.split("pppcc@{%s}" % (pairType.name), node)
if left != head:
for res in helper(left):
yield res
yield pair.split("{%s}@{%s}" % (keyType.name, valueType.name))[::2]
if right != head:
for res in helper(right):
yield res
(smallest, root) = d.split("pp", head)
with Children(d, size, maxNumChild=1000):
for (pair, i) in zip(helper(root), d.childRange()):
d.putPairItem(i, pair)
def qdump__std____debug__map(d, value):
qdump__std__map(d, value)
def qdump__std____debug__set(d, value):
qdump__std__set(d, value)
def qdump__std__multiset(d, value):
qdump__std__set(d, value)
def qdump__std____cxx1998__map(d, value):
qdump__std__map(d, value)
def qform__std__multimap():
return mapForms()
def qdump__std__multimap(d, value):
return qdump__std__map(d, value)
def qdumpHelper__std__tree__iterator(d, value, isSet=False):
if value.type.name.endswith("::iterator"):
treeTypeName = value.type.name[:-len("::iterator")]
elif value.type.name.endswith("::const_iterator"):
treeTypeName = value.type.name[:-len("::const_iterator")]
treeType = d.lookupType(treeTypeName)
keyType = treeType[0]
valueType = treeType[1]
node = value["_M_node"].dereference() # std::_Rb_tree_node_base
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
if isSet:
typecode = 'pppp@{%s}' % keyType.name
(color, parent, left, right, pad1, key) = d.split(typecode, node)
d.putSubItem("value", key)
else:
typecode = 'pppp@{%s}@{%s}' % (keyType.name, valueType.name)
(color, parent, left, right, pad1, key, pad2, value) = d.split(typecode, node)
d.putSubItem("first", key)
d.putSubItem("second", value)
with SubItem(d, "[node]"):
d.putNumChild(1)
d.putEmptyValue()
d.putType(" ")
if d.isExpanded():
with Children(d):
#d.putSubItem("color", color)
nodeType = node.type.pointer()
d.putSubItem("left", d.createValue(left, nodeType))
d.putSubItem("right", d.createValue(right, nodeType))
d.putSubItem("parent", d.createValue(parent, nodeType))
def qdump__std___Rb_tree_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump__std___Rb_tree_const_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump__std__map__iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump____gnu_debug___Safe_iterator(d, value):
d.putItem(value["_M_current"])
def qdump__std__map__const_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump__std__set__iterator(d, value):
qdumpHelper__std__tree__iterator(d, value, True)
def qdump__std__set__const_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value, True)
def qdump__std____cxx1998__set(d, value):
qdump__std__set(d, value)
def qdumpHelper__std__tree__iterator_MSVC(d, value):
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
childType = value.type[0][0][0]
(proxy, nextIter, node) = value.split("ppp")
(left, parent, right, color, isnil, pad, child) = \
d.split("pppcc@{%s}" % (childType.name), node)
if (childType.name.startswith("std::pair")):
# workaround that values created via split have no members
keyType = childType[0].name
valueType = childType[1].name
d.putPairItem(None, child.split("{%s}@{%s}" % (keyType, valueType))[::2])
else:
d.putSubItem("value", child)
def qdump__std___Tree_const_iterator(d, value):
qdumpHelper__std__tree__iterator_MSVC(d, value)
def qdump__std___Tree_iterator(d, value):
qdumpHelper__std__tree__iterator_MSVC(d, value)
def qdump__std__set(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__set__QNX(d, value)
return
impl = value["_M_t"]["_M_impl"]
size = impl["_M_node_count"].integer()
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
valueType = value.type[0]
node = impl["_M_header"]["_M_left"]
nodeSize = node.dereference().type.size()
typeCode = "@{%s}" % valueType.name
with Children(d, size, maxNumChild=1000, childType=valueType):
for i in d.childRange():
(pad, val) = d.split(typeCode, node.pointer() + nodeSize)
d.putSubItem(i, val)
if node["_M_right"].pointer() == 0:
parent = node["_M_parent"]
while node == parent["_M_right"]:
node = parent
parent = parent["_M_parent"]
if node["_M_right"] != parent:
node = parent
else:
node = node["_M_right"]
while node["_M_left"].pointer() != 0:
node = node["_M_left"]
def qdump__std__set__QNX(d, value):
(proxy, head, size) = value.split("ppp")
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
childType=value.type[0]
def helper(node):
(left, parent, right, color, isnil, pad, value) = d.split("pppcc@{%s}" % childType.name, node)
if left != head:
for res in helper(left):
yield res
yield value
if right != head:
for res in helper(right):
yield res
(smallest, root) = d.split("pp", head)
with Children(d, size, maxNumChild=1000):
for (item, i) in zip(helper(root), d.childRange()):
d.putSubItem(i, item)
def std1TreeMin(d, node):
#_NodePtr __tree_min(_NodePtr __x):
# while (__x->__left_ != nullptr)
# __x = __x->__left_;
# return __x;
#
left = node['__left_']
if left.pointer():
node = left
return node
def std1TreeIsLeftChild(d, node):
# bool __tree_is_left_child(_NodePtr __x):
# return __x == __x->__parent_->__left_;
#
other = node['__parent_']['__left_']
return node.pointer() == other.pointer()
def std1TreeNext(d, node):
#_NodePtr __tree_next(_NodePtr __x):
# if (__x->__right_ != nullptr)
# return __tree_min(__x->__right_);
# while (!__tree_is_left_child(__x))
# __x = __x->__parent_;
# return __x->__parent_;
#
right = node['__right_']
if right.pointer():
return std1TreeMin(d, right)
while not std1TreeIsLeftChild(d, node):
node = node['__parent_']
return node['__parent_']
def qdump__std____1__set(d, value):
tree = value["__tree_"]
base3 = tree["__pair3_"].address()
size = d.extractUInt(base3)
d.check(size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
# type of node is std::__1::__tree_node<Foo, void *>::value_type
valueType = value.type[0]
d.putFields(tree)
node = tree["__begin_node_"]
nodeType = node.type
with Children(d, size):
for i in d.childRange():
with SubItem(d, i):
d.putItem(node['__value_'])
d.putBetterType(valueType)
node = std1TreeNext(d, node).cast(nodeType)
def qdump__std____1__multiset(d, value):
qdump__std____1__set(d, value)
def qform__std____1__map():
return mapForms()
def qdump__std____1__map(d, value):
tree = value["__tree_"]
base3 = tree["__pair3_"].address()
size = d.extractUInt(base3)
d.check(size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
# type of node is std::__1::__tree_node<Foo, Bar>::value_type
valueType = value.type[0]
node = tree["__begin_node_"]
nodeType = node.type
with Children(d, size, maxNumChild=1000):
node = tree["__begin_node_"]
for i in d.childRange():
# There's possibly also:
#pair = node['__value_']['__nc']
pair = node['__value_']['__cc']
d.putPairItem(i, pair)
node = std1TreeNext(d, node).cast(nodeType)
def qform__std____1__multimap():
return mapForms()
def qdump__std____1__multimap(d, value):
qdump__std____1__map(d, value)
def qdump__std____1__map__iterator(d, value):
d.putEmptyValue()
if d.isExpanded():
with Children(d):
node = value['__i_']['__ptr_'].dereference()['__value_']['__cc']
d.putSubItem('first', node['first'])
d.putSubItem('second', node['second'])
def qdump__std____1__map__const_iterator(d, value):
qdump__std____1__map__iterator(d, value)
def qdump__std____1__set__iterator(d, value):
d.putEmptyValue()
d.putNumChild(1)
if value.type.name.endswith("::iterator"):
treeTypeName = value.type.name[:-len("::iterator")]
elif value.type.name.endswith("::const_iterator"):
treeTypeName = value.type.name[:-len("::const_iterator")]
treeType = d.lookupType(treeTypeName)
keyType = treeType[0]
if d.isExpanded():
with Children(d):
node = value['__ptr_'].dereference()['__value_']
node = node.cast(keyType)
d.putSubItem('value', node)
def qdump__std____1__set_const_iterator(d, value):
qdump__std____1__set__iterator(d, value)
def qdump__std__stack(d, value):
d.putItem(value["c"])
d.putBetterType(value.type)
def qdump__std____debug__stack(d, value):
qdump__std__stack(d, value)
def qdump__std____1__stack(d, value):
d.putItem(value["c"])
d.putBetterType(value.type)
def qform__std__string():
return [Latin1StringFormat, SeparateLatin1StringFormat,
Utf8StringFormat, SeparateUtf8StringFormat ]
def qdump__std__string(d, value):
qdumpHelper_std__string(d, value, d.createType("char"), d.currentItemFormat())
def qdumpHelper_std__string(d, value, charType, format):
if d.isQnxTarget():
qdumpHelper__std__string__QNX(d, value, charType, format)
return
if d.isMsvcTarget():
qdumpHelper__std__string__MSVC(d, value, charType, format)
return
data = value.extractPointer()
# We can't lookup the std::string::_Rep type without crashing LLDB,
# so hard-code assumption on member position
# struct { size_type _M_length, size_type _M_capacity, int _M_refcount; }
(size, alloc, refcount) = d.split("ppp", data - 3 * d.ptrSize())
refcount = refcount & 0xffffffff
d.check(refcount >= -1) # Can be -1 according to docs.
d.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(data, size, charType, format)
def qdumpHelper__std__string__QNX(d, value, charType, format):
size = value['_Mysize']
alloc = value['_Myres']
_BUF_SIZE = int(16 / charType.size())
if _BUF_SIZE <= alloc: #(_BUF_SIZE <= _Myres ? _Bx._Ptr : _Bx._Buf);
data = value['_Bx']['_Ptr']
else:
data = value['_Bx']['_Buf']
sizePtr = data.cast(d.charType().pointer())
refcount = int(sizePtr[-1])
    d.check(refcount >= -1) # Can be -1 according to docs.
d.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(sizePtr, size, charType, format)
def qdumpHelper__std__string__MSVC(d, value, charType, format):
    (proxy, buffer, size, alloc) = value.split("p16spp")
    _BUF_SIZE = int(16 / charType.size())
    d.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
    if _BUF_SIZE <= alloc:
        (proxy, data) = value.split("pp")
else:
data = value.address() + d.ptrSize()
d.putCharArrayHelper(data, size, charType, format)
def qdump__std____1__string(d, value):
firstByte = value.split('b')[0]
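    # The dumper assumes libc++'s classic layout: the lowest bit of the first
    # byte flags the long (heap-allocated) form; for the short form the
    # remaining bits hold the size.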
if int(firstByte & 1) == 0:
# Short/internal.
size = int(firstByte / 2)
data = value.address() + 1
else:
# Long/external.
(dummy, size, data) = value.split('ppp')
d.putCharArrayHelper(data, size, d.charType(), d.currentItemFormat())
d.putType("std::string")
def qdump__std____1__wstring(d, value):
firstByte = value.split('b')[0]
if int(firstByte & 1) == 0:
# Short/internal.
size = int(firstByte / 2)
data = value.address() + 4
else:
# Long/external.
(dummy, size, data) = value.split('ppp')
d.putCharArrayHelper(data, size, d.createType('wchar_t'))
d.putType("std::wstring")
def qdump__std____weak_ptr(d, value):
return qdump__std__shared_ptr(d, value)
def qdump__std__weak_ptr(d, value):
return qdump__std__shared_ptr(d, value)
def qdump__std____1__weak_ptr(d, value):
return qdump__std____1__shared_ptr(d, value)
def qdump__std__shared_ptr(d, value):
if d.isMsvcTarget():
i = value["_Ptr"]
else:
i = value["_M_ptr"]
if i.pointer() == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(i.dereference())
d.putBetterType(value.type)
def qdump__std____1__shared_ptr(d, value):
i = value["__ptr_"]
if i.pointer() == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(i.dereference())
d.putBetterType(value.type)
def qdump__std__unique_ptr(d, value):
p = d.extractPointer(value)
if p == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(d.createValue(p, value.type[0]))
d.putBetterType(value.type)
def qdump__std____1__unique_ptr(d, value):
qdump__std__unique_ptr(d, value)
def qdump__std__pair(d, value):
typeCode = '{%s}@{%s}' % (value.type[0].name, value.type[1].name)
first, pad, second = value.split(typeCode)
with Children(d):
key = d.putSubItem('first', first)
value = d.putSubItem('second', second)
d.putField('key', key.value)
if key.encoding is not None:
d.putField('keyencoded', key.encoding)
d.putValue(value.value, value.encoding)
def qform__std__unordered_map():
return mapForms()
def qform__std____debug__unordered_map():
return mapForms()
def qdump__std__unordered_map(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__list__QNX(d, value["_List"])
return
try:
# gcc ~= 4.7
size = value["_M_element_count"].integer()
start = value["_M_before_begin"]["_M_nxt"]
except:
try:
# libc++ (Mac)
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_bbegin"]["_M_node"]["_M_nxt"]
except:
try:
# gcc 4.9.1
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_before_begin"]["_M_nxt"]
except:
# gcc 4.6.2
size = value["_M_element_count"].integer()
start = value["_M_buckets"].dereference()
# FIXME: Pointer-aligned?
d.putItemCount(size)
# We don't know where the data is
d.putNumChild(0)
return
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
valueType = value.type[1]
typeCode = 'p@{%s}@{%s}' % (value.type[0].name, value.type[1].name)
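        # Each hash node is laid out as the pointer to the next node followed
        # by the padded key/value pair, which is what this split decodes.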
p = start.pointer()
with Children(d, size):
for i in d.childRange():
p, pad, key, pad, val = d.split(typeCode, p)
d.putPairItem(i, (key, val))
def qdump__std____debug__unordered_map(d, value):
qdump__std__unordered_map(d, value)
def qform__std__unordered_multimap():
return qform__std__unordered_map()
def qform__std____debug__unordered_multimap():
return qform__std____debug__unordered_map()
def qdump__std__unordered_multimap(d, value):
qdump__std__unordered_map(d, value)
def qdump__std____debug__unordered_multimap(d, value):
qdump__std__unordered_multimap(d, value)
def qdump__std__unordered_set(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__list__QNX(d, value["_List"])
return
try:
# gcc ~= 4.7
size = value["_M_element_count"].integer()
start = value["_M_before_begin"]["_M_nxt"]
offset = 0
except:
try:
# libc++ (Mac)
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_bbegin"]["_M_node"]["_M_nxt"]
offset = 0
except:
try:
# gcc 4.6.2
size = value["_M_element_count"].integer()
start = value["_M_buckets"].dereference()
offset = d.ptrSize()
except:
# gcc 4.9.1
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_before_begin"]["_M_nxt"]
offset = 0
d.putItemCount(size)
if d.isExpanded():
p = start.pointer()
valueType = value.type[0]
with Children(d, size, childType=valueType):
ptrSize = d.ptrSize()
for i in d.childRange():
d.putSubItem(i, d.createValue(p + ptrSize - offset, valueType))
p = d.extractPointer(p + offset)
def qform__std____1__unordered_map():
return mapForms()
def qdump__std____1__unordered_map(d, value):
size = value["__table_"]["__p2_"]["__first_"].integer()
d.putItemCount(size)
if d.isExpanded():
# There seem to be several versions of the implementation.
def valueCCorNot(val):
try:
return val["__cc"]
except:
return val
node = value["__table_"]["__p1_"]["__first_"]["__next_"]
with Children(d, size):
for i in d.childRange():
d.putPairItem(i, valueCCorNot(node["__value_"]))
node = node["__next_"]
def qdump__std____1__unordered_set(d, value):
size = int(value["__table_"]["__p2_"]["__first_"])
d.putItemCount(size)
if d.isExpanded():
node = value["__table_"]["__p1_"]["__first_"]["__next_"]
with Children(d, size, childType=value.type[0], maxNumChild=1000):
for i in d.childRange():
d.putSubItem(i, node["__value_"])
node = node["__next_"]
def qdump__std____debug__unordered_set(d, value):
qdump__std__unordered_set(d, value)
def qdump__std__unordered_multiset(d, value):
qdump__std__unordered_set(d, value)
def qdump__std____debug__unordered_multiset(d, value):
qdump__std__unordered_multiset(d, value)
def qform__std__valarray():
return arrayForms()
def qdump__std__valarray(d, value):
if d.isMsvcTarget():
(data, size) = value.split('pp')
else:
(size, data) = value.split('pp')
d.putItemCount(size)
d.putPlotData(data, size, value.type[0])
def qform__std____1__valarray():
return arrayForms()
def qdump__std____1__valarray(d, value):
innerType = value.type[0]
(begin, end) = value.split('pp')
size = int((end - begin) / innerType.size())
d.putItemCount(size)
d.putPlotData(begin, size, innerType)
def qform__std__vector():
return arrayForms()
def qedit__std__vector(d, value, data):
import gdb
values = data.split(',')
n = len(values)
innerType = value.type[0].name
cmd = "set $d = (%s*)calloc(sizeof(%s)*%s,1)" % (innerType, innerType, n)
gdb.execute(cmd)
cmd = "set {void*[3]}%s = {$d, $d+%s, $d+%s}" % (value.address(), n, n)
gdb.execute(cmd)
cmd = "set (%s[%d])*$d={%s}" % (innerType, n, data)
gdb.execute(cmd)
def qdump__std__vector(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdumpHelper__std__vector__QNX(d, value)
else:
qdumpHelper__std__vector(d, value, False)
def qdumpHelper__std__vector(d, value, isLibCpp):
innerType = value.type[0]
isBool = innerType.name == 'bool'
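    # std::vector<bool> is bit-packed, so its size is derived from word
    # pointers plus bit offsets instead of plain pointer differences.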
if isBool:
if isLibCpp:
(start, size) = value.split("pp") # start is 'unsigned long *'
alloc = size
else:
(start, soffset, pad, finish, foffset, pad, alloc) = value.split("pI@pI@p")
size = (finish - start) * 8 + foffset - soffset # 8 is CHAR_BIT.
else:
(start, finish, alloc) = value.split("ppp")
size = int((finish - start) / innerType.size())
d.check(finish <= alloc)
if size > 0:
d.checkPointer(start)
d.checkPointer(finish)
d.checkPointer(alloc)
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
if isBool:
if d.isExpanded():
with Children(d, size, maxNumChild=10000, childType=innerType):
for i in d.childRange():
q = start + int(i / 8)
with SubItem(d, i):
d.putValue((int(d.extractPointer(q)) >> (i % 8)) & 1)
d.putType("bool")
d.putNumChild(0)
else:
d.putPlotData(start, size, innerType)
def qdumpHelper__std__vector__QNX(d, value):
innerType = value.type[0]
isBool = innerType.name == 'bool'
if isBool:
(proxy1, proxy2, start, last, end, size) = value.split("pppppi")
else:
(proxy, start, last, end) = value.split("pppp")
size = (last - start) // innerType.size()
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.check(last <= end)
if size > 0:
d.checkPointer(start)
d.checkPointer(last)
d.checkPointer(end)
d.putItemCount(size)
if d.isExpanded():
if isBool:
with Children(d, size, maxNumChild=10000, childType=innerType):
for i in d.childRange():
q = start + int(i / 8)
with SubItem(d, i):
d.putValue((d.extractPointer(q) >> (i % 8)) & 1)
d.putType("bool")
d.putNumChild(0)
else:
d.putPlotData(start, size, innerType)
def qform__std____1__vector():
return arrayForms()
def qdump__std____1__vector(d, value):
qdumpHelper__std__vector(d, value, True)
def qform__std____debug__vector():
return arrayForms()
def qdump__std____debug__vector(d, value):
qdump__std__vector(d, value)
def qedit__std__string(d, value, data):
d.call('void', value, 'assign', '"%s"' % data.replace('"', '\\"'))
def qedit__string(d, expr, value):
qedit__std__string(d, expr, value)
def qedit__std____cxx11__string(d, expr, value):
qedit__std__string(d, expr, value)
def qedit__std__wstring(d, value, data):
d.call('void', value, 'assign', 'L"%s"' % data.replace('"', '\\"'))
def qedit__wstring(d, expr, value):
qedit__std__wstring(d, expr, value)
def qedit__std____cxx11__wstring(d, expr, value):
qedit__std__wstring(d, expr, value)
def qdump__string(d, value):
qdump__std__string(d, value)
def qform__std__wstring():
return [SimpleFormat, SeparateFormat]
def qdump__std__wstring(d, value):
qdumpHelper_std__string(d, value, d.createType('wchar_t'), d.currentItemFormat())
def qdump__std__basic_string(d, value):
innerType = value.type[0]
qdumpHelper_std__string(d, value, innerType, d.currentItemFormat())
def qdump__std____cxx11__basic_string(d, value):
innerType = value.type[0]
(data, size) = value.split("pI")
d.check(0 <= size) #and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(data, size, innerType, d.currentItemFormat())
def qform__std____cxx11__string(d, value):
qform__std__string(d, value)
def qdump__std____cxx11__string(d, value):
(data, size) = value.split("pI")
d.check(0 <= size) #and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(data, size, d.charType(), d.currentItemFormat())
# Needed only to trigger the form report above.
def qform__std____cxx11__string():
return qform__std__string()
def qform__std____cxx11__wstring():
return qform__std__wstring()
def qdump__std____1__basic_string(d, value):
innerType = value.type[0].name
if innerType == "char":
qdump__std____1__string(d, value)
elif innerType == "wchar_t":
qdump__std____1__wstring(d, value)
else:
warn("UNKNOWN INNER TYPE %s" % innerType)
def qdump__wstring(d, value):
qdump__std__wstring(d, value)
def qdump__std____1__once_flag(d, value):
qdump__std__once_flag(d, value)
def qdump__std__once_flag(d, value):
d.putValue(value.split("i")[0])
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump____gnu_cxx__hash_set(d, value):
ht = value["_M_ht"]
size = ht["_M_num_elements"].integer()
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
innerType = value.type[0]
d.putType("__gnu__cxx::hash_set<%s>" % innerType.name)
if d.isExpanded():
with Children(d, size, maxNumChild=1000, childType=innerType):
buckets = ht["_M_buckets"]["_M_impl"]
bucketStart = buckets["_M_start"]
bucketFinish = buckets["_M_finish"]
p = bucketStart
itemCount = 0
for i in xrange((bucketFinish.pointer() - bucketStart.pointer()) // d.ptrSize()):
if p.dereference().pointer():
cur = p.dereference()
while cur.pointer():
d.putSubItem(itemCount, cur["_M_val"])
cur = cur["_M_next"]
itemCount += 1
p = p + 1
def qdump__uint8_t(d, value):
d.putNumChild(0)
d.putValue(value.integer())
def qdump__int8_t(d, value):
d.putNumChild(0)
d.putValue(value.integer())
def qdump__std__byte(d, value):
d.putNumChild(0)
d.putValue(value.integer())
def qdump__std__optional(d, value):
innerType = value.type[0]
(initialized, pad, payload) = d.split('b@{%s}' % innerType.name, value)
if initialized:
d.putItem(payload)
d.putBetterType(value.type)
else:
d.putSpecialValue("uninitialized")
d.putNumChild(0)
def qdump__std__experimental__optional(d, value):
qdump__std__optional(d, value)
| gpl-3.0 | -7,329,428,618,475,966,000 | 32.360862 | 106 | 0.551367 | false | 3.317918 | false | false | false |
stscieisenhamer/ginga | ginga/rv/plugins/Drawing.py | 1 | 15096 | #
# Drawing.py -- Drawing plugin for Ginga reference viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga import colors
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga.util import dp
draw_colors = colors.get_colors()
default_drawtype = 'circle'
default_drawcolor = 'lightblue'
fillkinds = ('circle', 'rectangle', 'polygon', 'triangle', 'righttriangle',
'square', 'ellipse', 'box')
class Drawing(GingaPlugin.LocalPlugin):
"""Local plugin to draw shapes on canvas."""
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Drawing, self).__init__(fv, fitsimage)
self.layertag = 'drawing-canvas'
self.dc = fv.get_draw_classes()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('point', color='cyan')
canvas.set_callback('draw-event', self.draw_cb)
canvas.set_callback('edit-event', self.edit_cb)
canvas.set_callback('edit-select', self.edit_select_cb)
canvas.set_surface(self.fitsimage)
# So we can draw and edit with the cursor
canvas.register_for_cursor_drawing(self.fitsimage)
self.canvas = canvas
self.drawtypes = list(canvas.get_drawtypes())
self.drawcolors = draw_colors
self.linestyles = ['solid', 'dash']
self.coordtypes = ['data', 'wcs', 'cartesian', 'canvas']
# contains all parameters to be passed to the constructor
self.draw_args = []
self.draw_kwdargs = {}
# cache of all canvas item parameters
self.drawparams_cache = {}
# holds object being edited
self.edit_obj = None
# For mask creation from drawn objects
self._drawn_tags = []
self._mask_prefix = 'drawing'
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container)
self.orientation = orientation
vbox.set_border_width(4)
vbox.set_spacing(2)
msg_font = self.fv.get_font("sansFont", 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(msg_font)
self.tw = tw
fr = Widgets.Expander("Instructions")
fr.set_widget(tw)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("Drawing")
captions = (("Draw type:", 'label', "Draw type", 'combobox'),
("Coord type:", 'label', "Coord type", 'combobox'),
)
w, b = Widgets.build_info(captions)
self.w.update(b)
combobox = b.draw_type
for name in self.drawtypes:
combobox.append_text(name)
index = self.drawtypes.index(default_drawtype)
combobox.set_index(index)
combobox.add_callback('activated', lambda w, idx: self.set_drawparams_cb())
combobox = b.coord_type
for name in self.coordtypes:
combobox.append_text(name)
index = 0
combobox.set_index(index)
combobox.add_callback('activated', lambda w, idx: self.set_drawparams_cb())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
btn1 = Widgets.RadioButton("Draw")
btn1.set_state(mode == 'draw')
btn1.add_callback('activated', lambda w, val: self.set_mode_cb('draw', val))
btn1.set_tooltip("Choose this to draw")
self.w.btn_draw = btn1
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Edit", group=btn1)
btn2.set_state(mode == 'edit')
btn2.add_callback('activated', lambda w, val: self.set_mode_cb('edit', val))
btn2.set_tooltip("Choose this to edit")
self.w.btn_edit = btn2
hbox.add_widget(btn2)
hbox.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(hbox, stretch=0)
fr = Widgets.Frame("Attributes")
vbox2 = Widgets.VBox()
self.w.attrlbl = Widgets.Label()
vbox2.add_widget(self.w.attrlbl, stretch=0)
self.w.drawvbox = Widgets.VBox()
vbox2.add_widget(self.w.drawvbox, stretch=1)
fr.set_widget(vbox2)
vbox.add_widget(fr, stretch=0)
captions = (("Rotate By:", 'label', 'Rotate By', 'entry',
"Scale By:", 'label', 'Scale By', 'entry'),
("Delete Obj", 'button', "sp1", 'spacer',
"Create mask", 'button', "Clear canvas", 'button'),
)
w, b = Widgets.build_info(captions)
self.w.update(b)
b.delete_obj.add_callback('activated', lambda w: self.delete_object())
b.delete_obj.set_tooltip("Delete selected object in edit mode")
b.delete_obj.set_enabled(False)
b.scale_by.add_callback('activated', self.scale_object)
b.scale_by.set_text('0.9')
b.scale_by.set_tooltip("Scale selected object in edit mode")
b.scale_by.set_enabled(False)
b.rotate_by.add_callback('activated', self.rotate_object)
b.rotate_by.set_text('90.0')
b.rotate_by.set_tooltip("Rotate selected object in edit mode")
b.rotate_by.set_enabled(False)
b.create_mask.add_callback('activated', lambda w: self.create_mask())
b.create_mask.set_tooltip("Create boolean mask from drawing")
b.clear_canvas.add_callback('activated', lambda w: self.clear_canvas())
b.clear_canvas.set_tooltip("Delete all drawing objects")
vbox.add_widget(w, stretch=0)
spacer = Widgets.Label('')
vbox.add_widget(spacer, stretch=1)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.toggle_create_button()
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
def instructions(self):
self.tw.set_text(
"""Draw a figure with the cursor.
For polygons/paths press 'v' to create a vertex, 'z' to remove last vertex.""")
def start(self):
self.instructions()
self.set_drawparams_cb()
# insert layer if it is not already
p_canvas = self.fitsimage.get_canvas()
try:
obj = p_canvas.get_object_by_tag(self.layertag)
except KeyError:
# Add canvas layer
p_canvas.add(self.canvas, tag=self.layertag)
self.resume()
def pause(self):
self.canvas.ui_setActive(False)
def resume(self):
self.canvas.ui_setActive(True)
self.fv.show_status("Draw a figure with the right mouse button")
def stop(self):
# remove the canvas from the image
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.delete_object_by_tag(self.layertag)
except:
pass
# don't leave us stuck in edit mode
self.canvas.set_draw_mode('draw')
self.canvas.ui_setActive(False)
self.fv.show_status("")
def redo(self):
pass
def draw_cb(self, canvas, tag):
obj = canvas.get_object_by_tag(tag)
self._drawn_tags.append(tag)
self.toggle_create_button()
self.logger.info("drew a %s" % (obj.kind))
return True
def set_drawparams_cb(self):
## if self.canvas.get_draw_mode() != 'draw':
## # if we are in edit mode then don't initialize draw gui
## return
index = self.w.draw_type.get_index()
kind = self.drawtypes[index]
index = self.w.coord_type.get_index()
coord = self.coordtypes[index]
# remove old params
self.w.drawvbox.remove_all()
# Create new drawing class of the right kind
drawClass = self.canvas.get_draw_class(kind)
self.w.attrlbl.set_text("New Object: %s" % (kind))
# Build up a set of control widgets for the parameters
# of the canvas object to be drawn
paramlst = drawClass.get_params_metadata()
params = self.drawparams_cache.setdefault(kind, Bunch.Bunch())
self.draw_params = ParamSet.ParamSet(self.logger, params)
w = self.draw_params.build_params(paramlst,
orientation=self.orientation)
self.draw_params.add_callback('changed', self.draw_params_changed_cb)
self.w.drawvbox.add_widget(w, stretch=1)
# disable edit-only controls
self.w.delete_obj.set_enabled(False)
self.w.scale_by.set_enabled(False)
self.w.rotate_by.set_enabled(False)
args, kwdargs = self.draw_params.get_params()
#self.logger.debug("changing params to: %s" % str(kwdargs))
if kind != 'compass':
kwdargs['coord'] = coord
self.canvas.set_drawtype(kind, **kwdargs)
def draw_params_changed_cb(self, paramObj, params):
index = self.w.draw_type.get_index()
kind = self.drawtypes[index]
args, kwdargs = self.draw_params.get_params()
#self.logger.debug("changing params to: %s" % str(kwdargs))
self.canvas.set_drawtype(kind, **kwdargs)
def edit_cb(self, fitsimage, obj):
# <-- obj has been edited
#self.logger.debug("edit event on canvas: obj=%s" % (obj))
if obj != self.edit_obj:
# edit object is new. Update visual parameters
self.edit_select_cb(fitsimage, obj)
else:
# edit object has been modified. Sync visual parameters
self.draw_params.params_to_widgets()
def edit_params_changed_cb(self, paramObj, obj):
self.draw_params.widgets_to_params()
if hasattr(obj, 'coord'):
tomap = self.fitsimage.get_coordmap(obj.coord)
if obj.crdmap != tomap:
#self.logger.debug("coordmap has changed to '%s'--converting mapper" % (
# str(tomap)))
# user changed type of mapper; convert coordinates to
# new mapper and update widgets
obj.convert_mapper(tomap)
paramObj.params_to_widgets()
obj.sync_state()
# TODO: change whence to 0 if allowing editing of images
whence = 2
self.canvas.redraw(whence=whence)
def edit_initialize(self, fitsimage, obj):
# remove old params
self.w.drawvbox.remove_all()
self.edit_obj = obj
if (obj is not None) and self.canvas.is_selected(obj):
self.w.attrlbl.set_text("Editing a %s" % (obj.kind))
drawClass = obj.__class__
# Build up a set of control widgets for the parameters
# of the canvas object to be drawn
paramlst = drawClass.get_params_metadata()
self.draw_params = ParamSet.ParamSet(self.logger, obj)
w = self.draw_params.build_params(paramlst,
orientation=self.orientation)
self.draw_params.add_callback('changed', self.edit_params_changed_cb)
self.w.drawvbox.add_widget(w, stretch=1)
self.w.delete_obj.set_enabled(True)
self.w.scale_by.set_enabled(True)
self.w.rotate_by.set_enabled(True)
else:
self.w.attrlbl.set_text("")
self.w.delete_obj.set_enabled(False)
self.w.scale_by.set_enabled(False)
self.w.rotate_by.set_enabled(False)
def edit_select_cb(self, fitsimage, obj):
self.logger.debug("editing selection status has changed for %s" % str(obj))
self.edit_initialize(fitsimage, obj)
def set_mode_cb(self, mode, tf):
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_initialize(self.fitsimage, None)
elif mode == 'draw':
self.set_drawparams_cb()
return True
def toggle_create_button(self):
"""Enable or disable Create Mask button based on drawn objects."""
if len(self._drawn_tags) > 0:
self.w.create_mask.set_enabled(True)
else:
self.w.create_mask.set_enabled(False)
def create_mask(self):
"""Create boolean mask from drawing.
All areas enclosed by all the shapes drawn will be set to 1 (True)
in the mask. Otherwise, the values will be set to 0 (False).
The mask will be inserted as a new image buffer, like ``Mosaic``.
"""
ntags = len(self._drawn_tags)
if ntags == 0:
return
old_image = self.fitsimage.get_image()
if old_image is None:
return
mask = None
obj_kinds = set()
# Create mask
for tag in self._drawn_tags:
obj = self.canvas.get_object_by_tag(tag)
try:
cur_mask = old_image.get_shape_mask(obj)
except Exception as e:
self.logger.error('Cannot create mask: {0}'.format(str(e)))
continue
if mask is not None:
mask |= cur_mask
else:
mask = cur_mask
obj_kinds.add(obj.kind)
# Might be useful to inherit header from displayed image (e.g., WCS)
# but the displayed image should not be modified.
# Bool needs to be converted to int so FITS writer would not crash.
image = dp.make_image(mask.astype('int16'), old_image, {},
pfx=self._mask_prefix)
imname = image.get('name')
# Insert new image
self.fv.gui_call(self.fv.add_image, imname, image, chname=self.chname)
# This sets timestamp
image.make_callback('modified')
# Add change log to ChangeHistory
s = 'Mask created from {0} drawings ({1})'.format(
ntags, ','.join(sorted(obj_kinds)))
iminfo = self.channel.get_image_info(imname)
iminfo.reason_modified = s
self.logger.info(s)
def clear_canvas(self):
self.canvas.clear_selected()
self.canvas.delete_all_objects()
self._drawn_tags = []
self.toggle_create_button()
def delete_object(self):
tag = self.canvas.lookup_object_tag(self.canvas._edit_obj)
self._drawn_tags.remove(tag)
self.toggle_create_button()
self.canvas.edit_delete()
self.canvas.redraw(whence=2)
def rotate_object(self, w):
delta = float(w.get_text())
self.canvas.edit_rotate(delta, self.fitsimage)
def scale_object(self, w):
delta = float(w.get_text())
self.canvas.edit_scale(delta, delta, self.fitsimage)
def __str__(self):
return 'drawing'
#END
| bsd-3-clause | 7,770,706,608,207,105,000 | 33.703448 | 88 | 0.588169 | false | 3.630592 | false | false | false |
miguelfrde/stanford-cs231n | assignment2/cs231n/optim.py | 1 | 6261 | import numpy as np
"""
This file implements various first-order update rules that are commonly used
for training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning
rate, momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not
perform well; however the default values of the other hyperparameters should
work well for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
"""
Performs vanilla stochastic gradient descent.
config format:
- learning_rate: Scalar learning rate.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
w -= config['learning_rate'] * dw
return w, config
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a
moving average of the gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w))
next_w = None
###########################################################################
# TODO: Implement the momentum update formula. Store the updated value in #
# the next_w variable. You should also use and update the velocity v. #
###########################################################################
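    # Classic momentum: decay the running velocity, subtract the gradient
    # step, then move the weights along the updated velocity.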
v = config['momentum']*v - config['learning_rate']*dw
next_w = w + v
###########################################################################
# END OF YOUR CODE #
###########################################################################
config['velocity'] = v
return next_w, config
def rmsprop(x, dx, config=None):
"""
Uses the RMSProp update rule, which uses a moving average of squared
gradient values to set adaptive per-parameter learning rates.
config format:
- learning_rate: Scalar learning rate.
- decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- cache: Moving average of second moments of gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('decay_rate', 0.99)
config.setdefault('epsilon', 1e-8)
config.setdefault('cache', np.zeros_like(x))
next_x = None
###########################################################################
# TODO: Implement the RMSprop update formula, storing the next value of x #
# in the next_x variable. Don't forget to update cache value stored in #
# config['cache']. #
###########################################################################
config['cache'] = config['decay_rate']*config['cache'] + (1 - config['decay_rate']) * dx * dx
next_x = x - config['learning_rate']*dx / (np.sqrt(config['cache']) + config['epsilon'])
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_x, config
def adam(x, dx, config=None):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
config format:
- learning_rate: Scalar learning rate.
- beta1: Decay rate for moving average of first moment of gradient.
- beta2: Decay rate for moving average of second moment of gradient.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- m: Moving average of gradient.
- v: Moving average of squared gradient.
- t: Iteration number.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-3)
config.setdefault('beta1', 0.9)
config.setdefault('beta2', 0.999)
config.setdefault('epsilon', 1e-8)
config.setdefault('m', np.zeros_like(x))
config.setdefault('v', np.zeros_like(x))
config.setdefault('t', 0)
next_x = None
###########################################################################
# TODO: Implement the Adam update formula, storing the next value of x in #
# the next_x variable. Don't forget to update the m, v, and t variables #
# stored in config. #
###########################################################################
config['t'] += 1
config['m'] = config['beta1']*config['m'] + (1 - config['beta1'])*dx
config['v'] = config['beta2']*config['v'] + (1 - config['beta2'])*dx*dx
mt = config['m'] / (1 - config['beta1']**config['t'])
vt = config['v'] / (1 - config['beta2']**config['t'])
next_x = x - config['learning_rate']*mt / (np.sqrt(vt) + config['epsilon'])
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_x, config
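# Usage sketch (added for illustration; not part of the original assignment
# code): every rule above follows `next_w, config = rule(w, dw, config)`, so
# the same loop works with sgd, sgd_momentum, rmsprop or adam. The quadratic
# toy objective ||w||^2 below is made up purely to exercise the updates.
if __name__ == '__main__':
    w = np.random.randn(10, 5)
    config = None
    for _ in range(200):
        dw = 2.0 * w  # gradient of the toy objective ||w||^2
        w, config = adam(w, dw, config)
    print('max |w| after 200 Adam steps: %f' % np.abs(w).max())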
| mit | 2,448,093,510,012,221,400 | 40.463576 | 97 | 0.54001 | false | 4.590176 | true | false | false |
cvandeplas/plaso | plaso/parsers/winreg_plugins/msie_zones.py | 1 | 11257 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the MSIE zone settings plugin."""
from plaso.events import windows_events
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'Elizabeth Schweinsberg ([email protected])'
class MsieZoneSettingsPlugin(interface.KeyPlugin):
"""Windows Registry plugin for parsing the MSIE Zones settings."""
NAME = 'winreg_msie_zone'
DESCRIPTION = u'Parser for Internet Explorer zone settings Registry data.'
REG_TYPE = 'NTUSER'
REG_KEYS = [
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Zones'),
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones')]
URLS = ['http://support.microsoft.com/kb/182569']
ZONE_NAMES = {
'0': '0 (My Computer)',
'1': '1 (Local Intranet Zone)',
'2': '2 (Trusted sites Zone)',
'3': '3 (Internet Zone)',
'4': '4 (Restricted Sites Zone)',
'5': '5 (Custom)'
}
KNOWN_PERMISSIONS_VALUE_NAMES = [
'1001', '1004', '1200', '1201', '1400', '1402', '1405', '1406', '1407',
'1601', '1604', '1606', '1607', '1608', '1609', '1800', '1802', '1803',
'1804', '1809', '1A04', '2000', '2001', '2004', '2100', '2101', '2102',
'2200', '2201', '2300']
CONTROL_VALUES_PERMISSIONS = {
0x00000000: '0 (Allow)',
0x00000001: '1 (Prompt User)',
0x00000003: '3 (Not Allowed)',
0x00010000: '0x00010000 (Administrator approved)'
}
CONTROL_VALUES_SAFETY = {
0x00010000: '0x00010000 (High safety)',
0x00020000: '0x00020000 (Medium safety)',
0x00030000: '0x00030000 (Low safety)'
}
CONTROL_VALUES_1A00 = {
0x00000000: ('0x00000000 (Automatic logon with current user name and '
'password)'),
0x00010000: '0x00010000 (Prompt for user name and password)',
0x00020000: '0x00020000 (Automatic logon only in Intranet zone)',
0x00030000: '0x00030000 (Anonymous logon)'
}
CONTROL_VALUES_1C00 = {
0x00000000: '0x00000000 (Disable Java)',
0x00010000: '0x00010000 (High safety)',
0x00020000: '0x00020000 (Medium safety)',
0x00030000: '0x00030000 (Low safety)',
0x00800000: '0x00800000 (Custom)'
}
FEATURE_CONTROLS = {
'1200': 'Run ActiveX controls and plug-ins',
'1400': 'Active scripting',
'1001': 'Download signed ActiveX controls',
'1004': 'Download unsigned ActiveX controls',
'1201': 'Initialize and script ActiveX controls not marked as safe',
'1206': 'Allow scripting of IE Web browser control',
'1207': 'Reserved',
'1208': 'Allow previously unused ActiveX controls to run without prompt',
'1209': 'Allow Scriptlets',
'120A': 'Override Per-Site (domain-based) ActiveX restrictions',
'120B': 'Override Per-Site (domain-based) ActiveX restrictions',
'1402': 'Scripting of Java applets',
'1405': 'Script ActiveX controls marked as safe for scripting',
'1406': 'Access data sources across domains',
'1407': 'Allow Programmatic clipboard access',
'1408': 'Reserved',
'1601': 'Submit non-encrypted form data',
'1604': 'Font download',
'1605': 'Run Java',
'1606': 'Userdata persistence',
'1607': 'Navigate sub-frames across different domains',
'1608': 'Allow META REFRESH',
'1609': 'Display mixed content',
'160A': 'Include local directory path when uploading files to a server',
'1800': 'Installation of desktop items',
'1802': 'Drag and drop or copy and paste files',
'1803': 'File Download',
'1804': 'Launching programs and files in an IFRAME',
'1805': 'Launching programs and files in webview',
'1806': 'Launching applications and unsafe files',
'1807': 'Reserved',
'1808': 'Reserved',
'1809': 'Use Pop-up Blocker',
'180A': 'Reserved',
'180B': 'Reserved',
'180C': 'Reserved',
'180D': 'Reserved',
'1A00': 'User Authentication: Logon',
'1A02': 'Allow persistent cookies that are stored on your computer',
'1A03': 'Allow per-session cookies (not stored)',
'1A04': 'Don\'t prompt for client cert selection when no certs exists',
'1A05': 'Allow 3rd party persistent cookies',
'1A06': 'Allow 3rd party session cookies',
'1A10': 'Privacy Settings',
'1C00': 'Java permissions',
'1E05': 'Software channel permissions',
'1F00': 'Reserved',
'2000': 'Binary and script behaviors',
'2001': '.NET: Run components signed with Authenticode',
'2004': '.NET: Run components not signed with Authenticode',
'2100': 'Open files based on content, not file extension',
'2101': 'Web sites in less privileged zone can navigate into this zone',
'2102': ('Allow script initiated windows without size/position '
'constraints'),
'2103': 'Allow status bar updates via script',
'2104': 'Allow websites to open windows without address or status bars',
'2105': 'Allow websites to prompt for information using scripted windows',
'2200': 'Automatic prompting for file downloads',
'2201': 'Automatic prompting for ActiveX controls',
'2300': 'Allow web pages to use restricted protocols for active content',
'2301': 'Use Phishing Filter',
'2400': '.NET: XAML browser applications',
'2401': '.NET: XPS documents',
'2402': '.NET: Loose XAML',
'2500': 'Turn on Protected Mode',
'2600': 'Enable .NET Framework setup',
'{AEBA21FA-782A-4A90-978D-B72164C80120}': 'First Party Cookie',
'{A8A88C49-5EB2-4990-A1A2-0876022C854F}': 'Third Party Cookie'
}
def GetEntries(
self, parser_context, file_entry=None, key=None, registry_type=None,
**unused_kwargs):
"""Retrieves information of the Internet Settings Zones values.
The MSIE Feature controls are stored in the Zone specific subkeys in:
Internet Settings\\Zones key
Internet Settings\\Lockdown_Zones key
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: optional file entry object (instance of dfvfs.FileEntry).
The default is None.
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
"""
text_dict = {}
if key.number_of_values == 0:
error_string = u'Key: {0:s} missing values.'.format(key.path)
parser_context.ProduceParseError(
self.NAME, error_string, file_entry=file_entry)
else:
for value in key.GetValues():
if not value.name:
value_name = '(default)'
else:
value_name = u'{0:s}'.format(value.name)
if value.DataIsString():
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, value.data)
elif value.DataIsInteger():
value_string = u'[{0:s}] {1:d}'.format(
value.data_type_string, value.data)
elif value.DataIsMultiString():
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, u''.join(value.data))
else:
value_string = u'[{0:s}]'.format(value.data_type_string)
text_dict[value_name] = value_string
# Generate at least one event object for the key.
event_object = windows_events.WindowsRegistryEvent(
key.last_written_timestamp, key.path, text_dict, offset=key.offset,
registry_type=registry_type, urls=self.URLS)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
if key.number_of_subkeys == 0:
error_string = u'Key: {0:s} missing subkeys.'.format(key.path)
parser_context.ProduceParseError(
self.NAME, error_string, file_entry=file_entry)
return
for zone_key in key.GetSubkeys():
# TODO: these values are stored in the Description value of the
# zone key. This solution will break on zone values that are larger
# than 5.
path = u'{0:s}\\{1:s}'.format(key.path, self.ZONE_NAMES[zone_key.name])
text_dict = {}
# TODO: this plugin currently just dumps the values and does not
# distinguish between what is a feature control or not.
for value in zone_key.GetValues():
# Ignore the default value.
if not value.name:
continue
if value.DataIsString():
value_string = value.data
elif value.DataIsInteger():
if value.name in self.KNOWN_PERMISSIONS_VALUE_NAMES:
value_string = self.CONTROL_VALUES_PERMISSIONS[value.data]
elif value.name == '1A00':
value_string = self.CONTROL_VALUES_1A00[value.data]
elif value.name == '1C00':
value_string = self.CONTROL_VALUES_1C00[value.data]
elif value.name == '1E05':
value_string = self.CONTROL_VALUES_SAFETY[value.data]
else:
value_string = u'{0:d}'.format(value.data)
else:
value_string = u'[{0:s}]'.format(value.data_type_string)
if len(value.name) == 4 and value.name != 'Icon':
value_description = self.FEATURE_CONTROLS.get(value.name, 'UNKNOWN')
else:
value_description = self.FEATURE_CONTROLS.get(value.name, '')
if value_description:
feature_control = u'[{0:s}] {1:s}'.format(
value.name, value_description)
else:
feature_control = u'[{0:s}]'.format(value.name)
text_dict[feature_control] = value_string
event_object = windows_events.WindowsRegistryEvent(
zone_key.last_written_timestamp, path, text_dict,
offset=zone_key.offset, registry_type=registry_type,
urls=self.URLS)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
class MsieZoneSettingsSoftwareZonesPlugin(MsieZoneSettingsPlugin):
"""Parses the Zones key in the Software hive."""
NAME = 'winreg_msie_zone_software'
REG_TYPE = 'SOFTWARE'
REG_KEYS = [
u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\\Zones',
(u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones'),
(u'\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Zones'),
(u'\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones')]
winreg.WinRegistryParser.RegisterPlugins([
MsieZoneSettingsPlugin, MsieZoneSettingsSoftwareZonesPlugin])
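# Illustration only (not part of the plugin): how the lookup tables above turn
# a raw zone value into a readable string. The value name and data are invented.
def _example_describe_setting(value_name, raw_data):
  """Returns a human readable description for a hypothetical zone value."""
  description = MsieZoneSettingsPlugin.FEATURE_CONTROLS.get(value_name, 'UNKNOWN')
  if value_name in MsieZoneSettingsPlugin.KNOWN_PERMISSIONS_VALUE_NAMES:
    setting = MsieZoneSettingsPlugin.CONTROL_VALUES_PERMISSIONS.get(
        raw_data, raw_data)
  else:
    setting = raw_data
  return u'[{0:s}] {1:s}: {2!s}'.format(value_name, description, setting)
# For example _example_describe_setting('1400', 0x00000003) returns
# u'[1400] Active scripting: 3 (Not Allowed)'.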
| apache-2.0 | -8,224,367,203,974,624,000 | 38.36014 | 80 | 0.639424 | false | 3.614965 | false | false | false |
datamade/yournextmp-popit | elections/bf_elections_2015/management/commands/bf_elections_2015_import_candidate.py | 1 | 9711 | # -*- coding: utf-8 -*-
import dateutil.parser
import csv
from os.path import dirname, join
import re
import string
import codecs
import requests
from django.core.management.base import BaseCommand
from candidates.utils import strip_accents
from candidates.views.version_data import get_change_metadata
from elections.models import Election
UNKNOWN_PARTY_ID = 'unknown'
USER_AGENT = (
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Ubuntu Chromium/38.0.2125.111 '
'Chrome/38.0.2125.111Safari/537.36'
)
def get_post_data(api, election_id, province):
from candidates.cache import get_post_cached
from candidates.election_specific import AREA_DATA, AREA_POST_DATA
ynr_election_data = Election.objects.get_by_slug(election_id)
area_key = (ynr_election_data.area_types.first().name,
ynr_election_data.area_generation)
areas_by_name = AREA_DATA.areas_by_name[area_key]
if province != 'Burkina Faso':
province = strip_accents(province).upper()
area = areas_by_name[province]
post_id = AREA_POST_DATA.get_post_id(
election_id, area['type'], area['id']
)
post_data = get_post_cached(api, post_id)['result']
return ynr_election_data, post_data
def get_existing_popit_person(vi_person_id):
from candidates.models import PopItPerson
from candidates.popit import get_search_url
# See if this person already exists by searching for the
# ID they were imported with:
query_format = \
'identifiers.identifier:"{id}" AND ' + \
'identifiers.scheme:"{scheme}"'
search_url = get_search_url(
'persons',
query_format.format(
id=vi_person_id, scheme='import-id'
),
embed='membership.organization'
)
results = requests.get(search_url).json()
total = results['total']
if total > 1:
message = "Multiple matches for CI ID {0}"
raise Exception(message.format(vi_person_id))
if total == 0:
return None
# Otherwise there was exactly one result:
return PopItPerson.create_from_dict(results['result'][0])
def get_party_data(party_name):
from candidates.popit import get_search_url
# See if this person already exists by searching for the
# ID they were imported with:
party_name = party_name.replace('/', '')
party_name = party_name.decode('utf-8')
query_format = \
'name:"{name}"'
search_url = get_search_url(
'organizations',
query_format.format(
name=party_name
)
)
print party_name
results = requests.get(search_url).json()
print results
total = results['total']
if total > 1:
message = "Multiple matches for party {0}"
raise Exception(message.format(party_name))
if total == 0:
return None
# Otherwise there was exactly one result:
return results['result'][0]
""" These functions taken from the csv docs -
https://docs.python.org/2/library/csv.html#examples"""
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
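def preview_csv(path, encoding='windows-1252', limit=3):
    # Illustrative helper (not part of the original import command): shows how
    # unicode_csv_reader expects a decoded file object; the path is hypothetical.
    with codecs.open(path, 'r', encoding=encoding) as handle:
        for index, row in enumerate(unicode_csv_reader(handle)):
            if index >= limit:
                break
            print row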
class Command(BaseCommand):
help = "Import inital candidate data"
def handle(self, username=None, **options):
from slumber.exceptions import HttpClientError
from candidates.election_specific import PARTY_DATA, shorten_post_label
from candidates.models import PopItPerson
from candidates.popit import create_popit_api_object
election_data = {
'prv-2015': 'listedescandidatsauxelectionslegislativeslisteprovincialeanptic.csv',
'nat-2015': 'listedescandidatsauxelectionslegislativesanptic.csv'
}
field_map = {
'prv-2015': {
'region': 1,
'party': 4,
'list_order': 5,
'first_name': 7,
'last_name': 6,
'gender': 8,
'birth_date': 9,
'party_short': 3
},
'nat-2015': {
'region': 0,
'party': 2,
'list_order': 3,
'first_name': 5,
'last_name': 4,
'gender': 6,
'birth_date': 7,
'party_short': 2
}
}
api = create_popit_api_object()
party_id_missing = {}
party_name_to_id = {}
for party_id, party_name in PARTY_DATA.party_id_to_name.items():
party_name_to_id[party_name] = party_id
for election_id, filename in election_data.items():
csv_filename = join(
dirname(__file__), '..', '..', 'data', filename
)
fields = field_map[election_id]
with codecs.open(csv_filename, 'r', encoding='windows-1252') as f:
initial = True
for candidate in unicode_csv_reader(f):
# skip header line
if initial:
initial = False
continue
region = candidate[fields['region']]
party = candidate[fields['party']]
party_list_order = candidate[fields['list_order']]
first_name = string.capwords(candidate[fields['first_name']])
last_name = string.capwords(candidate[fields['last_name']])
gender = candidate[fields['gender']]
birth_date = None
if candidate[fields['birth_date']] is not None:
birth_date = str(dateutil.parser.parse(
candidate[fields['birth_date']], dayfirst=True
).date())
name = first_name + ' ' + last_name
id = '-'.join([
re.sub('[^\w]*', '', re.sub(r' ', '-', strip_accents(name.lower()))),
re.sub('[^\w]*', '', candidate[fields['party_short']].lower()),
birth_date
])
# national candidate
if region == 'PAYS':
region = 'Burkina Faso'
election_data, post_data = get_post_data(
api, election_id, region
)
# debug
# tmp = '%s %s %s (%s) - %s (%s)' % ( id, first_name, last_name, party, region, post_data['label'] )
# print tmp
person = get_existing_popit_person(id)
if person:
# print "Found an existing person:", person.get_absolute_url()
pass
else:
print "No existing person, creating a new one:", name
person = PopItPerson()
person.set_identifier('import-id', id)
person.family_name = last_name
person.given_name = first_name
person.name = name
person.gender = gender
if birth_date:
person.birth_date = str(birth_date)
else:
person.birth_date = None
standing_in_election = {
'post_id': post_data['id'],
'name': shorten_post_label(post_data['label']),
'party_list_position': party_list_order,
}
if 'area' in post_data:
standing_in_election['mapit_url'] = post_data['area']['identifier']
person.standing_in = {
election_data.slug: standing_in_election
}
change_metadata = get_change_metadata(
None,
'Imported candidate from CSV',
)
party_comp = re.sub(' +', ' ', party)
party_id = UNKNOWN_PARTY_ID
if party_comp in party_name_to_id.keys():
party_id = party_name_to_id[party_comp]
party = party_comp
else:
party_id = party_name_to_id['Unknown Party']
party = 'Unknown Party'
if party_id == UNKNOWN_PARTY_ID and party_comp not in party_id_missing.keys():
party_id_missing[party_comp] = 1
person.party_memberships = {
election_data.slug: {
'id': party_id,
'name': party,
'imported_name': party_comp
}
}
person.record_version(change_metadata)
try:
person.save_to_popit(api)
except HttpClientError as hce:
print "Got an HttpClientError:", hce.content
raise
if len(party_id_missing) > 0:
print "Unmatched party names:"
for name in party_id_missing.keys():
print name
| agpl-3.0 | -1,523,529,099,116,446,000 | 34.702206 | 120 | 0.510658 | false | 4.218506 | false | false | false |
matthewghgriffiths/nestedbasinsampling | examples/LJ31/system.py | 1 | 3922 | import logging
from pele.potentials import LJ
from nestedbasinsampling import (
NoGUTSSampler, NestedOptimizerKalman, HardShellConstraint, random_structure,
RecordMinimization, CompareStructures, LOG_CONFIG, Database)
logger = logging.getLogger("LJ31.system")
logger = logging.getLogger("NBS.LJ_system")
default_sampler_kws = dict(
max_depth=7, remove_linear_momentum=True, remove_angular_momentum=True,
remove_initial_linear_momentum=False, remove_initial_angular_momentum=False)
default_nopt_kws = dict(
nsteps=2000, MC_steps=10, target_acc=0.4, nsave=30, tol=1e-2,
nwait=15, kalman_discount=100.)
default_struct_kws = dict(niter=100)
default_database_kws = dict()
class NBS_LJ(object):
"""
"""
def __init__(self, natoms, radius=None, stepsize=None,
sampler_kws=None, nopt_kws=None, stepsize_kw=None,
struct_kws=None, database_kws=None):
self.pot = LJ()
self.natoms = natoms
self.radius = float(natoms) ** (1. / 3) if radius is None else radius
self.constraint = HardShellConstraint(self.radius)
self.sampler_kws = default_sampler_kws.copy()
        if sampler_kws is not None: self.sampler_kws.update(sampler_kws)
self.sampler = NoGUTSSampler(
self.pot, constraint=self.constraint, **self.sampler_kws)
self.nopt_kws = default_nopt_kws.copy()
if nopt_kws is not None: self.nopt_kws.update(nopt_kws)
self.struct_kws = default_struct_kws.copy()
if struct_kws is not None: self.struct_kws.update(struct_kws)
self.database_kws = default_database_kws.copy()
if database_kws is not None: self.database_kws.update(database_kws)
if 'compareMinima' not in self.database_kws:
self.database_kws['compareMinima'] = self.get_compare_structures()
if stepsize is None:
kws = {} if stepsize_kw is None else stepsize_kw
s = self.determine_stepsize(
target_acc=self.nopt_kws['target_acc'], **kws)
self.stepsize = s[-1]
else:
self.stepsize = stepsize
def determine_stepsize(self, coords=None, E=None, **kwargs):
if coords is None: coords = self.random_config()
if E is None: E = self.pot.getEnergy(coords)
s = self.sampler.determine_stepsize(coords, E, **kwargs)
return s
def random_config(self):
return random_structure(self.natoms, self.radius)
def nopt(self, coords=None, Ecut=None, stepsize=None):
if coords is None: coords = self.random_config()
if Ecut is None: Ecut = self.pot.getEnergy(coords)
if stepsize is None: stepsize = self.stepsize
opt = NestedOptimizerKalman(
coords, self.pot, sampler=self.sampler,
energy=Ecut, stepsize=stepsize, **self.nopt_kws)
return dict(opt.run())
def get_configuration(self):
coords = self.random_config()
Ecut = self.pot.getEnergy(coords)
stepsize = self.stepsize
return coords, Ecut, stepsize
def get_compare_structures(self):
return CompareStructures(**self.struct_kws)
def get_database(self, dbname=":memory:"):
db = Database(dbname, **self.database_kws)
db.add_property('sampler', self.sampler_kws, overwrite=False)
db.add_property('nopt', self.nopt_kws, overwrite=False)
db.add_property('struct', self.struct_kws, overwrite=False)
logger.info("Connecting to database: {:s}".format(dbname))
logger.info("params:\nsampler:\n{:s}\nnopt:\n{:s}".format(
str(self.sampler_kws), str(self.nopt_kws)))
return db
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, **LOG_CONFIG)
system = NBS_LJ(natoms=31, stepsize=0.1)
res = system.nopt()
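    # Illustration only: the exact keys of the result dict depend on
    # NestedOptimizerKalman.run(), so we just report whatever came back.
    logger.info("nested optimisation returned keys: %s", sorted(res.keys()))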
| gpl-3.0 | 6,799,967,800,464,881,000 | 37.616162 | 80 | 0.629526 | false | 3.511191 | true | false | false |
ubivar/ubivar-python | ubivar/error.py | 1 | 1837 | # Exceptions
class UbivarError(Exception):
def __init__(self, message=None, http_body=None, http_status=None,
json_body=None, headers=None):
super(UbivarError, self).__init__(message)
if http_body and hasattr(http_body, 'decode'):
try:
http_body = http_body.decode('utf-8')
except BaseException:
http_body = ('<Could not decode body as utf-8. '
'Please report to [email protected]>')
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.request_id = self.headers.get('request-id', None)
def __unicode__(self):
if self.request_id is not None:
msg = self._message or "<empty message>"
return u"Request {0}: {1}".format(self.request_id, msg)
else:
return self._message
def __str__(self):
return self.__unicode__()
class APIError(UbivarError):
pass
class APIConnectionError(UbivarError):
pass
class InvalidRequestError(UbivarError):
def __init__(self, message, param, http_body=None,
http_status=None, json_body=None, headers=None):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body,
headers)
self.param = param
class AuthenticationError(UbivarError):
pass
class PermissionError(UbivarError):
pass
class RateLimitError(UbivarError):
pass
class SignatureVerificationError(UbivarError):
def __init__(self, message, sig_header, http_body=None):
super(SignatureVerificationError, self).__init__(
message, http_body)
self.sig_header = sig_header
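# Minimal illustration (not part of the SDK): exercising the error hierarchy
# directly with made-up values to show where the request id ends up.
if __name__ == '__main__':
    error = APIError('boom', http_status=500, headers={'request-id': 'req_123'})
    print(str(error))  # -> "Request req_123: boom"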
| mit | -8,536,734,799,136,476,000 | 25.623188 | 70 | 0.598258 | false | 3.967603 | false | false | false |
duncanwp/cis_plugins | Grosvenor_CDNC.py | 1 | 2421 | from cis.data_io.products import NetCDF_Gridded
import cis.data_io.gridded_data as gd
import logging
from cis.utils import demote_warnings
class Grosvenor_CDNC(NetCDF_Gridded):
"""
Plugin for reading Dan Grosvenor's MODIS CDNC files.
"""
@staticmethod
def load_multiple_files_callback(cube, field, filename):
# We need to remove these global attributes when reading multiple files so that the cubes can be properly merged
cube.attributes.pop('history', None)
cube.attributes.pop('CreationDate', None)
return cube
def get_file_signature(self):
return [r'.*\.nc']
def create_data_object(self, filenames, variable):
"""Reads the data for a variable.
:param filenames: list of names of files from which to read data
:param variable: (optional) name of variable; if None, the file(s) must contain data for only one cube
:return: iris.cube.Cube
"""
from cis.time_util import convert_cube_time_coord_to_standard_time
from cis.utils import single_warnings_only
from iris.coords import DimCoord
from numpy.ma import masked_invalid
import iris
# Filter the warnings so that they only appear once - otherwise you get lots of repeated warnings
# - partly because we open the files multiple times (to look for aux coords) and partly because iris
# will throw a warning every time it meets a variable with a non-CF dimension
with single_warnings_only():
cube = self._create_cube(filenames, variable)
# For this data we actually need to add the dim coords...
cubes = iris.load(filenames)
cube.add_dim_coord(DimCoord(cubes.extract('lon')[0].data, units='degrees_east',
standard_name='longitude'), 0)
cube.add_dim_coord(DimCoord(cubes.extract('lat')[0].data, units='degrees_north',
standard_name='latitude'), 1)
cube.add_dim_coord(DimCoord(cubes.extract('time')[0].data, units='days since 1970-01-01 00:00:00',
standard_name='time'), 2)
if cube.attributes['invalid_units'] == 'cm^{-3}':
cube.units = 'cm-3'
# Mask the NaNs
cube.data = masked_invalid(cube.data)
cube = convert_cube_time_coord_to_standard_time(cube)
return cube
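# Usage sketch (illustrative only): with this module on the CIS plugin path the
# product can be selected by name. `cis.read_data`, the file pattern and the
# variable name below are assumptions, not taken from this plugin.
#
# from cis import read_data
# cdnc = read_data('MODIS_CDNC_*.nc', 'CDNC', product='Grosvenor_CDNC')
# print(cdnc)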
| lgpl-3.0 | -3,232,205,901,868,574,700 | 40.033898 | 120 | 0.634036 | false | 4.041736 | false | false | false |
PaulWay/insights-core | insights/parsers/current_clocksource.py | 1 | 1559 | """
CurrentClockSource - file ``/sys/devices/system/clocksource/clocksource0/current_clocksource``
==============================================================================================
This is a relatively simple parser that reads the
``/sys/devices/system/clocksource/clocksource0/current_clocksource`` file.
As well as reporting the contents of the file in its ``data`` property, it
also provides three properties that are true if the clock source is set to
that value:
* **is_kvm** - the clock source file contains 'kvm-clock'
* **is_tsc** - the clock source file contains 'tsc'
* **is_vmi_timer** - the clock source file contains 'vmi-timer'
Examples:
>>> cs = shared[CurrentClockSource]
>>> cs.data
'tsc'
>>> cs.is_tsc
True
"""
from .. import Parser, parser
@parser("current_clocksource")
class CurrentClockSource(Parser):
"""
The CurrentClockSource parser class.
Attributes:
data (str): the content of the current_clocksource file.
"""
def parse_content(self, content):
self.data = list(content)[0]
@property
def is_kvm(self):
"""
bool: does the clock source contain 'kvm-clock'?
"""
return 'kvm-clock' in self.data
@property
def is_tsc(self):
"""
bool: does the clock source contain 'tsc'?
"""
return 'tsc' in self.data
@property
def is_vmi_timer(self):
"""
bool: does the clock source contain 'vmi-timer'?
"""
return 'vmi-timer' in self.data
| apache-2.0 | -2,943,900,483,633,057,000 | 25.87931 | 94 | 0.592046 | false | 3.936869 | false | false | false |
akuster/yali | yali/gui/ScrCheckCD.py | 1 | 4754 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt4.Qt import QWidget, SIGNAL, QIcon, QPixmap
import pisi.ui
import yali.context as ctx
import yali.pisiiface
from yali.gui import ScreenWidget
from yali.gui.Ui.checkcdwidget import Ui_CheckCDWidget
from yali.gui.YaliDialog import Dialog
class Widget(QWidget, ScreenWidget):
name = "mediaCheck"
def __init__(self):
QWidget.__init__(self)
self.ui = Ui_CheckCDWidget()
self.ui.setupUi(self)
self.check_media_stop = True
self.connect(self.ui.checkButton, SIGNAL("clicked()"), self.slotCheckCD)
if ctx.consts.lang == "tr":
self.ui.progressBar.setFormat("%%p")
self.ui.validationSucceedBox.hide()
self.ui.validationFailBox.hide()
self.ui.progressBar.hide()
def shown(self):
pass
def slotCheckCD(self):
if self.check_media_stop:
self.check_media_stop = False
self.ui.progressBar.show()
icon = QIcon()
icon.addPixmap(QPixmap(":/gui/pics/dialog-error.png"), QIcon.Normal, QIcon.Off)
self.ui.checkButton.setIcon(icon)
self.ui.checkButton.setText("")
self.checkMedia()
else:
self.check_media_stop = True
self.ui.progressBar.show()
icon = QIcon()
icon.addPixmap(QPixmap(":/gui/pics/task-accepted.png"), QIcon.Normal, QIcon.Off)
self.ui.checkButton.setIcon(icon)
self.ui.checkButton.setText(_("Validate"))
def checkMedia(self):
ctx.mainScreen.disableNext()
ctx.mainScreen.disableBack()
ctx.interface.informationWindow.update(_("Starting validation..."))
class PisiUI(pisi.ui.UI):
def notify(self, event, **keywords):
pass
def display_progress(self, operation, percent, info, **keywords):
pass
yali.pisiiface.initialize(ui=PisiUI(), with_comar=False, nodestDir=True)
yali.pisiiface.addCdRepo()
ctx.mainScreen.processEvents()
pkg_names = yali.pisiiface.getAvailablePackages()
self.ui.progressBar.setMaximum(len(pkg_names))
self.ui.checkLabel.setText(_("Package validation is in progress. "
"Please wait until it is completed."))
cur = 0
flag = 0
for pkg_name in pkg_names:
cur += 1
ctx.logger.debug("Validating %s " % pkg_name)
ctx.interface.informationWindow.update(_("Validating %s") % pkg_name)
if self.check_media_stop:
continue
try:
yali.pisiiface.checkPackageHash(pkg_name)
self.ui.progressBar.setValue(cur)
except:
rc = ctx.interface.messageWindow(_("Warning"),
_("Validation of %s package failed."
"Please remaster your installation medium and"
"reboot.") % pkg_name,
type="custom", customIcon="warning",
customButtons=[_("Skip Validation"), _("Skip Package"), _("Reboot")],
default=0)
flag = 1
if not rc:
self.ui.validationBox.hide()
self.ui.validationFailBox.show()
ctx.mainScreen.enableNext()
break
elif rc == 1:
continue
else:
yali.util.reboot()
if not self.check_media_stop and flag == 0:
ctx.interface.informationWindow.update(_('<font color="#FFF"><b>Validation succeeded. You can proceed with the installation.</b></font>'))
self.ui.validationSucceedBox.show()
self.ui.validationBox.hide()
else:
ctx.interface.informationWindow.hide()
self.ui.progressBar.setValue(0)
yali.pisiiface.removeRepo(ctx.consts.cd_repo_name)
ctx.mainScreen.enableNext()
ctx.mainScreen.enableBack()
self.ui.checkLabel.setText(_("Package validation is finished."))
ctx.interface.informationWindow.hide()
| gpl-2.0 | -8,076,896,464,390,555,000 | 35.569231 | 150 | 0.562474 | false | 4.222025 | false | false | false |
cscanlin/munger-builder | script_builder/views.py | 1 | 5537 | import os
import re
import csv
import json
import time
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login
from django.conf import settings
from guardian.shortcuts import assign_perm, get_objects_for_user
from .models import DataField, FieldType, CSVDocument, MungerBuilder, PivotField
from .tasks import download_munger_async, download_test_data_async
INDEX_REDIRECT = HttpResponseRedirect('/script_builder/munger_builder_index')
def munger_builder_index(request):
user = get_user_or_create_anon(request)
anon_check(request)
munger_builder_list = get_objects_for_user(user, 'script_builder.change_mungerbuilder')
if len(munger_builder_list) == 0:
munger_builder_list = add_sample_munger(user)
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_builder/munger_builder_index.html', context)
def get_user_or_create_anon(request):
if not request.user.id:
timestamp = int(time.time())
credentials = {
'username': 'anon_{0}'.format(timestamp),
'password': timestamp,
}
user = User.objects.create_user(**credentials)
user.save()
assign_perm('script_builder.add_mungerbuilder', user)
assign_perm('script_builder.add_fieldtype', user)
assign_perm('script_builder.add_datafield', user)
assign_perm('script_builder.add_pivotfield', user)
group = Group.objects.get(name='Global Sample')
user.groups.add(group)
user.save()
anon_user = authenticate(**credentials)
login(request, anon_user)
else:
user = request.user
return user
def add_sample_munger(user):
mb = MungerBuilder.objects.create(
munger_name='Sample for {0}'.format(user.username),
input_path='test_data.csv',
is_sample=True,
)
mb.save()
mb.assign_perms(user)
sample_field_dict = {
'order_num': ['count'],
'product': None,
'sales_name': ['index'],
'region': ['column'],
'revenue': ['mean', 'sum'],
'shipping': ['median'],
}
for field_name, field_types in sample_field_dict.items():
data_field = DataField.objects.create(munger_builder=mb, current_name=field_name)
data_field.save()
data_field.assign_perms(user)
if field_types:
for type_name in field_types:
field_type = FieldType.objects.get(type_name=type_name)
PivotField.objects.create(data_field=data_field, field_type=field_type).save()
return get_objects_for_user(user, 'script_builder.change_mungerbuilder')
def new_munger_builder(request):
user = get_user_or_create_anon(request)
mb = MungerBuilder.objects.create(munger_name='New Munger - {0}'.format(user.username))
mb.save()
mb.assign_perms(user)
return HttpResponseRedirect('/script_builder/pivot_builder/{0}'.format(mb.id))
def pivot_builder(request, munger_builder_id):
anon_check(request)
mb = MungerBuilder.objects.get(pk=munger_builder_id)
if not mb.user_is_authorized():
return INDEX_REDIRECT
return render(request, 'script_builder/pivot_builder_react.html', context={'mb': mb})
def download_munger(request, munger_builder_id):
task = download_munger_async.delay(munger_builder_id)
return render_to_response('script_builder/poll_for_download.html',
{'task_id': task.id, 'mb_id': munger_builder_id})
def download_test_data(request, munger_builder_id):
task = download_test_data_async.delay(munger_builder_id)
return render_to_response('script_builder/poll_for_download.html',
{'task_id': task.id, 'mb_id': munger_builder_id})
def poll_for_download(request):
task_id = request.GET.get("task_id")
filename = request.GET.get("filename")
if filename == 'test_data.csv':
async_func = download_test_data_async
file_path = os.path.join(settings.STATIC_ROOT, filename)
else:
async_func = download_munger_async
file_path = os.path.join(settings.MEDIA_ROOT, 'user_munger_scripts', '{0}'.format(filename))
if request.is_ajax():
result = async_func.AsyncResult(task_id)
if result.ready():
return HttpResponse(json.dumps({"filename": result.get()}))
return HttpResponse(json.dumps({"filename": None}))
with open(file_path, 'r') as f:
response = HttpResponse(f, content_type='application/octet-stream')
response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
return response
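# Polling contract (informal sketch, not generated from the code): the download
# template polls this view with ?task_id=...&filename=...; while the Celery task
# is still running the AJAX branch answers {"filename": null}, once the task is
# ready it answers {"filename": "<name>"}, and the follow-up non-AJAX request
# streams the finished file back as an attachment.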
# Helper Functions
def parse_text_fields(form, request, input_type):
if input_type == 'text':
return re.split('[,\t\n]', form.cleaned_data['fields_paste'])
if input_type == 'csv':
new_csv = CSVDocument(csv_file=request.FILES['csv_file'])
new_csv.save()
reader = csv.DictReader(request.FILES['csv_file'])
return reader.fieldnames
def anon_check(request):
if 'anon_' in request.user.username:
anon_message = """You are logged in as an anonymous user.
You may not be able to transfer any mungers to a permanent account in the future.
Register to save mungers."""
messages.warning(request, anon_message)
| mit | -3,080,153,474,421,255,000 | 34.954545 | 104 | 0.657576 | false | 3.607166 | true | false | false |
CI-WATER/gsshapy | gsshapy/grid/nwm_to_gssha.py | 1 | 4942 | # -*- coding: utf-8 -*-
#
# nwm_to_gssha.py
# GSSHApy
#
# Created by Alan D Snow, 2016.
# License BSD 3-Clause
import logging
from datetime import timedelta
from os import mkdir, path, remove, rename
import xarray as xr
from .grid_to_gssha import GRIDtoGSSHA
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# MAIN CLASS
# ------------------------------------------------------------------------------
class NWMtoGSSHA(GRIDtoGSSHA):
"""This class converts the National Water Model output data to GSSHA formatted input.
This class inherits from class:`GRIDtoGSSHA`.
Attributes:
gssha_project_folder(:obj:`str`): Path to the GSSHA project folder
gssha_project_file_name(:obj:`str`): Name of the GSSHA elevation grid file.
lsm_input_folder_path(:obj:`str`): Path to the input folder for the LSM files.
lsm_search_card(:obj:`str`): Glob search pattern for LSM files. Ex. "*.grib2".
        lsm_lat_var(Optional[:obj:`str`]): Name of the latitude variable in the LSM netCDF files. Defaults to 'y'.
        lsm_lon_var(Optional[:obj:`str`]): Name of the longitude variable in the LSM netCDF files. Defaults to 'x'.
        lsm_time_var(Optional[:obj:`str`]): Name of the time variable in the LSM netCDF files. Defaults to 'time'.
        lsm_lat_dim(Optional[:obj:`str`]): Name of the latitude dimension in the LSM netCDF files. Defaults to 'y'.
        lsm_lon_dim(Optional[:obj:`str`]): Name of the longitude dimension in the LSM netCDF files. Defaults to 'x'.
        lsm_time_dim(Optional[:obj:`str`]): Name of the time dimension in the LSM netCDF files. Defaults to 'time'.
        output_timezone(Optional[:obj:`tzinfo`]): This is the timezone to output the dates for the data. Default is the GSSHA model timezone. This option does NOT currently work for NetCDF output.
Example::
from datetime import datetime
from gsshapy.grid import NWMtoGSSHA
n2g = NWMtoGSSHA(gssha_project_folder='E:\\GSSHA',
gssha_project_file_name='gssha.prj',
lsm_input_folder_path='E:\\GSSHA\\nwm-data',
lsm_search_card="*.grib")
# example rain gage
out_gage_file = 'E:\\GSSHA\\nwm_rain1.gag'
n2g.lsm_precip_to_gssha_precip_gage(out_gage_file,
lsm_data_var="RAINRATE",
precip_type="RADAR")
# example data var map array
# WARNING: This is not complete
data_var_map_array = [
['precipitation_rate', 'RAINRATE'],
['pressure', 'PSFC'],
['relative_humidity', ['Q2D','T2D', 'PSFC']],
['wind_speed', ['U2D', 'V2D']],
['direct_radiation', 'SWDOWN'], # ???
['diffusive_radiation', 'SWDOWN'], # ???
['temperature', 'T2D'],
['cloud_cover', '????'],
]
        n2g.lsm_data_to_arc_ascii(data_var_map_array)
"""
def __init__(self,
gssha_project_folder,
gssha_project_file_name,
lsm_input_folder_path,
lsm_search_card="*.nc",
lsm_lat_var='y',
lsm_lon_var='x',
lsm_time_var='time',
lsm_lat_dim='y',
lsm_lon_dim='x',
lsm_time_dim='time',
output_timezone=None,
):
"""
Initializer function for the NWMtoGSSHA class
"""
super(NWMtoGSSHA, self).__init__(gssha_project_folder,
gssha_project_file_name,
lsm_input_folder_path,
lsm_search_card,
lsm_lat_var,
lsm_lon_var,
lsm_time_var,
lsm_lat_dim,
lsm_lon_dim,
lsm_time_dim,
output_timezone)
@property
def xd(self):
"""get xarray dataset file handle to LSM files"""
if self._xd is None:
path_to_lsm_files = path.join(self.lsm_input_folder_path,
self.lsm_search_card)
self._xd = super(NWMtoGSSHA, self).xd
self._xd.lsm.coords_projected = True
return self._xd
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type):
"""
This function loads data from LSM and converts to GSSHA format
"""
super(NWMtoGSSHA, self).\
_load_converted_gssha_data_from_lsm(gssha_var, lsm_var, load_type)
self.data.lsm.coords_projected = True
| bsd-3-clause | 1,060,683,975,107,513,100 | 41.973913 | 195 | 0.510522 | false | 3.769641 | false | false | false |
eirmag/weboob | weboob/core/bcall.py | 1 | 7438 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon, Christophe Benz
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
from copy import copy
from threading import Thread, Event, RLock, Timer
from weboob.capabilities.base import CapBaseObject
from weboob.tools.misc import get_backtrace
from weboob.tools.log import getLogger
__all__ = ['BackendsCall', 'CallErrors', 'IResultsCondition', 'ResultsConditionError']
class CallErrors(Exception):
def __init__(self, errors):
msg = 'Errors during backend calls:\n' + \
'\n'.join(['Module(%r): %r\n%r\n' % (backend, error, backtrace)
for backend, error, backtrace in errors])
Exception.__init__(self, msg)
self.errors = copy(errors)
def __iter__(self):
return self.errors.__iter__()
class IResultsCondition(object):
def is_valid(self, obj):
raise NotImplementedError()
class ResultsConditionError(Exception):
pass
class BackendsCall(object):
def __init__(self, backends, condition, function, *args, **kwargs):
"""
@param backends list of backends to call.
@param condition a IResultsCondition object. Can be None.
@param function backends' method name, or callable object.
@param args, kwargs arguments given to called functions.
"""
self.logger = getLogger('bcall')
# Store if a backend is finished
self.backends = {}
for backend in backends:
self.backends[backend.name] = False
# Condition
self.condition = condition
# Global mutex on object
self.mutex = RLock()
# Event set when every backends have give their data
self.finish_event = Event()
# Event set when there are new responses
self.response_event = Event()
# Waiting responses
self.responses = []
# Errors
self.errors = []
# Threads
self.threads = []
# Create jobs for each backend
with self.mutex:
for backend in backends:
self.logger.debug('Creating a new thread for %s' % backend)
                timer = Timer(0, self._caller, (backend, function, args, kwargs))
                self.threads.append(timer)  # keep the Timer itself (start() returns None)
                timer.start()
if not backends:
self.finish_event.set()
def _store_error(self, backend, error):
with self.mutex:
backtrace = get_backtrace(error)
self.errors.append((backend, error, backtrace))
def _store_result(self, backend, result):
with self.mutex:
if isinstance(result, CapBaseObject):
if self.condition and not self.condition.is_valid(result):
return
result.backend = backend.name
self.responses.append((backend, result))
self.response_event.set()
def _caller(self, backend, function, args, kwargs):
self.logger.debug('%s: Thread created successfully' % backend)
with backend:
try:
# Call method on backend
try:
self.logger.debug('%s: Calling function %s' % (backend, function))
if callable(function):
result = function(backend, *args, **kwargs)
else:
result = getattr(backend, function)(*args, **kwargs)
except Exception, error:
self.logger.debug('%s: Called function %s raised an error: %r' % (backend, function, error))
self._store_error(backend, error)
else:
self.logger.debug('%s: Called function %s returned: %r' % (backend, function, result))
if hasattr(result, '__iter__') and not isinstance(result, basestring):
# Loop on iterator
try:
for subresult in result:
# Lock mutex only in loop in case the iterator is slow
# (for example if backend do some parsing operations)
self._store_result(backend, subresult)
except Exception, error:
self._store_error(backend, error)
else:
self._store_result(backend, result)
finally:
with self.mutex:
# This backend is now finished
self.backends[backend.name] = True
for finished in self.backends.itervalues():
if not finished:
return
self.response_event.set()
self.finish_event.set()
def _callback_thread_run(self, callback, errback):
responses = []
while not self.finish_event.isSet() or self.response_event.isSet():
self.response_event.wait()
with self.mutex:
responses = self.responses
self.responses = []
# Reset event
self.response_event.clear()
# Consume responses
while responses:
callback(*responses.pop(0))
if errback:
with self.mutex:
while self.errors:
errback(*self.errors.pop(0))
callback(None, None)
def callback_thread(self, callback, errback=None):
"""
Call this method to create a thread which will callback a
specified function everytimes a new result comes.
When the process is over, the function will be called with
both arguments set to None.
The functions prototypes:
def callback(backend, result)
def errback(backend, error)
"""
thread = Thread(target=self._callback_thread_run, args=(callback, errback))
thread.start()
return thread
def wait(self):
self.finish_event.wait()
with self.mutex:
if self.errors:
raise CallErrors(self.errors)
def __iter__(self):
# Don't know how to factorize with _callback_thread_run
responses = []
while not self.finish_event.isSet() or self.response_event.isSet():
self.response_event.wait()
with self.mutex:
responses = self.responses
self.responses = []
# Reset event
self.response_event.clear()
# Consume responses
while responses:
yield responses.pop(0)
# Raise errors
with self.mutex:
if self.errors:
raise CallErrors(self.errors)
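# Usage sketch (illustrative only; the backend objects and the 'get_account'
# method name are assumptions, not defined in this module):
#
# def on_result(backend, account):
#     if backend is None: # (None, None) marks the end of the call
#         return
#     print backend, account
#
# call = BackendsCall(backends, None, 'get_account', '[email protected]')
# call.callback_thread(on_result)
#
# # or consume the results synchronously:
# # for backend, result in BackendsCall(backends, None, 'get_account', 'id'):
# #     print backend, result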
| agpl-3.0 | -295,907,511,899,842,750 | 35.106796 | 112 | 0.565071 | false | 4.692744 | false | false | false |
lujinda/replace | replace/args.py | 1 | 1706 | #/usr/bin/env python
#coding:utf-8
# Author : tuxpy
# Email : [email protected]
# Last modified : 2015-05-19 14:03:37
# Filename : args.py
# Description :
import optparse
from replace import version
import os
def parser_args():
usage = "Usage: %prog [options] target_path"
parser = optparse.OptionParser(usage,
version = version)
_help = "exclude files matching PATTERN"
parser.add_option('--filter_filename',
dest = 'filter_filename', type = str, action="append",
metavar = 'PATTERN', help = _help)
_help = 'only include files matching PATTERN(high priority)'
parser.add_option('--include_filename',
dest = 'include_filename', type = str, action="append",
metavar = 'PATTERN', help = _help)
_help = 'source re pattern'
parser.add_option('-s', '--source', type = str,
dest = 'source_re_string', help = _help)
_help = 'target string'
parser.add_option('-t', '--target', type = str,
dest = 'target_string', help = _help)
_help = 'include hidden file'
parser.add_option('-H', default = False, action = "store_true", dest="include_hidden", help = _help)
_help = 'prompt before every replace'
parser.add_option('-i', default = False,
dest = 'interactive', action = 'store_true',
help = _help)
opt, args = parser.parse_args()
if opt.source_re_string == None or opt.target_string == None:
parser.error('--source or --target be must')
for target_path in args:
if not os.path.exists(target_path):
parser.error("%s is not exists" % (target_path, ))
return opt, args
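# Command-line sketch (illustrative; the executable name is assumed, the flags
# follow the options defined above):
#
# replace -s 'foo_(\d+)' -t 'bar_\1' --include_filename '*.py' -i ./src
#
# which yields opt.source_re_string == 'foo_(\\d+)', opt.target_string == 'bar_\\1'
# and args == ['./src'].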
| gpl-3.0 | -682,240,807,930,640,600 | 29.464286 | 104 | 0.594373 | false | 3.692641 | false | false | false |
macieksmuga/server | tests/unit/test_client.py | 1 | 21768 | """
Tests for the client
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
import ga4gh.protocol as protocol
import ga4gh.backend as backend
import ga4gh.client as client
import ga4gh.datarepo as datarepo
import tests.utils as utils
import ga4gh.exceptions as exceptions
class TestSearchMethodsCallRunRequest(unittest.TestCase):
"""
Test that search methods call lower-level functionality correctly
"""
def setUp(self):
self.httpClient = client.HttpClient("http://example.com")
self.httpClient._runSearchRequest = mock.Mock()
self.httpClient._runGetRequest = mock.Mock()
self.objectId = "SomeId"
self.objectName = "objectName"
self.datasetId = "datasetId"
self.variantSetId = "variantSetId"
self.variantAnnotationSetId = "variantAnnotationSetId"
self.referenceSetId = "referenceSetId"
self.referenceId = "referenceId"
self.readGroupIds = ["readGroupId"]
self.referenceName = "referenceName"
self.start = 100
self.end = 101
self.referenceName = "referenceName"
self.callSetIds = ["id1", "id2"]
self.pageSize = 1000
self.httpClient.setPageSize(self.pageSize)
self.assemblyId = "assemblyId"
self.accession = "accession"
self.md5checksum = "md5checksum"
def testSetPageSize(self):
testClient = client.AbstractClient()
# pageSize is None by default
self.assertIsNone(testClient.getPageSize())
for pageSize in [1, 10, 100]:
testClient.setPageSize(pageSize)
self.assertEqual(testClient.getPageSize(), pageSize)
def testSearchVariants(self):
request = protocol.SearchVariantsRequest()
request.reference_name = self.referenceName
request.start = self.start
request.end = self.end
request.variant_set_id = self.variantSetId
request.call_set_ids.extend(self.callSetIds)
request.page_size = self.pageSize
self.httpClient.searchVariants(
self.variantSetId, start=self.start, end=self.end,
referenceName=self.referenceName, callSetIds=self.callSetIds)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variants", protocol.SearchVariantsResponse)
def testSearchDatasets(self):
request = protocol.SearchDatasetsRequest()
request.page_size = self.pageSize
self.httpClient.searchDatasets()
self.httpClient._runSearchRequest.assert_called_once_with(
request, "datasets", protocol.SearchDatasetsResponse)
def testSearchVariantSets(self):
request = protocol.SearchVariantSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.searchVariantSets(self.datasetId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variantsets", protocol.SearchVariantSetsResponse)
def testSearchVariantAnnotationSets(self):
request = protocol.SearchVariantAnnotationSetsRequest()
request.variant_set_id = self.variantSetId
request.page_size = self.pageSize
self.httpClient.searchVariantAnnotationSets(self.variantSetId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variantannotationsets",
protocol.SearchVariantAnnotationSetsResponse)
def testSearchVariantAnnotations(self):
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSetId
request.page_size = self.pageSize
request.reference_name = self.referenceName
request.reference_id = self.referenceId
request.start = self.start
request.end = self.end
self.httpClient.searchVariantAnnotations(
self.variantAnnotationSetId,
referenceName=self.referenceName,
start=self.start,
end=self.end,
effects=[],
referenceId=self.referenceId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variantannotations",
protocol.SearchVariantAnnotationsResponse)
with self.assertRaises(exceptions.BadRequestException):
self.httpClient.searchVariantAnnotations(
self.variantAnnotationSetId,
referenceName=self.referenceName,
start=self.start,
end=self.end,
effects=[{"term": "just a term"}, {"id": "an id"}],
referenceId=self.referenceId)
def testSearchFeatureSets(self):
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.searchFeatureSets(self.datasetId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "featuresets", protocol.SearchFeatureSetsResponse)
def testSearchReferenceSets(self):
request = protocol.SearchReferenceSetsRequest()
request.page_size = self.pageSize
request.accession = self.accession
request.md5checksum = self.md5checksum
request.assembly_id = self.assemblyId
self.httpClient.searchReferenceSets(
accession=self.accession, md5checksum=self.md5checksum,
assemblyId=self.assemblyId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "referencesets", protocol.SearchReferenceSetsResponse)
def testSearchReferences(self):
request = protocol.SearchReferencesRequest()
request.reference_set_id = self.referenceSetId
request.page_size = self.pageSize
request.accession = self.accession
request.md5checksum = self.md5checksum
self.httpClient.searchReferences(
self.referenceSetId, accession=self.accession,
md5checksum=self.md5checksum)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "references", protocol.SearchReferencesResponse)
def testSearchReadGroupSets(self):
request = protocol.SearchReadGroupSetsRequest()
request.dataset_id = self.datasetId
request.name = self.objectName
request.page_size = self.pageSize
self.httpClient.searchReadGroupSets(
self.datasetId, name=self.objectName)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "readgroupsets", protocol.SearchReadGroupSetsResponse)
def testSearchCallSets(self):
request = protocol.SearchCallSetsRequest()
request.variant_set_id = self.variantSetId
request.name = self.objectName
request.page_size = self.pageSize
self.httpClient.searchCallSets(
self.variantSetId, name=self.objectName)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "callsets", protocol.SearchCallSetsResponse)
def testSearchReads(self):
request = protocol.SearchReadsRequest()
request.read_group_ids.extend(self.readGroupIds)
request.reference_id = self.referenceId
request.start = self.start
request.end = self.end
request.page_size = self.pageSize
self.httpClient.searchReads(
self.readGroupIds, referenceId=self.referenceId,
start=self.start, end=self.end)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "reads", protocol.SearchReadsResponse)
def testGetReferenceSet(self):
self.httpClient.getReferenceSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"referencesets", protocol.ReferenceSet, self.objectId)
def testGetVariantAnnotationSet(self):
self.httpClient.getVariantAnnotationSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"variantannotationsets", protocol.VariantAnnotationSet,
self.objectId)
def testGetVariantSet(self):
self.httpClient.getVariantSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"variantsets", protocol.VariantSet, self.objectId)
def testGetReference(self):
self.httpClient.getReference(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"references", protocol.Reference, self.objectId)
def testGetReadGroupSets(self):
self.httpClient.getReadGroupSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"readgroupsets", protocol.ReadGroupSet, self.objectId)
def testGetReadGroup(self):
self.httpClient.getReadGroup(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"readgroups", protocol.ReadGroup, self.objectId)
def testGetCallSets(self):
self.httpClient.getCallSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"callsets", protocol.CallSet, self.objectId)
def testGetDatasets(self):
self.httpClient.getDataset(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"datasets", protocol.Dataset, self.objectId)
def testGetVariant(self):
self.httpClient.getVariant(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"variants", protocol.Variant, self.objectId)
class DatamodelObjectWrapper(object):
"""
Thin wrapper class that allows us to treat data model objects uniformly.
We should update the data model interface so that objects are always
returned so that we always call toProtocolElement on the results.
Variants and Reads are the exceptions here.
"""
def __init__(self, gaObject):
self.gaObject = gaObject
def toProtocolElement(self):
return self.gaObject
class DummyResponse(object):
"""
Stand in for requests Response object;
"""
def __init__(self, text):
self.text = text
self.status_code = 200
class DummyRequestsSession(object):
"""
Takes the place of a requests session so that we can check that all
values are sent and received correctly.
"""
def __init__(self, backend, urlPrefix):
self._backend = backend
self._urlPrefix = urlPrefix
self._getMethodMap = {
"datasets": self._backend.runGetDataset,
"referencesets": self._backend.runGetReferenceSet,
"references": self._backend.runGetReference,
"variantsets": self._backend.runGetVariantSet,
"variants": self._backend.runGetVariant,
"readgroupsets": self._backend.runGetReadGroupSet,
"readgroups": self._backend.runGetReadGroup,
}
self._searchMethodMap = {
"datasets": self._backend.runSearchDatasets,
"referencesets": self._backend.runSearchReferenceSets,
"references": self._backend.runSearchReferences,
"variantsets": self._backend.runSearchVariantSets,
"variants": self._backend.runSearchVariants,
"readgroupsets": self._backend.runSearchReadGroupSets,
"reads": self._backend.runSearchReads,
}
self.headers = {}
def checkSessionParameters(self):
contentType = "Content-type"
assert contentType in self.headers
assert self.headers[contentType] == "application/json"
def get(self, url, params):
# TODO add some more checks for params to see if Key is set,
# and we're not sending any extra stuff.
self.checkSessionParameters()
assert url.startswith(self._urlPrefix)
suffix = url[len(self._urlPrefix):]
basesSuffix = "/bases"
splits = suffix.split("/")
if suffix.endswith(basesSuffix):
# ListReferenceBases is an oddball and needs to be treated
# separately.
assert splits[0] == ''
assert splits[1] == 'references'
id_ = splits[2]
assert splits[3] == 'bases'
# This is all very ugly --- see the comments in the LocalClient
# for why we need to do this. Definitely needs to be fixed.
args = dict(params)
if args[u'end'] == u'0':
del args['end']
            if args['pageToken'] == "":
del args['pageToken']
result = self._backend.runListReferenceBases(id_, args)
else:
assert len(splits) == 3
assert splits[0] == ''
datatype, id_ = splits[1:]
assert datatype in self._getMethodMap
method = self._getMethodMap[datatype]
result = method(id_)
return DummyResponse(result)
def post(self, url, params=None, data=None):
self.checkSessionParameters()
assert url.startswith(self._urlPrefix)
suffix = url[len(self._urlPrefix):]
searchSuffix = "/search"
assert suffix.startswith("/")
assert suffix.endswith(searchSuffix)
datatype = suffix[1:-len(searchSuffix)]
assert datatype in self._searchMethodMap
method = self._searchMethodMap[datatype]
result = method(data)
return DummyResponse(result)
class DummyHttpClient(client.HttpClient):
"""
Client in which we intercept calls to the underlying requests connection.
"""
def __init__(self, backend):
self._urlPrefix = "http://example.com"
super(DummyHttpClient, self).__init__(self._urlPrefix)
self._session = DummyRequestsSession(backend, self._urlPrefix)
self._setupHttpSession()
class ExhaustiveListingsMixin(object):
"""
Tests exhaustive listings using the high-level API with a Simulated
backend.
"""
@classmethod
def setUpClass(cls):
cls.backend = backend.Backend(datarepo.SimulatedDataRepository(
randomSeed=100, numDatasets=3,
numVariantSets=3, numCalls=3, variantDensity=0.5,
numReferenceSets=3, numReferencesPerReferenceSet=3,
numReadGroupSets=3, numReadGroupsPerReadGroupSet=3,
numAlignments=3))
cls.dataRepo = cls.backend.getDataRepository()
def setUp(self):
self.client = self.getClient()
def verifyObjectList(self, gaObjects, datamodelObjects, getMethod):
"""
Verifies that the specified list of protocol objects corresponds
to the specified list of datamodel objects.
"""
for gaObject, datamodelObject in utils.zipLists(
gaObjects, datamodelObjects):
self.assertEqual(gaObject, datamodelObject.toProtocolElement())
otherGaObject = getMethod(gaObject.id)
self.assertEqual(gaObject, otherGaObject)
def testAllDatasets(self):
datasets = list(self.client.searchDatasets())
self.verifyObjectList(
datasets, self.dataRepo.getDatasets(), self.client.getDataset)
def testAllReferenceSets(self):
referenceSets = list(self.client.searchReferenceSets())
self.verifyObjectList(
referenceSets, self.dataRepo.getReferenceSets(),
self.client.getReferenceSet)
def testAllReferences(self):
for referenceSet in self.client.searchReferenceSets():
references = list(self.client.searchReferences(referenceSet.id))
datamodelReferences = self.dataRepo.getReferenceSet(
referenceSet.id).getReferences()
self.verifyObjectList(
references, datamodelReferences, self.client.getReference)
for datamodelReference in datamodelReferences:
bases = self.client.listReferenceBases(
datamodelReference.getId())
otherBases = datamodelReference.getBases(
0, datamodelReference.getLength())
self.assertEqual(bases, otherBases)
def testAllVariantSets(self):
for dataset in self.client.searchDatasets():
variantSets = list(self.client.searchVariantSets(dataset.id))
datamodelVariantSets = self.dataRepo.getDataset(
dataset.id).getVariantSets()
self.verifyObjectList(
variantSets, datamodelVariantSets, self.client.getVariantSet)
def testAllVariants(self):
for datamodelDataset in self.dataRepo.getDatasets():
for datamodelVariantSet in datamodelDataset.getVariantSets():
# TODO the values should be derived from the datamodel
# variant set object.
start = 0
end = 20
referenceName = "fixme"
variants = list(self.client.searchVariants(
datamodelVariantSet.getId(), start=start, end=end,
referenceName=referenceName))
datamodelVariants = [
DatamodelObjectWrapper(variant) for variant in
datamodelVariantSet.getVariants(
referenceName, start, end)]
self.verifyObjectList(
variants, datamodelVariants, self.client.getVariant)
def testAllReadGroupSets(self):
for dataset in self.client.searchDatasets():
readGroupSets = list(self.client.searchReadGroupSets(dataset.id))
datamodelReadGroupSets = self.dataRepo.getDataset(
dataset.id).getReadGroupSets()
self.verifyObjectList(
readGroupSets, datamodelReadGroupSets,
self.client.getReadGroupSet)
# Check the readGroups.
for readGroupSet, datamodelReadGroupSet in zip(
readGroupSets, datamodelReadGroupSets):
datamodelReadGroups = datamodelReadGroupSet.getReadGroups()
self.verifyObjectList(
readGroupSet.read_groups, datamodelReadGroups,
self.client.getReadGroup)
def testAllReads(self):
for dmDataset in self.dataRepo.getDatasets():
for dmReadGroupSet in dmDataset.getReadGroupSets():
dmReferenceSet = dmReadGroupSet.getReferenceSet()
for dmReadGroup in dmReadGroupSet.getReadGroups():
for dmReference in dmReferenceSet.getReferences():
# TODO fix these coordinates.
start = 0
end = 10
dmReads = list(dmReadGroup.getReadAlignments(
dmReference, start, end))
reads = list(self.client.searchReads(
[dmReadGroup.getId()], dmReference.getId(),
start, end))
self.assertGreater(len(reads), 0)
for dmRead, read in utils.zipLists(dmReads, reads):
self.assertEqual(dmRead, read)
class TestExhaustiveListingsHttp(ExhaustiveListingsMixin, unittest.TestCase):
"""
Tests the exhaustive listings using the HTTP client.
"""
def getClient(self):
return DummyHttpClient(self.backend)
class TestExhaustiveListingsLocal(ExhaustiveListingsMixin, unittest.TestCase):
"""
Tests the exhaustive listings using the local client.
"""
def getClient(self):
return client.LocalClient(self.backend)
class PagingMixin(object):
"""
Tests the paging code using a simulated backend.
"""
@classmethod
def setUpClass(cls):
cls.numReferences = 25
cls.backend = backend.Backend(datarepo.SimulatedDataRepository(
randomSeed=100, numDatasets=0,
numReferenceSets=1,
numReferencesPerReferenceSet=cls.numReferences))
cls.dataRepo = cls.backend.getDataRepository()
def setUp(self):
self.client = self.getClient()
self.datamodelReferenceSet = self.dataRepo.getReferenceSetByIndex(0)
self.datamodelReferences = self.datamodelReferenceSet.getReferences()
self.references = [
dmReference.toProtocolElement()
for dmReference in self.datamodelReferences]
self.assertEqual(len(self.references), self.numReferences)
def verifyAllReferences(self):
"""
Verifies that we correctly return all references.
"""
references = list(self.client.searchReferences(
self.datamodelReferenceSet.getId()))
self.assertEqual(references, self.references)
def testDefaultPageSize(self):
self.verifyAllReferences()
def verifyPageSize(self, pageSize):
self.client.setPageSize(pageSize)
self.assertEqual(pageSize, self.client.getPageSize())
self.verifyAllReferences()
def testPageSize1(self):
self.verifyPageSize(1)
def testPageSize2(self):
self.verifyPageSize(2)
def testPageSize3(self):
self.verifyPageSize(3)
def testPageSizeAlmostListLength(self):
self.verifyPageSize(self.numReferences - 1)
def testPageSizeListLength(self):
self.verifyPageSize(self.numReferences)
class TestPagingLocal(PagingMixin, unittest.TestCase):
"""
Tests paging using the local client.
"""
def getClient(self):
return client.LocalClient(self.backend)
class TestPagingHttp(PagingMixin, unittest.TestCase):
"""
Tests paging using the HTTP client.
"""
def getClient(self):
return DummyHttpClient(self.backend)
| apache-2.0 | 127,916,958,536,394,690 | 38.795247 | 78 | 0.652931 | false | 4.338848 | true | false | false |
streampref/wcimport | tool/query/bestseq/move.py | 1 | 6139 | # -*- coding: utf-8 -*-
'''
Queries for experiments with preference operators
'''
# =============================================================================
# Queries with preference operators
# =============================================================================
# Moves
Q_MOVE_BESTSEQ = '''
SELECT SEQUENCE IDENTIFIED BY player_id
[RANGE {ran} SECOND, SLIDE {sli} SECOND] FROM s
ACCORDING TO TEMPORAL PREFERENCES
IF PREVIOUS (move = 'rec') THEN
(move = 'drib') BETTER (move = 'pass') [place]
AND
(move = 'pass') BETTER (move = 'bpas')
AND
IF ALL PREVIOUS (place = 'mf') THEN
(place = 'mf') BETTER (place = 'di')
;
'''
# =============================================================================
# CQL Equivalences for moves
# =============================================================================
Q_MOVE_DICT = {}
Q_MOVE_ID_LIST = ['z', 'p_join', 'p', 'r1', 'r2', 'nv_ap', 'm_ap', 'r3',
'd1_pref', 'd1_npref', 'd2_pref', 'd2_npref',
'd3_pref', 'd3_npref', 'd1', 'd2', 'd3', 't1', 't2', 't3',
'id', 'equiv']
# Sequence extraction
Q_MOVE_DICT['z'] = '''
SELECT SEQUENCE IDENTIFIED BY player_id
[RANGE {ran} SECOND, SLIDE {sli} SECOND]
FROM s;
'''
# Join same positions
Q_MOVE_DICT['p_join'] = '''
SELECT z1._pos, z1.player_id AS x1, z1.place, z1.move,
z2.player_id AS x2, z2.place AS _place, z2.move AS _move
FROM z AS z1, z AS z2 WHERE z1._pos = z2._pos;
'''
# Smallest non-corresponding position (positions to be compared)
Q_MOVE_DICT['p'] = '''
SELECT MIN(_pos) AS _pos, x1, x2 FROM p_join
WHERE NOT place = _place OR NOT move = _move
GROUP BY x1, x2;
'''
# PREVIOUS condition of rule 1
Q_MOVE_DICT['r1'] = '''
SELECT p._pos, p.x1 FROM z, p
WHERE p.x1 = z.player_id AND p._pos = z._pos+1 AND z.move = 'rec';
'''
# Temporal condition of rule 2
Q_MOVE_DICT['r2'] = '''
SELECT _pos, x1 FROM p;
'''
# ALL PREVIOUS condition of rule 2
Q_MOVE_DICT['nv_ap'] = '''
SELECT MAX(_pos) AS _pos, x1 FROM p GROUP BY x1
UNION
SELECT _pos, player_id AS x1 FROM z WHERE NOT place = 'mf';
'''
Q_MOVE_DICT['m_ap'] = '''
SELECT MIN(_pos) AS _pos, x1 FROM nv_ap GROUP BY x1;
'''
Q_MOVE_DICT['r3'] = '''
SELECT p._pos, p.x1 FROM p, m_ap AS pmin
WHERE p.x1 = pmin.x1 AND p._pos <= pmin._pos AND p._pos > 1;
'''
# Preferred tuples according to rule 1
Q_MOVE_DICT['d1_pref'] = '''
SELECT r._pos, r.x1, place, move, 1 AS t FROM r1 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'drib'
UNION
SELECT r._pos, r.x1, t.place, t.move, 0 AS t
FROM r1 AS r, tup AS t WHERE t.move = 'drib';
'''
# Non-preferred tuples according to rule 1
Q_MOVE_DICT['d1_npref'] = '''
SELECT r._pos, r.x1 AS x2, place, move, 1 AS t FROM r1 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'pass'
UNION
SELECT r._pos, r.x1 AS x2, t.place, t.move, 0 AS t
FROM r1 AS r, tup AS t WHERE t.move = 'pass';
'''
# Preferred tuples according to rule 2
Q_MOVE_DICT['d2_pref'] = '''
SELECT r._pos, r.x1, place, move, 1 AS t FROM r2 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'pass'
UNION
SELECT r._pos, r.x1, t.place, t.move, 0 AS t
FROM r2 AS r, tup AS t WHERE t.move = 'pass';
'''
# Non-preferred tuples according to rule 2
Q_MOVE_DICT['d2_npref'] = '''
SELECT r._pos, r.x1 AS x2, place, move, 1 AS t FROM r2 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'bpas'
UNION
SELECT r._pos, r.x1 AS x2, t.place, t.move, 0 AS t
FROM r2 AS r, tup AS t WHERE t.move = 'bpas';
'''
# Preferred tuples according to rule 3
Q_MOVE_DICT['d3_pref'] = '''
SELECT r._pos, r.x1, place, move, 1 AS t FROM r3 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'drib'
UNION
SELECT r._pos, r.x1, t.place, t.move, 0 AS t
FROM r3 AS r, tup AS t WHERE t.move = 'drib';
'''
# Non-preferred tuples according to rule 3
Q_MOVE_DICT['d3_npref'] = '''
SELECT r._pos, r.x1 AS x2, place, move, 1 AS t FROM r3 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'pass'
UNION
SELECT r._pos, r.x1 AS x2, t.place, t.move, 0 AS t
FROM r3 AS r, tup AS t WHERE t.move = 'pass';
'''
# Direct comparisons
Q_MOVE_DICT['d1'] = '''
SELECT ri._pos, ri.x1, ri.x2, pref.place, pref.move , pref.t,
npref.place AS _place, npref.move AS _move, npref.t AS _t
FROM p AS ri, d1_pref AS pref, d1_npref AS npref
WHERE ri._pos = pref._pos AND ri._pos = npref._pos
AND ri.x1 = pref.x1 AND ri.x2 = npref.x2;
'''
Q_MOVE_DICT['d2'] = '''
SELECT ri._pos, ri.x1, ri.x2, pref.place, pref.move , pref.t,
npref.place AS _place, npref.move AS _move, npref.t AS _t
FROM p AS ri, d2_pref AS pref, d2_npref AS npref
WHERE ri._pos = pref._pos AND ri._pos = npref._pos
AND ri.x1 = pref.x1 AND ri.x2 = npref.x2
AND pref.place = npref.place;
'''
Q_MOVE_DICT['d3'] = '''
SELECT ri._pos, ri.x1, ri.x2, pref.place, pref.move , pref.t,
npref.place AS _place, npref.move AS _move, npref.t AS _t
FROM p AS ri, d3_pref AS pref, d3_npref AS npref
WHERE ri._pos = pref._pos AND ri._pos = npref._pos
AND ri.x1 = pref.x1 AND ri.x2 = npref.x2
AND pref.move = npref.move;
'''
# Transitive comparisons
Q_MOVE_DICT['t1'] = '''
SELECT * FROM d1
UNION SELECT * FROM d2
UNION SELECT * FROM d3;
'''
Q_MOVE_DICT['t2'] = '''
SELECT pref._pos, pref.x1, npref.x2, pref.place, pref.move, pref.t,
npref.place AS _place, npref.move AS _move, npref._t
FROM t1 AS pref, t1 AS npref
WHERE pref._pos = npref._pos AND pref.x1 = npref.x1 AND pref.x2 = npref.x2
AND pref._place = npref.place AND pref._move = npref.move
UNION SELECT * FROM t1;
'''
Q_MOVE_DICT['t3'] = '''
SELECT pref._pos, pref.x1, npref.x2, pref.place, pref.move, pref.t,
npref.place AS _place, npref.move AS _move, npref._t
FROM t2 AS pref, t2 AS npref
WHERE pref._pos = npref._pos AND pref.x1 = npref.x1 AND pref.x2 = npref.x2
AND pref._place = npref.place AND pref._move = npref.move
UNION SELECT * FROM t2;
'''
# ID of dominated sequences
Q_MOVE_DICT['id'] = '''
SELECT DISTINCT player_id FROM z
EXCEPT
SELECT DISTINCT x2 AS player_id FROM t3
WHERE t = 1 AND _t = 1;
'''
# Dominant sequences
Q_MOVE_DICT['equiv'] = '''
SELECT z.* FROM z, id
WHERE z.player_id = id.player_id;
'''
| gpl-3.0 | 3,218,681,803,519,124,000 | 29.849246 | 79 | 0.59684 | false | 2.579412 | false | false | false |
hperala/kontuwikibot | scripts/spamremove.py | 1 | 3739 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to remove links that are being or have been spammed.
Usage:
spamremove.py www.spammedsite.com
It will use Special:Linksearch to find the pages on the wiki that link to
that site, then for each page make a proposed change consisting of removing
all the lines where that url occurs. You can choose to:
* accept the changes as proposed
* edit the page yourself to remove the offending link
* not change the page in question
Command line options:
-always Do not ask, but remove the lines automatically. Be very
careful in using this option!
-namespace: Filters the search to a given namespace. If this is specified
multiple times it will search all given namespaces
"""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: c728e9bcc488a9695bca883a5fc654f3cf0197b9 $'
#
import pywikibot
from pywikibot import i18n
from pywikibot.editor import TextEditor
def main(*args):
"""
Process command line arguments and perform task.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
always = False
namespaces = []
spamSite = ''
for arg in pywikibot.handle_args(args):
if arg == "-always":
always = True
elif arg.startswith('-namespace:'):
try:
namespaces.append(int(arg[len('-namespace:'):]))
except ValueError:
namespaces.append(arg[len('-namespace:'):])
else:
spamSite = arg
if not spamSite:
pywikibot.showHelp()
pywikibot.output(u"No spam site specified.")
return
mysite = pywikibot.Site()
pages = mysite.exturlusage(spamSite, namespaces=namespaces, content=True)
summary = i18n.twtranslate(mysite, 'spamremove-remove',
{'url': spamSite})
for i, p in enumerate(pages, 1):
text = p.text
if spamSite not in text:
continue
# Show the title of the page we're working on.
# Highlight the title in purple.
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% p.title())
lines = text.split('\n')
newpage = []
lastok = ""
for line in lines:
if spamSite in line:
if lastok:
pywikibot.output(lastok)
pywikibot.output('\03{lightred}%s\03{default}' % line)
lastok = None
else:
newpage.append(line)
if line.strip():
if lastok is None:
pywikibot.output(line)
lastok = line
if always:
answer = "y"
else:
answer = pywikibot.input_choice(
u'\nDelete the red lines?',
[('yes', 'y'), ('no', 'n'), ('edit', 'e')],
'n', automatic_quit=False)
if answer == "n":
continue
elif answer == "e":
editor = TextEditor()
newtext = editor.edit(text, highlight=spamSite,
jumpIndex=text.find(spamSite))
else:
newtext = "\n".join(newpage)
if newtext != text:
p.text = newtext
p.save(summary)
else:
if "i" not in locals():
pywikibot.output('No page found.')
elif i == 1:
        pywikibot.output('1 page done.')
else:
pywikibot.output('%d pages done.' % i)
if __name__ == '__main__':
main()
| mit | -3,847,130,002,318,686,000 | 28.912 | 79 | 0.554426 | false | 4.201124 | false | false | false |
olichtne/python-perfrepo | perfrepo/PerfRepoTest.py | 1 | 3986 | """
This module contains the PerfRepoTest class.
Copyright 2015 Red Hat, Inc.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
[email protected] (Ondrej Lichtner)
"""
import textwrap
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, iselement
from perfrepo.PerfRepoObject import PerfRepoObject
from perfrepo.PerfRepoMetric import PerfRepoMetric
from perfrepo.Common import PerfRepoException
from perfrepo.Common import indent
class PerfRepoTest(PerfRepoObject):
def __init__(self, xml=None):
if xml is None:
self._id = None
self._name = None
self._uid = None
self._description = ""
self._groupid = None
self._metrics = []
elif isinstance(xml, str) or isinstance(xml, bytes) or iselement(xml):
if isinstance(xml, str) or isinstance(xml, bytes):
root = ElementTree.fromstring(xml)
else:
root = xml
if root.tag != "test":
raise PerfRepoException("Invalid xml.")
self._id = root.get("id")
self._name = root.get("name")
self._uid = root.get("uid")
self._groupid = root.get("groupId")
if root.find("description") is not None:
self._description = root.find("description").text
else:
self._description = ""
self._metrics = []
for metric in root.find("metrics"):
if metric.tag != "metric":
continue
self._metrics.append(PerfRepoMetric(metric))
else:
raise PerfRepoException("Parameter xml must be"\
" a string, an Element or None")
def get_obj_url(self):
return "test/%s" % self._id
def get_id(self):
return self._id
def get_name(self):
return self._name
def get_uid(self):
return self._uid
def get_description(self):
return self._description
def get_groupid(self):
return self._groupid
def get_metrics(self):
return self._metrics
def set_id(self, id):
self._id = id
def set_name(self, name):
self._name = name
def set_uid(self, uid):
self._uid = uid
def set_description(self, description):
self._description = description
def set_groupid(self, groupid):
self._groupid = groupid
def add_metric(self, metric):
if not isinstance(metric, PerfRepoMetric):
return None
else:
self._metrics.append(metric)
return metric
def to_xml(self):
root = Element('test')
self._set_element_atrib(root, 'id', self._id)
self._set_element_atrib(root, 'name', self._name)
self._set_element_atrib(root, 'uid', self._uid)
self._set_element_atrib(root, 'groupId', self._groupid)
description = ElementTree.SubElement(root, 'description')
description.text = self._description
metrics = ElementTree.SubElement(root, 'metrics')
for metric in self._metrics:
metrics.append(metric.to_xml())
return root
def __str__(self):
ret_str = """\
id = %s
uid = %s
name = %s
groupid = %s
description:
""" % (self._id,
self._uid,
self._name,
self._groupid)
ret_str = textwrap.dedent(ret_str)
ret_str += indent(str(self._description) + "\n", 4)
ret_str += "metrics:\n"
for metric in self._metrics:
ret_str += indent(str(metric) + "\n", 4)
ret_str += indent("------------------------\n", 4)
return textwrap.dedent(ret_str)
| gpl-2.0 | -8,145,718,045,749,561,000 | 29.899225 | 78 | 0.544656 | false | 4.222458 | false | false | false |
daonb/tumulus | tumuli/urls.py | 1 | 1745 | """tumuli URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from rest_framework.urlpatterns import format_suffix_patterns
from biography import views
urlpatterns = [
path('admin/', admin.site.urls),
# path('api/', include(biography.urls)),
url('api/bio/(?P<username>.+)/', views.BiographyByUserList.as_view()), # get user's Bio by username
url('^api/periods/(?P<username>.+)/$', views.PeriodByUserList.as_view()), # get user's Periods by username
url('^api/memoirs/(?P<username>.+)/$', views.MemoirsByUserList.as_view()),
# get user's Memoirs by username
url('^api/contentatom/(?P<username>.+)/$', views.ContentAtomByUserList.as_view()),
# get user's Content Atoms by username
]
urlpatterns = format_suffix_patterns(urlpatterns)
try:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
except ImproperlyConfigured:
# it's on S3, nothing for us to do
pass
| agpl-3.0 | 1,101,324,068,289,894,900 | 38.659091 | 110 | 0.718052 | false | 3.681435 | false | false | false |
muccg/rdrf | scripts/check_views.py | 1 | 5203 | '''
TO DO:
- Further abstract states (maybe find some way of removing reliance
on indices)
- Add comments to provide full information on code
- Create unit tests for script (view with mixin, view w/out mixin
with decorators, no mixin no decorators)
'''
import os
import re
import sys
from os.path import abspath, join
check_decorator_strings = [
'@method_decorator(login_required)',
'@login_required',
]
check_method_strings = [
'def get(',
'def post(',
]
ignore_dirs = set([
'build',
])
vcheck_states = {
's': "SEARCH",
'v': "INVIEW",
}
whitelist = [
'ClinicianActivationView',
'CopyrightView',
'LandingView',
'PatientsListingView',
'PromsCompletedPageView',
'PromsLandingPageView',
'PromsView',
'RecaptchaValidator',
'RegistryListView',
'RegistryView',
'RouterView',
'SurveyEndpoint',
'UsernameLookup',
]
def get_lines(file_name, file_dir):
full_file = join(file_dir, file_name)
with open(full_file) as open_file:
lines = open_file.readlines()
return lines, full_file
def get_superclass(class_text):
super_strings = []
ret_strings = []
if re.match(r'^class', class_text) is not None:
super_strings = re.split(r'^class.+\(|,|\):', class_text)
for substr in super_strings:
if substr != "":
ret_strings.append(substr.strip())
return ret_strings
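# Illustrative example (hypothetical class name): for the line
# "class FooView(View, LoginRequiredMixin):", get_superclass returns
# ['View', 'LoginRequiredMixin'], i.e. only the base classes, not the class itself.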
def find_view(line_text):
state_n = 's'
view_n = ''
# Check line
superclasses = get_superclass(line_text)
if superclasses != [] and "View" in superclasses:
# Change to "in-view" state if check for mixin is false
if "LoginRequiredMixin" not in superclasses:
state_n = 'v'
view_n = re.findall(r'class (.+)\(', line_text)[0]
return state_n, view_n
def validate_view(line_text, v_lines, v_index):
has_failed = False
# Check for get/post
if any(met_str in line_text for met_str in check_method_strings):
# Check if get/post has a decorator - if not, add to list
if not any(dec_str in v_lines[v_index - 1] for
dec_str in check_decorator_strings):
has_failed = True
return has_failed
def search_and_check_views(cur_line, all_lines, line_index,
cur_state, cur_view):
view_failed = False
# Change back to normal search once normal indent level is reached
# (use regex to match no leading whitespace and no comments)
if re.match(r'^[^\s\#]', cur_line) is not None:
cur_state = 's'
# Redefine current state
new_state = vcheck_states[cur_state]
# Search until view is found
if new_state == "SEARCH":
cur_state, cur_view = find_view(cur_line)
# While in "in-view" state, look for get/post methods
elif new_state == "INVIEW":
view_failed = validate_view(cur_line, all_lines, line_index)
return view_failed, cur_state, cur_view
def remove_whitelisted(insecure_dict):
remove_files = []
for bad_file, bad_views in insecure_dict.items():
remove_views = []
for bad_view in bad_views:
if bad_view in whitelist:
remove_views.append(bad_view)
for rm_view in remove_views:
insecure_dict[bad_file].remove(rm_view)
if insecure_dict[bad_file] == []:
remove_files.append(bad_file)
for rm_file in remove_files:
insecure_dict.pop(rm_file)
def show_bad_views(file_view_dict):
if len(file_view_dict) > 0:
print("Non-secure views found:")
for bad_file, bad_views in file_view_dict.items():
print(f"File: {bad_file}")
print("Views:")
for bad_view in bad_views:
print(bad_view)
sys.exit(1)
else:
print("Views secure.")
def check_view_security():
files_and_views = {}
# Not the best, but this way only one base directory is read.
# Perhaps do some error handling if a directory isn't passed in
dir_name = abspath(sys.argv[1])
for base_dir, sub_dirs, files in os.walk(dir_name, topdown=True):
# Don't check certain folders - removes duplicates
sub_dirs[:] = [s_dir for s_dir in sub_dirs if
s_dir not in ignore_dirs]
for f_name in files:
if re.match(r'.+\.py$', f_name) is not None:
f_lines, full_f_name = get_lines(f_name, base_dir)
state = 's'
view = ''
view_list = []
for index, line_var in enumerate(f_lines):
weak_view, state, view = search_and_check_views(
line_var, f_lines, index, state, view
)
if weak_view:
if view not in view_list:
view_list.append(view)
if view_list != []:
files_and_views.update({full_f_name: view_list})
remove_whitelisted(files_and_views)
show_bad_views(files_and_views)
# Run the primary function if this is being used standalone
if __name__ == "__main__":
check_view_security()
| agpl-3.0 | 6,208,441,592,899,194,000 | 27.277174 | 71 | 0.582933 | false | 3.603186 | false | false | false |
antmicro/distant-rec | tools/shebang-replace.py | 1 | 1211 | #!/usr/bin/env python3
import sys
from os import listdir, chdir
from os.path import isfile, abspath
UNTIL = '/build/'
REPLACE_WITH = '/b/f/w'
def bangchange(file_path):
script = File(file_path)
if script.flist[0].find("#!") == 0:
if script.flist[0].find(UNTIL) > 0:
print("\033[92m" + "[MOD] {}".format(file_path))
where_divide = script.flist[0].find(UNTIL)
script.flist[0] = "#!" + REPLACE_WITH + script.flist[0][where_divide:]
script.flush()
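# Illustrative example (hypothetical path): a script whose first line is
# "#!/home/user/build/venv/bin/python3" is rewritten to
# "#!/b/f/w/build/venv/bin/python3"; everything before UNTIL is replaced by REPLACE_WITH.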
class File:
def __init__(self, path):
self.fh = open(path, "r+")
try:
self.fstring = self.fh.read()
except UnicodeDecodeError:
print("\033[94m" + "[SKP] {}".format(path))
self.fstring = ""
self.flist = self.fstring.split("\n")
def flush(self):
self.fstring = "\n".join(self.flist)
self.fh.seek(0)
        self.fh.write(self.fstring)
        self.fh.truncate()  # drop any leftover bytes when the rewritten content is shorter
        self.fh.close()
def main():
if len(sys.argv) != 2:
print("\033[91m"+"[FAIL] Invalid arguments")
return 1
chdir(sys.argv[1])
for filename in listdir("."):
if isfile(abspath(filename)):
bangchange(filename)
main()
| apache-2.0 | -7,748,558,177,413,436,000 | 24.765957 | 82 | 0.549959 | false | 3.220745 | false | false | false |
bgoli/cbmpy-build | ubuntu/1_install_cbmpy_dependencies.py | 1 | 6549 | # Detect all MetaToolKit depencies on Ubuntu and create a custom script to install them.
# Tested on Ubuntu 14.04, 16.04
# Author Brett G. Olivier ([email protected])
# (C) All rights reserved, Brett G. Olivier, Amsterdam 2016.
import os, subprocess, itertools, stat
UBUNTU = CONDA = False
try:
print(os.sys.argv)
arg = os.sys.argv[1]
except:
arg = 'UBUNTU'
if arg == 'UBUNTU':
UBUNTU = True
elif arg == 'CONDA':
CONDA = True
else:
print('\nPlease call script with CONDA as argument for Anaconda install script, defaulting to UBUNTU')
UBUNTU = True
res = {'Required' : {},\
'Optional' : {}
}
# First lets check for some Python essentials
reqcnt = itertools.count(1,1)
optcnt = itertools.count(1,1)
# this should just be there for any sane python build environment
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install build-essential g++ gfortran python python-dev'
if CONDA:
pass
#res['Required'][reqcnt.next()] = 'conda update -y conda # if this is the only required package ignore it'
try:
import pip
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-pip'
try:
import numpy
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-numpy'
try:
import sympy
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-sympy'
try:
import xlrd
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-xlrd'
try:
import xlwt
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-xlwt'
try:
import matplotlib
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-matplotlib'
try:
import PyQt4
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-qt4'
elif CONDA:
res['Required'][reqcnt.next()] = 'conda install -y pyqt=4.11.4'
try:
import Bio
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-biopython'
elif CONDA:
res['Required'][reqcnt.next()] = 'conda install -y biopython'
try:
import nose
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-nose'
try:
import docx
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -EH pip install docx'
elif CONDA:
res['Required'][reqcnt.next()] = 'pip install docx'
try:
import libsbml
if libsbml.LIBSBML_VERSION < 51201:
print('\nPlease update to the latest version of libSBML.\n')
raise ImportError
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install libxml2 libxml2-dev libxslt-dev zlib1g zlib1g-dev bzip2 libbz2-dev'
res['Required'][reqcnt.next()] = 'sudo -EH pip install --upgrade python-libsbml'
elif CONDA:
res['Required'][reqcnt.next()] = 'conda install -c SBMLTeam -y python-libsbml'
try:
import cbmpy
except:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -EH pip install --upgrade cbmpy'
res['Required'][reqcnt.next()] = 'sudo python -c "import cbmpy"'
if CONDA:
res['Required'][reqcnt.next()] = 'pip install cbmpy'
res['Required'][reqcnt.next()] = 'python -c "import cbmpy"'
try:
out = subprocess.call(['java', '-version'])
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install default-jre'
try:
out = subprocess.call(['perl', '-v'])
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install perl'
p_script = """\
my $rc = 0;
$rc = eval
{
require XML::Parser;
XML::Parser->import();
1;
};
if ($rc){
exit 0
} else {
exit 1
}
"""
try:
PF = file('_test.pl', 'w')
PF.write(p_script)
PF.close()
out = int(subprocess.call(['perl', '_test.pl']))
if out:
raise OSError
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install libxml-parser-perl'
try:
out = subprocess.call(['blastall'])
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install blast2'
# Optional/recommended
# https://github.com/bgoli/cbmpy-glpk
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo apt-get -y install git cython libxml2-dev libxslt-dev'
try:
import IPython
except ImportError:
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo -E apt-get -y install ipython ipython-notebook'
try:
import suds
except ImportError:
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo -E apt-get -y install python-suds'
elif CONDA:
res['Optional'][optcnt.next()] = 'pip install suds'
try:
import flask
except ImportError:
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo -E apt-get -y install python-flask'
if UBUNTU or CONDA:
bash_script="""\
while true; do
read -p "Do you wish to install *{}* MetaToolkit dependencies? [y/n]: " yn
case $yn in
[Yy]* ) echo "Installing ..."; break;;
[Nn]* ) exit;;
* ) echo "Please enter y/n.";;
esac
done
"""
bash_script="""\
# {}
"""
output = '#!/bin/sh\n\n'
output += '#MetaToolkit: Ubuntu system requirements check\n'
output += '#=============================================\n\n'
REQUIRE_USER_INPUT = False
for r in res:
if len(res[r]) > 0:
if REQUIRE_USER_INPUT:
output += bash_script.format(r)
output += '#{}\n#{}\n\n'.format(r, '-'*len(r))
resk = list(res[r])
resk.sort()
for k in resk:
if k != None:
output += '{}\n'.format(res[r][k])
output += '\n'
output += 'exit\n\n'
fname = 'metatoolkit_install_dependencies.sh'
F = file(fname, 'w')
F.write(output)
F.close()
os.chmod(fname, stat.S_IRWXU)
print('')
print(output)
print('\n\nInstall script (shown above) saved as file: {}\nplease examine it carefully and run. Alternatively install individual dependencies manually').format(fname)
| gpl-3.0 | -6,120,319,862,562,689,000 | 28.90411 | 170 | 0.603604 | false | 3.266334 | false | false | false |
j4k0bk/pyidaemon | replies.py | 1 | 7720 |
# FIXME: Move these messages to somewhere else
NUMERIC_REPLIES = {
'001' : 'Welcome to the Internet Relay Network %s',
'002' : 'Your host is %s, running version %s',
'003' : 'This server was created %s',
'004' : '<servername> <version> <available user modes> <available channel modes>',
'318' : 'End of WHOIS list',
'331' : 'No topic is set',
'366' : 'End of /NAMES list.',
'401' : 'No such nick',
'403' : 'No such channel',
'404' : 'Cannot send to channel',
'405' : 'You have joined too many channels',
'411' : 'No recipient given (%s)',
'412' : 'No text to send',
'421' : 'Unknown command',
'431' : 'No nickname given',
'432' : 'Erroneous nickname',
'433' : 'Nickname is already in use',
'442' : 'You\'re not on that channel',
'451' : 'You have not registered',
'461' : 'Not enough parameters',
'475' : 'Cannot join channel (+k)',
}
# -- IRC REPLIES -------------------------------------------------------------
SERVER = 0
THIS_USER = 1
class IRCReply(object):
def __init__(self, source, cmd, args=[], msg=None, msg_args=None):
self.source = source
self.cmd = cmd # May be a 3-digit numeric
self.args = args
self.msg = msg
self.nick = None
if self.msg is None:
self.msg = NUMERIC_REPLIES.get(cmd)
if self.msg and msg_args:
self.msg = self.msg % msg_args
def __str__(self):
words = []
if self.source:
words.append(':%s' % self.source)
words.append(self.cmd)
if self.nick:
words.append(self.nick)
if self.args:
words.append(' '.join(self.args))
if not self.msg is None:
words.append(':%s' % self.msg)
return ' '.join(words)
class Welcome(IRCReply):
"""001 RPL_WELCOME"""
def __init__(self, nick):
IRCReply.__init__(self, SERVER, '001', msg_args=nick)
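# Illustrative example (assumed nick): str(Welcome('alice')) renders as
# "001 :Welcome to the Internet Relay Network alice"; since SERVER (0) is falsy,
# no ":<source>" prefix is added to the reply.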
class YourHost(IRCReply):
"""002 RPL_YOURHOST"""
def __init__(self, server, version):
IRCReply.__init__(self, SERVER, '002', msg_args=(server, version))
class Created(IRCReply):
"""003 RPL_CREATED"""
def __init__(self, date):
IRCReply.__init__(self, SERVER, '003', msg_args=date)
class MyInfo(IRCReply): # FIXME
"""004 RPL_MYINFO"""
def __init__(self):
IRCReply.__init__(self, SERVER, '004')
class Names(IRCReply):
"""353 RPL_NAMREPLY"""
def __init__(self, channel_name, names):
msg = ' '.join(names)
IRCReply.__init__(self, SERVER, '353', ['@', channel_name], msg=msg)
class EndOfNames(IRCReply):
"""366 RPL_ENDOFNAMES"""
def __init__(self, channel_name):
IRCReply.__init__(self, SERVER, '366', [channel_name])
class WhoIsUser(IRCReply):
"""311 RPL_WHOISUSER"""
def __init__(self, nick, user, host, realname):
args = [nick, user, host, '*']
IRCReply.__init__(self, SERVER, '311', args, msg=realname)
class WhoIsServer(IRCReply):
"""312 RPL_WHOISSERVER"""
def __init__(self, nick, server_name, server_info):
args = [nick, server_name]
IRCReply.__init__(self, SERVER, '312', args, msg=server_info)
class EndOfWhoIs(IRCReply):
"""318 RPL_ENDOFWHOIS"""
def __init__(self, nick):
IRCReply.__init__(self, SERVER, '318', [nick])
class WhoIsChannels(IRCReply):
"""319 RPL_WHOISCHANNELS"""
def __init__(self, nick, channels):
if isinstance(channels, list):
channels = ' '.join(channels)
IRCReply.__init__(self, SERVER, '319', [nick], msg=channels)
class Ping(IRCReply):
def __init__(self, server):
IRCReply.__init__(self, SERVER, 'PING', [server])
class Pong(IRCReply):
def __init__(self, server):
IRCReply.__init__(self, SERVER, 'PONG', [server])
class Nick(IRCReply):
def __init__(self, user, new_nick):
IRCReply.__init__(self, user.mask, 'NICK', [new_nick])
class Join(IRCReply):
def __init__(self, user, channel_name):
IRCReply.__init__(self, user.mask, 'JOIN', [channel_name])
class Part(IRCReply):
def __init__(self, user, channel_name, msg=None):
IRCReply.__init__(self, user.mask, 'PART', [channel_name], msg=msg)
class Topic(IRCReply):
def __init__(self, channel_name, topic):
IRCReply.__init__(self, SERVER, '332', [channel_name], msg=topic)
class TopicSet(IRCReply):
def __init__(self, user, channel_name, topic):
IRCReply.__init__(self, user.mask, 'TOPIC', [channel_name], msg=topic)
class NoTopic(IRCReply):
def __init__(self, channel_name):
IRCReply.__init__(self, SERVER, '331', [channel_name])
class PrivMsg(IRCReply):
def __init__(self, from_user, target, msg):
IRCReply.__init__(self, from_user.mask, 'PRIVMSG', [target], msg=msg)
class Notice(IRCReply):
def __init__(self, from_user, target, msg):
IRCReply.__init__(self, from_user.mask, 'NOTICE', [target], msg=msg)
# -- IRC ERRORS --------------------------------------------------------------
class IRCError(IRCReply, Exception):
def __init__(self, *args, **kwargs):
IRCReply.__init__(self, SERVER, *args, **kwargs)
class NoSuchNick(IRCError):
"""401 ERR_NOSUCHNICK"""
def __init__(self, nick):
IRCError.__init__(self, '401', [nick])
class NoSuchChannel(IRCError):
"""403 ERR_NOSUCHCHANNEL"""
def __init__(self, channel_name):
IRCError.__init__(self, '403', [channel_name])
class CanNotSendToChan(IRCError):
"""404 ERR_CANNOTSENDTOCHAN"""
def __init__(self, channel_name):
IRCError.__init__(self, '404', [channel_name])
class TooManyChannels(IRCError):
"""405 ERR_TOOMANYCHANNELS"""
def __init__(self, channel_name):
IRCError.__init__(self, '405', [channel_name])
class NoRecipient(IRCError):
"""411 ERR_NORECIPIENT"""
def __init__(self, cmd):
IRCError.__init__(self, '411', msg_args=cmd.upper())
class NoTextToSend(IRCError):
"""412 ERR_NOTEXTTOSEND"""
def __init__(self):
IRCError.__init__(self, '412')
class UnknownCommand(IRCError):
"""421 ERR_UNKNOWNCOMMAND"""
def __init__(self, cmd):
IRCError.__init__(self, '421', [cmd.upper()])
class NoNicknameGiven(IRCError):
"""431 ERR_NONICKNAMEGIVEN"""
def __init__(self):
IRCError.__init__(self, '431')
class ErroneousNickname(IRCError):
"""432 ERR_ERRONEUSNICKNAME"""
def __init__(self, nick):
IRCError.__init__(self, '432', [nick])
class NicknameInUse(IRCError):
"""433 ERR_NICKNAMEINUSE"""
def __init__(self, nick):
IRCError.__init__(self, '433', [nick])
class NotOnChannel(IRCError):
"""442 ERR_NOTONCHANNEL"""
def __init__(self, channel_name):
IRCError.__init__(self, '442', [channel_name])
class NotRegistered(IRCError):
"""451 ERR_NOTREGISTERED"""
def __init__(self):
IRCError.__init__(self, '451')
class NeedMoreParams(IRCError):
"""461 ERR_NEEDMOREPARAMS"""
def __init__(self, cmd):
IRCError.__init__(self, '461', [cmd.upper()])
class BadChannelKey(IRCError):
"""475 ERR_BADCHANNELKEY"""
def __init__(self, channel_name):
IRCError.__init__(self, '475', [channel_name])
if __name__ == '__main__':
reply = YourHost('server', 'version')
print str(reply)
try:
raise BadChannelKey('#chan')
except IRCError, e:
print str(e)
| gpl-2.0 | 7,832,151,543,027,428,000 | 26.278388 | 86 | 0.553886 | false | 3.341991 | false | false | false |
espressif/esp-idf | tools/mkdfu.py | 1 | 9975 | #!/usr/bin/env python
#
# Copyright 2020-2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This program creates archives compatible with ESP32-S* ROM DFU implementation.
#
# The archives are in CPIO format. Each file which needs to be flashed is added to the archive
# as a separate file. In addition to that, a special index file, 'dfuinfo0.dat', is created.
# This file must be the first one in the archive. It contains binary structures describing each
# subsequent file (for example, where the file needs to be flashed/loaded).
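#
# An illustrative invocation (addresses and file names are examples only):
#
#   python mkdfu.py write -o dfu.bin --pid 2 \
#       0x1000 bootloader.bin 0x8000 partition-table.bin 0x10000 app.bin
#
# This packs the listed binaries, plus the generated dfuinfo0.dat index, into a
# single CPIO archive terminated by the DFU suffix and CRC expected by the ROM.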
from __future__ import print_function, unicode_literals
import argparse
import hashlib
import json
import os
import struct
import zlib
from collections import namedtuple
from functools import partial
from future.utils import iteritems
try:
import typing
except ImportError:
# Only used for type annotations
pass
try:
from itertools import izip as zip # type: ignore
except ImportError:
# Python 3
pass
# CPIO ("new ASCII") format related things
CPIO_MAGIC = b'070701'
CPIO_STRUCT = b'=6s' + b'8s' * 13
CPIOHeader = namedtuple(
'CPIOHeader',
[
'magic',
'ino',
'mode',
'uid',
'gid',
'nlink',
'mtime',
'filesize',
'devmajor',
'devminor',
'rdevmajor',
'rdevminor',
'namesize',
'check',
],
)
CPIO_TRAILER = 'TRAILER!!!'
def make_cpio_header(
filename_len, file_len, is_trailer=False
): # type: (int, int, bool) -> CPIOHeader
""" Returns CPIOHeader for the given file name and file size """
def as_hex(val): # type: (int) -> bytes
return '{:08x}'.format(val).encode('ascii')
hex_0 = as_hex(0)
mode = hex_0 if is_trailer else as_hex(0o0100644)
nlink = as_hex(1) if is_trailer else hex_0
return CPIOHeader(
magic=CPIO_MAGIC,
ino=hex_0,
mode=mode,
uid=hex_0,
gid=hex_0,
nlink=nlink,
mtime=hex_0,
filesize=as_hex(file_len),
devmajor=hex_0,
devminor=hex_0,
rdevmajor=hex_0,
rdevminor=hex_0,
namesize=as_hex(filename_len),
check=hex_0,
)
# DFU format related things
# Structure of one entry in dfuinfo0.dat
DFUINFO_STRUCT = b'<I I 64s 16s'
DFUInfo = namedtuple('DFUInfo', ['address', 'flags', 'name', 'md5'])
DFUINFO_FILE = 'dfuinfo0.dat'
# Structure which gets added at the end of the entire DFU file
DFUSUFFIX_STRUCT = b'<H H H H 3s B'
DFUSuffix = namedtuple(
'DFUSuffix', ['bcd_device', 'pid', 'vid', 'bcd_dfu', 'sig', 'len']
)
ESPRESSIF_VID = 12346
# This CRC32 gets added after DFUSUFFIX_STRUCT
DFUCRC_STRUCT = b'<I'
def dfu_crc(data, crc=0): # type: (bytes, int) -> int
""" Calculate CRC32/JAMCRC of data, with an optional initial value """
uint32_max = 0xFFFFFFFF
return uint32_max - (zlib.crc32(data, crc) & uint32_max)
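# For example, dfu_crc(b'') == 0xFFFFFFFF, since JAMCRC is the bitwise complement
# of the zlib CRC32 (which is 0 for empty input).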
def pad_bytes(b, multiple, padding=b'\x00'): # type: (bytes, int, bytes) -> bytes
""" Pad 'b' to a length divisible by 'multiple' """
padded_len = (len(b) + multiple - 1) // multiple * multiple
return b + padding * (padded_len - len(b))
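# For example, pad_bytes(b'abc', 4) == b'abc\x00' and pad_bytes(b'abcd', 4) == b'abcd'.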
class EspDfuWriter(object):
def __init__(self, dest_file, pid, part_size): # type: (typing.BinaryIO, int, int) -> None
self.dest = dest_file
self.pid = pid
self.part_size = part_size
self.entries = [] # type: typing.List[bytes]
self.index = [] # type: typing.List[DFUInfo]
def add_file(self, flash_addr, path): # type: (int, str) -> None
"""
Add file to be written into flash at given address
Files are split up into chunks in order avoid timing-out during erasing large regions. Instead of adding
"app.bin" at flash_addr it will add:
1. app.bin at flash_addr # sizeof(app.bin) == self.part_size
2. app.bin.1 at flash_addr + self.part_size
3. app.bin.2 at flash_addr + 2 * self.part_size
...
"""
f_name = os.path.basename(path)
with open(path, 'rb') as f:
for i, chunk in enumerate(iter(partial(f.read, self.part_size), b'')):
n = f_name if i == 0 else '.'.join([f_name, str(i)])
self._add_cpio_flash_entry(n, flash_addr, chunk)
flash_addr += len(chunk)
def finish(self): # type: () -> None
""" Write DFU file """
# Prepare and add dfuinfo0.dat file
dfuinfo = b''.join([struct.pack(DFUINFO_STRUCT, *item) for item in self.index])
self._add_cpio_entry(DFUINFO_FILE, dfuinfo, first=True)
# Add CPIO archive trailer
self._add_cpio_entry(CPIO_TRAILER, b'', trailer=True)
# Combine all the entries and pad the file
out_data = b''.join(self.entries)
cpio_block_size = 10240
out_data = pad_bytes(out_data, cpio_block_size)
# Add DFU suffix and CRC
dfu_suffix = DFUSuffix(0xFFFF, self.pid, ESPRESSIF_VID, 0x0100, b'UFD', 16)
out_data += struct.pack(DFUSUFFIX_STRUCT, *dfu_suffix)
out_data += struct.pack(DFUCRC_STRUCT, dfu_crc(out_data))
# Finally write the entire binary
self.dest.write(out_data)
def _add_cpio_flash_entry(
self, filename, flash_addr, data
): # type: (str, int, bytes) -> None
md5 = hashlib.md5()
md5.update(data)
self.index.append(
DFUInfo(
address=flash_addr,
flags=0,
name=filename.encode('utf-8'),
md5=md5.digest(),
)
)
self._add_cpio_entry(filename, data)
def _add_cpio_entry(
self, filename, data, first=False, trailer=False
): # type: (str, bytes, bool, bool) -> None
filename_b = filename.encode('utf-8') + b'\x00'
cpio_header = make_cpio_header(len(filename_b), len(data), is_trailer=trailer)
entry = pad_bytes(
struct.pack(CPIO_STRUCT, *cpio_header) + filename_b, 4
) + pad_bytes(data, 4)
if not first:
self.entries.append(entry)
else:
self.entries.insert(0, entry)
def action_write(args): # type: (typing.Mapping[str, typing.Any]) -> None
writer = EspDfuWriter(args['output_file'], args['pid'], args['part_size'])
for addr, f in args['files']:
print('Adding {} at {:#x}'.format(f, addr))
writer.add_file(addr, f)
writer.finish()
print('"{}" has been written. You may proceed with DFU flashing.'.format(args['output_file'].name))
if args['part_size'] % (4 * 1024) != 0:
print('WARNING: Partition size of DFU is not multiple of 4k (4096). You might get unexpected behavior.')
def main(): # type: () -> None
parser = argparse.ArgumentParser()
# Provision to add "info" command
subparsers = parser.add_subparsers(dest='command')
write_parser = subparsers.add_parser('write')
write_parser.add_argument('-o', '--output-file',
help='Filename for storing the output DFU image',
required=True,
type=argparse.FileType('wb'))
write_parser.add_argument('--pid',
required=True,
type=lambda h: int(h, 16),
help='Hexa-decimal product indentificator')
write_parser.add_argument('--json',
help='Optional file for loading "flash_files" dictionary with <address> <file> items')
write_parser.add_argument('--part-size',
default=os.environ.get('ESP_DFU_PART_SIZE', 512 * 1024),
type=lambda x: int(x, 0),
help='Larger files are split-up into smaller partitions of this size')
write_parser.add_argument('files',
metavar='<address> <file>', help='Add <file> at <address>',
nargs='*')
args = parser.parse_args()
def check_file(file_name): # type: (str) -> str
if not os.path.isfile(file_name):
raise RuntimeError('{} is not a regular file!'.format(file_name))
return file_name
files = []
if args.files:
files += [(int(addr, 0), check_file(f_name)) for addr, f_name in zip(args.files[::2], args.files[1::2])]
if args.json:
json_dir = os.path.dirname(os.path.abspath(args.json))
def process_json_file(path): # type: (str) -> str
'''
The input path is relative to json_dir. This function makes it relative to the current working
directory.
'''
return check_file(os.path.relpath(os.path.join(json_dir, path), start=os.curdir))
with open(args.json) as f:
files += [(int(addr, 0),
process_json_file(f_name)) for addr, f_name in iteritems(json.load(f)['flash_files'])]
files = sorted([(addr, f_name.decode('utf-8') if isinstance(f_name, type(b'')) else f_name) for addr, f_name in iteritems(dict(files))],
key=lambda x: x[0]) # remove possible duplicates and sort based on the address
cmd_args = {'output_file': args.output_file,
'files': files,
'pid': args.pid,
'part_size': args.part_size,
}
{'write': action_write
}[args.command](cmd_args)
if __name__ == '__main__':
main()
| apache-2.0 | -567,022,162,495,438,100 | 34.37234 | 140 | 0.589975 | false | 3.511088 | false | false | false |
johnmgregoire/JCAPdatavis | createdlist_benchmarkingstepCA.py | 1 | 2595 | import numpy, pylab, os, sys, csv, pickle
from echem_plate_fcns import *
from echem_plate_math import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
homefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/benchmarking'
subfold_sample_fnstartl_vshl=[\
('NiFeCoCe50301703/stepCA', 170, ['complete02', 'complete03', 'complete04'], [-0.1813, -0.1818, -0.1838]), \
('NiFeCoCe40202020/stepCA', 725, ['complete02', 'complete03', 'complete04'], [-0.17705, -0.17905, -0.18255]), \
('NiFeCoCe30072043/stepCA', 1326, ['complete02', 'complete03', 'complete04'], [-0.17605, -0.1788, -0.18005]), \
]
savep=os.path.join(homefolder, 'benchmarkingstepCAs_dlist.pck')
dlist=[]
for subfold, sample, fnstartl, vshl in subfold_sample_fnstartl_vshl:
d={}
d['Sample']=sample
fold=os.path.join(homefolder, subfold)
fns=os.listdir(fold)
pl=[[os.path.join(fold, fn) for fn in fns if fn.startswith(fnstart)][0] for fnstart in fnstartl]
for p, fnstart, vsh in zip(pl, fnstartl, vshl):
d[fnstart]={}
f=open(p, mode='r')
f.readline()
f.readline()
dr=csv.DictReader(f, delimiter='\t')
for l in dr:
for kr in l.keys():
k=kr.strip()
if k in ['Unknown']:
continue
if not k in d[fnstart].keys():
d[fnstart][k]=[]
d[fnstart][k]+=[myeval(l[kr].strip())]
for k in d[fnstart].keys():
d[fnstart][k]=numpy.array(d[fnstart][k])
f.close()
try:
x=d[fnstart]['I/mA']/.196
except:
x=d[fnstart]['<I>/mA']/.196
try:
y=d[fnstart]['Ewe/V']+vsh
except:
y=d[fnstart]['<Ewe>/V']+vsh
iinterv=len(x)//9
indsl=[i*iinterv-numpy.arange(50)-5 for i in range(5, 10)]
xv=numpy.array([x[inds].mean() for inds in indsl])
yv=numpy.array([y[inds].mean() for inds in indsl])
iv=numpy.array([inds.mean() for inds in indsl])
pylab.figure()
pylab.plot(x, 'b-')
pylab.plot(iv, xv, 'bo')
pylab.twinx()
pylab.plot(y, 'g-')
pylab.plot(iv, yv, 'go')
pylab.title(subfold+fnstart)
d[fnstart]['I(mAcm2)']=xv
d[fnstart]['Ewe(VOER)']=yv
dlist+=[d]
#pylab.show()
if 1:
f=open(savep, mode='w')
pickle.dump(dlist, f)
f.close()
| bsd-3-clause | -1,207,282,057,356,617,200 | 33.6 | 115 | 0.568401 | false | 2.848518 | false | false | false |
ssato/python-jinja2-cli | jinja2_cli/tests/render.py | 1 | 2414 | #
# Copyright (C) 2011 - 2013 Satoru SATOH <ssato at redhat.com>
#
import os
import unittest
import jinja2_cli.render as TT # Stands for Test Target module.
import jinja2_cli.compat
import jinja2_cli.tests.common as C
class Test_00_pure_functions(unittest.TestCase):
def test_00_mk_template_paths__wo_paths(self):
self.assertEquals(TT.mk_template_paths("/a/b/c.yml"),
[os.curdir, "/a/b"])
def test_01_mk_template_paths__w_paths(self):
self.assertEquals(TT.mk_template_paths("/a/b/c.yml", ["/a/d"]),
["/a/d", "/a/b"])
def test_10_tmpl_env(self):
self.assertTrue(isinstance(TT.tmpl_env(["/a/b", ]),
TT.jinja2.Environment))
def test_20_render_s(self):
tmpl_s = 'a = {{ a }}, b = "{{ b }}"'
self.assertEquals(TT.render_s(tmpl_s, {'a': 1, 'b': 'bbb'}),
'a = 1, b = "bbb"')
class Test_10_effectful_functions(unittest.TestCase):
def setUp(self):
self.workdir = C.setup_workdir()
def test_10_render_impl(self):
tmpl = "a.j2"
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
r = TT.render_impl(tmpl, {'a': "aaa", }, [self.workdir])
self.assertEquals(r, "a = aaa")
def test_20_render(self):
tmpl = "a.j2"
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
r = TT.render(tmpl, {'a': "aaa", }, [self.workdir])
self.assertEquals(r, "a = aaa")
def test_22_render__ask(self):
"""FIXME: Write tests for jinja2_cli.render.render"""
pass
def test_30_template_path(self):
tmpl = "a.j2"
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
self.assertEquals(TT.template_path(tmpl, [self.workdir]),
os.path.join(self.workdir, tmpl))
def test_32_template_path__not_exist(self):
tmpl = "template_not_exist.j2"
self.assertTrue(TT.template_path(tmpl, [self.workdir]) is None)
def test_50_renderto(self):
tmpl = "a.j2"
output = os.path.join(self.workdir, "a.out")
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
TT.renderto(tmpl, dict(a="aaa", ), [self.workdir], output, False)
self.assertEquals(jinja2_cli.compat.copen(output).read(), "a = aaa")
# vim:sw=4:ts=4:et:
| bsd-3-clause | -3,658,749,903,160,472,000 | 31.186667 | 76 | 0.553853 | false | 3.071247 | true | false | false |
Bakuutin/Pitchers | pitchers.py | 1 | 6675 | """
The program searches for an optimal solution to the two-jug water-pouring problem
Python 3 is required
"""
from enum import Enum
class Action(Enum):
"""
    Actions that can be applied to the jugs
"""
empty = 0
fill = 1
transfer = 2
start = 3
def get_pitchers_volume():
"""
    Returns a list of the integer jug volumes
"""
    while True:  # pitchers_volume is a list of integers obtained by splitting the input string
        pitchers_volume = list(int(pitcher) for pitcher in input("Enter the volumes of the two jugs, separated by a space: ").split())
        if len(pitchers_volume) != 2:  # We only handle the case of exactly two jugs
            print('Oops! Please try again.')
else:
return pitchers_volume
def get_target():
"""
    Returns the desired target volume as an integer
    """
    return int(input("Enter the desired volume: "))
def greatest_common_divisor(a, b):
"""
    Computes the greatest common divisor.
"""
while b:
a, b = b, a % b
return a
def make_edges(i, j, i_max, j_max):
"""
    Builds a dict of all outgoing graph edges from the point (i, j),
    where i and j are the fill levels of the first and second jugs
"""
edges = dict()
    # If a jug is not empty, it can be emptied
if i != 0:
edges[(0, j)] = Action.empty
if j != 0:
edges[(i, 0)] = Action.empty
    # If a jug is not full, it can be filled
if i != i_max:
edges[(i_max, j)] = Action.fill
if j != j_max:
edges[(i, j_max)] = Action.fill
    # A non-empty jug can be poured into a jug that is not yet full
if i != 0 and j_max-j >= i:
edges[(0, j+i)] = Action.transfer
if j != 0 and i_max-i >= j:
edges[(i+j, 0)] = Action.transfer
    # Moreover, if the receiving jug does not have enough room,
    # both jugs end up non-empty
if j != 0 and 0 < i_max-i < j:
edges[(i_max, j - (i_max-i))] = Action.transfer
if i != 0 and 0 < j_max-j < i:
edges[(i - (j_max-j), j_max)] = Action.transfer
return edges
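# Illustrative example (volumes 3 and 5 chosen arbitrarily): make_edges(0, 0, 3, 5)
# returns {(3, 0): Action.fill, (0, 5): Action.fill}, since empty jugs can only be filled.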
def make_pitchers_graph(pitchers_volume):
"""
    Builds a dictionary whose keys are all combinations of jug fill levels
    and whose values are the possible transitions from each combination
"""
pitchers_graph = dict()
gcd = greatest_common_divisor(pitchers_volume[0], pitchers_volume[1])
    for i in range(0, int(pitchers_volume[0]/gcd)+1):  # Scale the states by the GCD of the two volumes
        for j in range(0, int(pitchers_volume[1]/gcd)+1):  # so the graph only contains reachable fill levels
pitchers_graph[(i*gcd, j*gcd)] = make_edges(i*gcd, j*gcd, pitchers_volume[0], pitchers_volume[1])
return pitchers_graph
def dijkstra(graph, start_node, target):
"""
    Finds the shortest path in the graph
"""
distance = dict.fromkeys(graph, float('inf'))
path = dict()
    path[start_node] = [[[start_node], [Action.start]]]  # The path is stored as a dict that maps each node name
    distance[start_node] = 0                             # to a list of the preceding nodes, together with
    node_set = set(graph)                                # the type of action applied to the jugs
    targets_list = [node for node in node_set            # The targets are stored as a list of all nodes
                    if node[0] == target or node[1] == target]  # that qualify as a finish state
while node_set:
node = min(node_set, key=distance.get)
        if node in targets_list and node in path:  # As soon as a suitable node is found, we exit. Since the search
            return path[node]                      # starts at (0, 0) and every edge has the same weight,
        node_set.remove(node)                      # the first node found is also the optimal one
for child_node in graph[node].keys():
            if distance[child_node] >= distance[node] + 1:  # Every edge is treated as having weight one
distance[child_node] = distance[node] + 1
path[child_node] = list()
                path[child_node].extend(path[node])  # The path to the new node is the path to its parent
                path[child_node].append([[child_node],  # plus the transition itself,
                                         [graph[node].get(child_node)]])  # with the type of action added
def show_answer(path, target):
"""
    Prints the answer in a human-readable form
"""
if path is not None:
        print('Steps required: {}'.format(len(path)-1))
        for node in path:
            print(node[0][0], end=' ')  # The state of the jugs
            print(node[1][0].name)  # The type of action
else:
        print('It is impossible to measure {} l. with only these jugs.'.format(target))
pitchers_volume = get_pitchers_volume()  # Read the jug volumes from the keyboard
target_node = get_target()  # and the desired target volume
start_node = (0, 0)  # Start with both jugs empty
pitchers_graph = make_pitchers_graph(pitchers_volume)  # Build the graph of all jug states
path = dijkstra(pitchers_graph, start_node, target_node)  # Find the shortest path
show_answer(path, target_node)  # Print the result | mit | -342,281,821,695,193,660 | 36.282609 | 120 | 0.611975 | false | 2.001556 | false | false | false |
gpuhalla/discord_bot | newMusic.py | 1 | 16747 | """
Adapted from: https://gist.github.com/vbe0201/ade9b80f2d3b64643d854938d40a0a2d
"""
import asyncio
import functools
import itertools
import math
import random
import discord
import youtube_dlc
from async_timeout import timeout
from discord.ext import commands
# Silence youtube_dlc's useless bug report messages
youtube_dlc.utils.bug_reports_message = lambda: ''
class VoiceError(Exception):
pass
class YTDLError(Exception):
pass
class YTDLSource(discord.PCMVolumeTransformer):
YTDL_OPTIONS = {
'format': 'bestaudio/best',
'extractaudio': True,
'audioformat': 'mp3',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0',
}
FFMPEG_OPTIONS = {
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn',
}
ytdl = youtube_dlc.YoutubeDL(YTDL_OPTIONS)
def __init__(self, ctx: commands.Context, source: discord.FFmpegPCMAudio, *, data: dict, volume: float = 0.5):
super().__init__(source, volume)
self.requester = ctx.author
self.channel = ctx.channel
self.data = data
self.uploader = data.get('uploader')
self.uploader_url = data.get('uploader_url')
date = data.get('upload_date')
self.upload_date = date[6:8] + '.' + date[4:6] + '.' + date[0:4]
self.title = data.get('title')
self.thumbnail = data.get('thumbnail')
self.description = data.get('description')
self.duration = self.parse_duration(int(data.get('duration')))
self.tags = data.get('tags')
self.url = data.get('webpage_url')
self.views = data.get('view_count')
self.likes = data.get('like_count')
self.dislikes = data.get('dislike_count')
self.stream_url = data.get('url')
def __str__(self):
return '**{0.title}** by **{0.uploader}**'.format(self)
@classmethod
async def create_source(cls, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None):
loop = loop or asyncio.get_event_loop()
partial = functools.partial(cls.ytdl.extract_info, search, download=False, process=False)
data = await loop.run_in_executor(None, partial)
if data is None:
raise YTDLError('Couldn\'t find anything that matches `{}`'.format(search))
if 'entries' not in data:
process_info = data
else:
process_info = None
for entry in data['entries']:
if entry:
process_info = entry
break
if process_info is None:
raise YTDLError('Couldn\'t find anything that matches `{}`'.format(search))
webpage_url = process_info['webpage_url']
partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False)
processed_info = await loop.run_in_executor(None, partial)
if processed_info is None:
raise YTDLError('Couldn\'t fetch `{}`'.format(webpage_url))
if 'entries' not in processed_info:
info = processed_info
else:
info = None
while info is None:
try:
info = processed_info['entries'].pop(0)
except IndexError:
raise YTDLError('Couldn\'t retrieve any matches for `{}`'.format(webpage_url))
return cls(ctx, discord.FFmpegPCMAudio(info['url'], **cls.FFMPEG_OPTIONS), data=info)
@staticmethod
def parse_duration(duration: int):
minutes, seconds = divmod(duration, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
duration = []
if days > 0:
duration.append('{} days'.format(days))
if hours > 0:
duration.append('{} hours'.format(hours))
if minutes > 0:
duration.append('{} minutes'.format(minutes))
if seconds > 0:
duration.append('{} seconds'.format(seconds))
return ', '.join(duration)
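# Note: YTDLSource.parse_duration above formats a length in seconds, e.g.
# parse_duration(3725) -> '1 hours, 2 minutes, 5 seconds' (pluralisation is
# deliberately left naive here).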
class Song:
__slots__ = ('source', 'requester')
def __init__(self, source: YTDLSource):
self.source = source
self.requester = source.requester
def create_embed(self):
embed = (discord.Embed(title='Now playing',
description='```css\n{0.source.title}\n```'.format(self),
color=discord.Color.blurple())
.add_field(name='Duration', value=self.source.duration)
.add_field(name='Requested by', value=self.requester.mention)
.add_field(name='Uploader', value='[{0.source.uploader}]({0.source.uploader_url})'.format(self))
.add_field(name='URL', value='[Click]({0.source.url})'.format(self))
.set_thumbnail(url=self.source.thumbnail))
return embed
class SongQueue(asyncio.Queue):
def __getitem__(self, item):
if isinstance(item, slice):
return list(itertools.islice(self._queue, item.start, item.stop, item.step))
else:
return self._queue[item]
def __iter__(self):
return self._queue.__iter__()
def __len__(self):
return self.qsize()
def clear(self):
self._queue.clear()
def shuffle(self):
random.shuffle(self._queue)
def remove(self, index: int):
del self._queue[index]
class VoiceState:
def __init__(self, bot: commands.Bot, ctx: commands.Context):
self.bot = bot
self._ctx = ctx
self.current = None
self.voice = None
self.next = asyncio.Event()
self.songs = SongQueue()
self._loop = False
self._volume = 0.5
self.skip_votes = set()
self.audio_player = bot.loop.create_task(self.audio_player_task())
def __del__(self):
self.audio_player.cancel()
@property
def loop(self):
return self._loop
@loop.setter
def loop(self, value: bool):
self._loop = value
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value: float):
self._volume = value
@property
def is_playing(self):
return self.voice and self.current
async def audio_player_task(self):
while True:
self.next.clear()
if not self.loop:
                # Try to get the next song within 3 minutes.
                # If no song is added to the queue in time,
                # the player disconnects to avoid wasting
                # resources.
try:
async with timeout(180): # 3 minutes
self.current = await self.songs.get()
except asyncio.TimeoutError:
self.bot.loop.create_task(self.stop())
return
self.current.source.volume = self._volume
self.voice.play(self.current.source, after=self.play_next_song)
await self.current.source.channel.send(embed=self.current.create_embed())
await self.next.wait()
def play_next_song(self, error=None):
if error:
raise VoiceError(str(error))
self.next.set()
def skip(self):
self.skip_votes.clear()
if self.is_playing:
self.voice.stop()
async def stop(self):
self.songs.clear()
if self.voice:
await self.voice.disconnect()
self.voice = None
class Music(commands.Cog, name='Music'):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.voice_states = {}
def get_voice_state(self, ctx: commands.Context):
state = self.voice_states.get(ctx.guild.id)
if not state:
state = VoiceState(self.bot, ctx)
self.voice_states[ctx.guild.id] = state
return state
def cog_unload(self):
for state in self.voice_states.values():
self.bot.loop.create_task(state.stop())
def cog_check(self, ctx: commands.Context):
if not ctx.guild:
raise commands.NoPrivateMessage('This command can\'t be used in DM channels.')
return True
async def cog_before_invoke(self, ctx: commands.Context):
ctx.voice_state = self.get_voice_state(ctx)
async def cog_command_error(self, ctx: commands.Context, error: commands.CommandError):
await ctx.send('An error occurred: {}'.format(str(error)))
@commands.command(name='join', invoke_without_subcommand=True)
async def _join(self, ctx: commands.Context):
"""Joins a voice channel."""
destination = ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
@commands.command(name='summon')
@commands.guild_only()
async def _summon(self, ctx: commands.Context, *, channel: discord.VoiceChannel = None):
"""Summons the bot to a voice channel.
If no channel was specified, it joins your channel.
"""
if not channel and not ctx.author.voice:
raise VoiceError('You are neither connected to a voice channel nor specified a channel to join.')
destination = channel or ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
@commands.command(name='leave', aliases=['disconnect'])
@commands.guild_only()
async def _leave(self, ctx: commands.Context):
"""Clears the queue and leaves the voice channel."""
if not ctx.voice_state.voice:
return await ctx.send('Not connected to any voice channel.')
await ctx.voice_state.stop()
del self.voice_states[ctx.guild.id]
@commands.command(name='volume')
async def _volume(self, ctx: commands.Context, *, volume: int):
"""Sets the volume of the player."""
if not ctx.voice_state.is_playing:
return await ctx.send('Nothing being played at the moment.')
        if volume < 0 or volume > 100:
return await ctx.send('Volume must be between 0 and 100')
ctx.voice_state.volume = volume / 100
await ctx.send('Volume of the player set to {}%'.format(volume))
@commands.command(name='now', aliases=['current', 'playing'])
async def _now(self, ctx: commands.Context):
"""Displays the currently playing song."""
await ctx.send(embed=ctx.voice_state.current.create_embed())
@commands.command(name='pause')
@commands.guild_only()
async def _pause(self, ctx: commands.Context):
"""Pauses the currently playing song."""
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():
ctx.voice_state.voice.pause()
await ctx.message.add_reaction('⏯')
@commands.command(name='resume')
@commands.guild_only()
async def _resume(self, ctx: commands.Context):
"""Resumes a currently paused song."""
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_paused():
ctx.voice_state.voice.resume()
await ctx.message.add_reaction('⏯')
@commands.command(name='stop')
@commands.guild_only()
async def _stop(self, ctx: commands.Context):
"""Stops playing song and clears the queue."""
ctx.voice_state.songs.clear()
if ctx.voice_state.is_playing:
ctx.voice_state.voice.stop()
await ctx.message.add_reaction('⏹')
@commands.command(name='skip')
async def _skip(self, ctx: commands.Context):
"""Vote to skip a song. The requester can automatically skip.
3 skip votes are needed for the song to be skipped.
"""
if not ctx.voice_state.is_playing:
return await ctx.send('Not playing any music right now...')
voter = ctx.message.author
if voter == ctx.voice_state.current.requester:
await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
elif voter.id not in ctx.voice_state.skip_votes:
ctx.voice_state.skip_votes.add(voter.id)
total_votes = len(ctx.voice_state.skip_votes)
if total_votes >= 3:
await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
else:
await ctx.send('Skip vote added, currently at **{}/3**'.format(total_votes))
else:
await ctx.send('You have already voted to skip this song.')
@commands.command(name='queue')
async def _queue(self, ctx: commands.Context, *, page: int = 1):
"""Shows the player's queue.
You can optionally specify the page to show. Each page contains 10 elements.
"""
if len(ctx.voice_state.songs) == 0:
return await ctx.send('Empty queue.')
items_per_page = 10
pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue = ''
for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):
queue += '`{0}.` [**{1.source.title}**]({1.source.url})\n'.format(i + 1, song)
embed = (discord.Embed(description='**{} tracks:**\n\n{}'.format(len(ctx.voice_state.songs), queue))
.set_footer(text='Viewing page {}/{}'.format(page, pages)))
await ctx.send(embed=embed)
@commands.command(name='shuffle')
async def _shuffle(self, ctx: commands.Context):
"""Shuffles the queue."""
if len(ctx.voice_state.songs) == 0:
return await ctx.send('Empty queue.')
ctx.voice_state.songs.shuffle()
await ctx.message.add_reaction('✅')
@commands.command(name='remove')
async def _remove(self, ctx: commands.Context, index: int):
"""Removes a song from the queue at a given index."""
if len(ctx.voice_state.songs) == 0:
return await ctx.send('Empty queue.')
ctx.voice_state.songs.remove(index - 1)
await ctx.message.add_reaction('✅')
@commands.command(name='loop')
async def _loop(self, ctx: commands.Context):
"""Loops the currently playing song.
Invoke this command again to unloop the song.
"""
if not ctx.voice_state.is_playing:
return await ctx.send('Nothing being played at the moment.')
# Inverse boolean value to loop and unloop.
ctx.voice_state.loop = not ctx.voice_state.loop
await ctx.message.add_reaction('✅')
@commands.command(name='play')
async def _play(self, ctx: commands.Context, *, search: str):
"""Plays a song.
If there are songs in the queue, this will be queued until the
other songs finished playing.
This command automatically searches from various sites if no URL is provided.
A list of these sites can be found here: https://rg3.github.io/youtube-dl/supportedsites.html
"""
if not ctx.voice_state.voice:
await ctx.invoke(self._join)
async with ctx.typing():
try:
source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop)
except YTDLError as e:
await ctx.send('An error occurred while processing this request: {}'.format(str(e)))
else:
song = Song(source)
await ctx.voice_state.songs.put(song)
await ctx.send('Enqueued {}'.format(str(source)))
    @commands.command(name='dead')
    @commands.has_permissions(manage_guild=True)
    async def __dead(self, ctx):
        await ctx.invoke(self._play, search="https://www.youtube.com/watch?v=CfihYWRWRTQ")
        await ctx.send("LEAGUE IS DEAD")
@_join.before_invoke
@_play.before_invoke
async def ensure_voice_state(self, ctx: commands.Context):
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandError('You are not connected to any voice channel.')
if ctx.voice_client:
if ctx.voice_client.channel != ctx.author.voice.channel:
raise commands.CommandError('Bot is already in a voice channel.')
def setup(bot):
bot.add_cog(Music(bot))
| gpl-3.0 | -7,463,120,260,197,721,000 | 32.001972 | 114 | 0.592254 | false | 3.861297 | false | false | false |
zhlinh/leetcode | 0130.Surrounded Regions/solution.py | 1 | 1911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-03-11
Last_modify: 2016-03-11
******************************************
'''
'''
Given a 2D board containing 'X' and 'O',
capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's
into 'X's in that surrounded region.
For example,
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
'''
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m = len(board)
if m < 2:
return
n = len(board[0])
for i in range(m):
self.helper(board, i, 0, m, n)
if n > 1:
self.helper(board, i, n - 1, m, n)
for j in range(n):
self.helper(board, 0, j, m, n)
if m > 1:
self.helper(board, m - 1, j, m, n)
for i in range(m):
for j in range(n):
if board[i][j] == 'O':
board[i][j] = 'X'
if board[i][j] == '1':
board[i][j] = 'O'
def helper(self, board, i, j, m, n):
if board[i][j] == 'O':
board[i][j] = '1'
            # Trick: normally this could be i >= 1, but the boundary rows and
            # columns are always visited by the outer loops, so re-checking
            # index 0 from i == 1 would only duplicate work.
if i > 1:
self.helper(board, i - 1, j, m, n)
if i < m - 2:
self.helper(board, i + 1, j, m, n)
if j > 1:
self.helper(board, i, j - 1, m, n)
if j < n - 2:
self.helper(board, i, j + 1, m, n)
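# Quick self-check of the example from the problem statement (not part of the
# original solution file):
if __name__ == '__main__':
    board = [list('XXXX'), list('XOOX'), list('XXOX'), list('XOXX')]
    Solution().solve(board)
    print([''.join(row) for row in board])
    # expected: ['XXXX', 'XXXX', 'XXXX', 'XOXX']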
| apache-2.0 | -6,414,174,349,112,948,000 | 25.915493 | 75 | 0.438514 | false | 3.244482 | false | false | false |
cmjatai/cmj | cmj/utils.py | 1 | 14127 | from datetime import date, datetime, timedelta
from functools import wraps
import re
import subprocess
import threading
from unicodedata import normalize as unicodedata_normalize
from PyPDF4.pdf import PdfFileReader
from asn1crypto import cms
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import connection
from django.template.loaders.filesystem import Loader
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails import source_generators
import magic
from reversion.admin import VersionAdmin
from unipath.path import Path
def pil_image(source, exif_orientation=False, **options):
return source_generators.pil_image(source, exif_orientation, **options)
def clear_thumbnails_cache(queryset, field, time_create=0):
now = datetime.now()
for r in queryset:
assert hasattr(r, field), _(
'Objeto da listagem não possui o campo informado')
if not getattr(r, field):
continue
path = Path(getattr(r, field).path)
cache_files = path.parent.walk()
for cf in cache_files:
if cf == path:
continue
if time_create:
data_arquivo = datetime.fromtimestamp(cf.mtime())
if now - data_arquivo < timedelta(time_create):
continue
cf.remove()
def normalize(txt):
return unicodedata_normalize(
'NFKD', txt).encode('ASCII', 'ignore').decode('ASCII')
def get_settings_auth_user_model():
return getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def register_all_models_in_admin(module_name):
appname = module_name.split('.')
appname = appname[1] if appname[0] == 'cmj' else appname[0]
app = apps.get_app_config(appname)
for model in app.get_models():
class CustomModelAdmin(VersionAdmin):
list_display = [f.name for f in model._meta.fields
if f.name != 'id']
if not admin.site.is_registered(model):
admin.site.register(model, CustomModelAdmin)
def from_to(start, end):
return list(range(start, end + 1))
def make_pagination(index, num_pages):
'''Make a list of adjacent page ranges interspersed with "None"s
The list starts with [1, 2] and end with [num_pages-1, num_pages].
The list includes [index-1, index, index+1]
"None"s separate those ranges and mean ellipsis (...)
Example: [1, 2, None, 10, 11, 12, None, 29, 30]
'''
PAGINATION_LENGTH = 10
if num_pages <= PAGINATION_LENGTH:
return from_to(1, num_pages)
else:
if index - 1 <= 5:
tail = [num_pages - 1, num_pages]
head = from_to(1, PAGINATION_LENGTH - 3)
else:
if index + 1 >= num_pages - 3:
tail = from_to(index - 1, num_pages)
else:
tail = [index - 1, index, index + 1,
None, num_pages - 1, num_pages]
head = from_to(1, PAGINATION_LENGTH - len(tail) - 1)
return head + [None] + tail
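# For example, make_pagination(15, 30) yields
# [1, 2, 3, None, 14, 15, 16, None, 29, 30]: three leading pages, the window
# around the current index, and the tail, with None marking each ellipsis.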
def xstr(s):
return '' if s is None else str(s)
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_base_url(request):
    # TODO: replace this with Site.objects.get_current().domain
# from django.contrib.sites.models import Site
current_domain = request.get_host()
protocol = 'https' if request.is_secure() else 'http'
return "{0}://{1}".format(protocol, current_domain)
def create_barcode(value):
'''
creates a base64 encoded barcode PNG image
'''
"""from base64 import b64encode
from reportlab.graphics.barcode import createBarcodeDrawing
barcode = createBarcodeDrawing('Code128',
value=value,
barWidth=170,
height=50,
fontSize=2,
humanReadable=True)
data = b64encode(barcode.asString('png'))
return data.decode('utf-8')"""
def CHOICE_SIGNEDS():
return [('', 'Ambos'),
(1, 'Documentos Com Assinatura Digital'),
(0, 'Documentos Sem Assinatura Digital')]
YES_NO_CHOICES = [(True, _('Sim')), (False, _('Não'))]
NONE_YES_NO_CHOICES = [(None, _('---------')),
(True, _('Sim')), (False, _('Não'))]
def listify(function):
@wraps(function)
def f(*args, **kwargs):
return list(function(*args, **kwargs))
return f
UF = [
('AC', 'Acre'),
('AL', 'Alagoas'),
('AP', 'Amapá'),
('AM', 'Amazonas'),
('BA', 'Bahia'),
('CE', 'Ceará'),
('DF', 'Distrito Federal'),
('ES', 'Espírito Santo'),
('GO', 'Goiás'),
('MA', 'Maranhão'),
('MT', 'Mato Grosso'),
('MS', 'Mato Grosso do Sul'),
('MG', 'Minas Gerais'),
('PR', 'Paraná'),
('PB', 'Paraíba'),
('PA', 'Pará'),
('PE', 'Pernambuco'),
('PI', 'Piauí'),
('RJ', 'Rio de Janeiro'),
('RN', 'Rio Grande do Norte'),
('RS', 'Rio Grande do Sul'),
('RO', 'Rondônia'),
('RR', 'Roraima'),
('SC', 'Santa Catarina'),
('SE', 'Sergipe'),
('SP', 'São Paulo'),
('TO', 'Tocantins'),
('EX', 'Exterior'),
]
RANGE_ANOS = [(year, year) for year in range(date.today().year, 1889, -1)]
RANGE_MESES = [
(1, 'Janeiro'),
(2, 'Fevereiro'),
(3, 'Março'),
(4, 'Abril'),
(5, 'Maio'),
(6, 'Junho'),
(7, 'Julho'),
(8, 'Agosto'),
(9, 'Setembro'),
(10, 'Outubro'),
(11, 'Novembro'),
(12, 'Dezembro'),
]
RANGE_DIAS_MES = [(n, n) for n in range(1, 32)]
TIPOS_MIDIAS_PERMITIDOS = {
'application/pdf': 'pdf',
'application/x-pdf': 'pdf',
'application/acrobat': 'pdf',
'applications/vnd.pdf': 'pdf',
'application/msword': 'doc',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx',
'image/jpeg': 'jpg',
'image/jpg': 'jpg',
'image/jpe_': 'jpg',
'image/pjpeg': 'jpg',
'image/vnd.swiftview-jpeg': 'jpg',
'application/jpg': 'jpg',
'application/x-jpg': 'jpg',
'image/pjpeg': 'jpg',
'image/pipeg': 'jpg',
'image/vnd.swiftview-jpeg': 'jpg',
'image/gif': 'gif',
'image/png': 'png',
'application/png': 'png',
'application/x-png': 'png',
}
TIPOS_IMG_PERMITIDOS = {
'image/jpeg',
'image/jpg',
'image/jpe_',
'image/pjpeg',
'image/vnd.swiftview-jpeg',
'application/jpg',
'application/x-jpg',
'image/pjpeg',
'image/pipeg',
'image/vnd.swiftview-jpeg',
'image/x-xbitmap',
'image/bmp',
'image/x-bmp',
'image/x-bitmap',
'image/png',
'application/png',
'application/x-png'
}
def fabrica_validador_de_tipos_de_arquivo(lista, nome):
def restringe_tipos_de_arquivo(value):
mime = magic.from_buffer(value.read(), mime=True)
if mime not in lista:
raise ValidationError(_('Tipo de arquivo não suportado'))
return mime, lista[mime]
    # the name matters for the migrations
restringe_tipos_de_arquivo.__name__ = nome
return restringe_tipos_de_arquivo
restringe_tipos_de_arquivo_midias = fabrica_validador_de_tipos_de_arquivo(
TIPOS_MIDIAS_PERMITIDOS, 'restringe_tipos_de_arquivo_midias')
def intervalos_tem_intersecao(a_inicio, a_fim, b_inicio, b_fim):
maior_inicio = max(a_inicio, b_inicio)
menor_fim = min(a_fim, b_fim)
return maior_inicio <= menor_fim
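# Example: intervalos_tem_intersecao(1, 10, 5, 20) is True, while
# intervalos_tem_intersecao(1, 4, 5, 20) is False (adjacent intervals only
# count as intersecting when the boundary values are equal).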
media_protected_storage = FileSystemStorage(
location=settings.MEDIA_PROTECTED_ROOT, base_url='DO_NOT_USE')
def texto_upload_path(instance, filename, subpath='', pk_first=False):
filename = re.sub('\s', '_', normalize(filename.strip()).lower())
prefix = 'public'
str_path = ('./cmj/%(prefix)s/%(model_name)s/'
'%(subpath)s/%(pk)s/%(filename)s')
if pk_first:
str_path = ('./cmj/%(prefix)s/%(model_name)s/'
'%(pk)s/%(subpath)s/%(filename)s')
if subpath is None:
subpath = '_'
path = str_path % \
{
'prefix': prefix,
'model_name': instance._meta.model_name,
'pk': instance.pk,
'subpath': subpath,
'filename': filename
}
return path
def run_sql(sql):
with connection.cursor() as cursor:
cursor.execute(sql)
if sql.startswith('select'):
rows = cursor.fetchall()
if settings.DEBUG:
print(rows)
def run_signed_name_and_date_via_fields(fields):
signs = {}
for key, field in fields.items():
        if '/FT' not in field or field['/FT'] != '/Sig':
continue
if '/V' not in field:
continue
# .format(field['/V']['/Reason'])
nome = 'Nome do assinante não localizado.'
content_sign = field['/V']['/Contents']
try:
signed_data = cms.ContentInfo.load(content_sign)['content']
oun_old = []
for cert in signed_data['certificates']:
subject = cert.native['tbs_certificate']['subject']
oun = subject['organizational_unit_name']
if isinstance(oun, str):
continue
if len(oun) > len(oun_old):
oun_old = oun
nome = subject['common_name'].split(':')[0]
except:
if '/Name' in field['/V']:
nome = field['/V']['/Name']
fd = None
try:
data = str(field['/V']['/M'])
if 'D:' not in data:
data = None
else:
if not data.endswith('Z'):
data = data.replace('Z', '+')
data = data.replace("'", '')
fd = datetime.strptime(data[2:], '%Y%m%d%H%M%S%z')
except:
pass
if nome not in signs:
signs[nome] = fd
return signs
def run_signed_name_and_date_extract(file):
signs = {}
fields = {}
pdfdata = file.read()
    # if there is no ByteRange, the document is not signed
byterange = []
n = -1
while True:
n = pdfdata.find(b"/ByteRange", n + 1)
if n == -1:
break
byterange.append(n)
if not byterange:
return signs
    # try to extract the signatures via /Fields
try:
pdf = PdfFileReader(file)
fields = pdf.getFields()
except Exception as e:
try:
pdf = PdfFileReader(file, strict=False)
fields = pdf.getFields()
except Exception as ee:
fields = ee
try:
        # if the extraction via /Fields succeeded and as many signatures
        # as ByteRanges were captured, we are done
if isinstance(fields, dict):
signs = run_signed_name_and_date_via_fields(fields)
if len(signs) == len(byterange):
return signs
except Exception as e:
pass
for n in byterange:
start = pdfdata.find(b"[", n)
stop = pdfdata.find(b"]", start)
assert n != -1 and start != -1 and stop != -1
n += 1
br = [int(i, 10) for i in pdfdata[start + 1: stop].split()]
contents = pdfdata[br[0] + br[1] + 1: br[2] - 1]
bcontents = bytes.fromhex(contents.decode("utf8"))
data1 = pdfdata[br[0]: br[0] + br[1]]
data2 = pdfdata[br[2]: br[2] + br[3]]
#signedData = data1 + data2
nome = 'Nome do assinante não localizado.'
try:
signed_data = cms.ContentInfo.load(bcontents)['content']
oun_old = []
for cert in signed_data['certificates']:
subject = cert.native['tbs_certificate']['subject']
oun = subject['organizational_unit_name']
if isinstance(oun, str):
continue
if len(oun) > len(oun_old):
oun_old = oun
nome = subject['common_name'].split(':')[0]
if nome not in signs:
signs[nome] = timezone.localtime()
except:
pass
return signs
def signed_name_and_date_extract(file):
try:
signs = run_signed_name_and_date_extract(file)
except:
return {}
signs = list(signs.items())
signs = sorted(signs, key=lambda sign: sign[0])
sr = []
for s in signs:
tt = s[0].title().split(' ')
for idx, t in enumerate(tt):
if t in ('Dos', 'De', 'Da', 'Do', 'Das', 'E'):
tt[idx] = t.lower()
sr.append((' '.join(tt), s[1]))
signs = sr
meta_signs = {
'signs': [],
'hom': []
}
for s in signs:
cn = settings.CERT_PRIVATE_KEY_NAME
meta_signs['hom' if s[0] == cn else 'signs'].append(s)
return meta_signs
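# The returned structure looks roughly like this (illustrative names only):
#   {'signs': [('Fulano de Tal', datetime(...)), ...],
#    'hom': [(settings.CERT_PRIVATE_KEY_NAME, datetime(...))]}
# where 'hom' holds the signature made with the configured certificate name.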
# the 'hom' key above flags whether the document carries the official (homologation) signature
class ProcessoExterno(object):
def __init__(self, cmd, logger):
self.cmd = cmd
self.process = None
self.logger = logger
def run(self, timeout):
def target():
self.logger.info('Thread started')
self.process = subprocess.Popen(
self.cmd, shell=True, stdout=subprocess.PIPE)
self.process.communicate()
self.logger.info('Thread finished:')
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.logger.info('Terminating process')
self.process.terminate()
return None
# thread.join()
self.logger.info(self.process.returncode)
return self.process.returncode
class CmjLoader(Loader):
def get_dirs(self):
return self.dirs if self.dirs is not None else self.engine.dirs
| gpl-3.0 | -1,395,734,842,059,924,000 | 26.065259 | 87 | 0.552301 | false | 3.445992 | false | false | false |
dhalima3/TravelSpark | Main.py | 1 | 7574 | import requests
import os
import time
import random
from flask import Flask, request, redirect, session, url_for, render_template
from flask.json import jsonify, dumps, loads
from requests_oauthlib import OAuth2Session
import requests
import json
import urllib2
import mechanize
from bs4 import BeautifulSoup
from urlparse import urlparse
from apiclient.discovery import build
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, tmpl_dir)
app.config['DEBUG'] = True
app.config['PROPAGATE_EXCEPTIONS'] = True
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
instagram_client_id = "115a6c0fd0a64bccbf213e4eafec554a"
instagram_client_secret = "72f3282930444d9e826e5f083ede32d1"
instagram_authorization_base_url = "https://api.instagram.com/oauth/authorize"
instagram_token_url = "https://api.instagram.com/oauth/access_token"
instagram_image_search_url = "https://api.instagram.com/v1/media/search"
google_api_key = "AIzaSyCLehiRvLWhFXbwkI6zojampXcICC0-rMU"
google_geocoding_url = "https://maps.googleapis.com/maps/api/geocode/json?address=%s"
@app.route('/')
def instagram_authorization():
if(session.get("instagram_access_key" != None)):
return redirect("/home")
oauth = OAuth2Session(instagram_client_id, redirect_uri="http://127.0.0.1:5000/callback")
authorization_url, state = oauth.authorization_url(instagram_authorization_base_url)
session['oauth_state'] = state
return redirect(authorization_url)
@app.route('/callback', methods=["GET"])
def instagram_token_retrieval():
oauth = OAuth2Session(instagram_client_id, redirect_uri="http://127.0.0.1:5000/callback", state=session['oauth_state'])
    # When grabbing the token, Instagram requires the code from the authorization step
    # along with client_id + secret -_-
    # This fetch_token call might not be right for other APIs; it all depends on their requirements
my_token = oauth.fetch_token(instagram_token_url, code=request.args.get('code'), client_secret=instagram_client_secret, client_id=instagram_client_id, authorization_url=request.url)
session['instagram_access_key'] = my_token['access_token']
return redirect("/home")
'''
Route representing the home page
'''
@app.route('/home')
def home():
#TODO: Put in Flickr APi for the home page.
if(session.get('instagram_access_key') == None):
return redirect("/")
#Lets get info on myself the access_token holder
access_token = session['instagram_access_key']
r = requests.request("GET",'https://api.instagram.com/v1/users/self/?access_token=%s' % access_token)
return render_template('home.html', user_data=r.json())
'''
The main route for the collage page
'''
#after user hits submit button.
@app.route('/location/<place>', methods=["POST", "GET"])
def get_collage(place):
#headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
#payload = {'num_photos': 3, 'place': place}
url = 'http://127.0.0.1:5000/location/instagram/'+place
#import urlparse
#url = 'http://127.0.0.1:5000/location/instagram/holder'
#parts = urlparse.urlparse(url)
#parts = parts._replace(path=)
#parts.geturl()
#print payload
response = get_instagram_photos(place)
response2= json.loads(get_google_images(place))
response3= json.loads(get_google_images2(place))
response4 = json.loads(get_tumblr_images(place))
print "RECIEVES"
print response
print "GOOGLE"
print response2
print "TUMBLR"
print response4
place = place.replace("+", " ")
airport = get_airport(place)
price = "Packages for Jetblue start as low as " + str(get_lowest_price(place)) + ". "
average_savings = "And save up to " + str(get_savings_percentage(place)) + " compared to Expedia! Wow Jetblue is so awesome!"
return render_template('collage.html', place=place, photos_display=response, photos_google= response2, photos_tumblr= response4, photos_google2 = response3, lowest_price=price, average_savings=average_savings, airport=airport)
def get_airport(place):
f = open('./jetblue/jetblueresults', 'r')
for line in f:
lineList = line.split(',')
destination = lineList[2].lower()
if (destination == place.lower()):
return lineList[1]
def get_savings_percentage(place):
f = open('./jetblue/jetblueresults', 'r')
for line in f:
lineList = line.split(',')
destination = lineList[2].lower()
if (destination == place.lower()):
return lineList[5][:-1]
def get_lowest_price(place):
f = open('./jetblue/jetblueresults', 'r')
for line in f:
lineList = line.split(',')
destination = lineList[2].lower()
if (destination == place.lower()):
return lineList[4]
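# The jetblueresults file is assumed to be comma-separated, with the airport in
# column 1, the destination city in column 2, the lowest package price in
# column 4 and the savings percentage in column 5 (as read above).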
'''
Will return a list of image URLs from instagram given the name of a location
'''
def get_instagram_photos(place):
print "hell"
print place
if(session.get('instagram_access_key') == None):
print "REDIRECT"
return redirect("/")
#http://127.0.0.1:5000/location/instagram/Chicago/3
#place, num_photos,
# Use Google Geocoding to convert place to lat and long coordinates
num_photos = 25;
print place
location = requests.get(google_geocoding_url % place)
location = location.json()
print location
lat_coord = location.get("results")[0].get("geometry").get("location").get("lat")
long_coord = location.get("results")[0].get("geometry").get("location").get("lng")
print lat_coord
print long_coord
# Make the API call to get the Models
querys = {"lat": lat_coord, "lng" : long_coord, "min_timestamp": "1262304000", "max_timestamp":"1446940800", "distance" : "10000" , "access_token": session.get('instagram_access_key')}
instagram_models = requests.get(instagram_image_search_url, params=querys)
chosen_images = []
json_object = loads(instagram_models.text)
print json_object
if len(json_object["data"]) > num_photos:
for i in range(0, num_photos):
chosen_images.append(json_object["data"][i]["images"])
else:
for i in range(0, len(json_object["data"])):
chosen_images.append(json_object["data"][i]["images"])
print len(json_object["data"])
print num_photos
print chosen_images
return chosen_images
def get_tumblr_images(place):
print "GETTING TUMBLR"
url = ('https://api.tumblr.com/v2/tagged?tag='+urllib2.quote(place)+"&api_key=YaGrzj5NUOlMDxQyTtkSBz1KEAnVyUYcCRKWT74VzNUJwRbtH4")
print url
req = urllib2.Request(url, headers={'accept': '*/*'})
response = urllib2.urlopen(req)
print "TUMBLR"
ret = response.read()
print ret
return ret
def get_google_images(place):
print "MOVING ON TO GOOGLE"
url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q='+ urllib2.quote(place) + "&rsz=8")
print url
req = urllib2.Request(url, headers={'accept': '*/*'})
response = urllib2.urlopen(req)
print "GOOGLE RESPONSE"
print type(response)
print "TYPE OF RESPONSE.READ"
ret = response.read()
print len(ret)
print "RET"
print ret
return ret
def get_google_images2(place):
print "MOVING ON TO GOOGLE"
url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q='+ urllib2.quote(place) +'&rsz=8&start=9')
print url
req = urllib2.Request(url, headers={'accept': '*/*'})
response = urllib2.urlopen(req)
print "GOOGLE RESPONSE"
print type(response)
print "TYPE OF RESPONSE.READ"
ret = response.read()
print len(ret)
print "RET"
print ret
return ret
if __name__ == '__main__':
app.run()
| apache-2.0 | 4,999,414,832,929,667,000 | 34.227907 | 231 | 0.695933 | false | 3.065156 | false | false | false |
google-research/google-research | hierarchical_foresight/models/vae.py | 1 | 3093 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variational Autoencoder Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
class ImageTransformSC(snt.AbstractModule):
"""VAE for the Maze Environment."""
def __init__(self, latentsize, name='itmsc', width=64):
super(ImageTransformSC, self).__init__(name=name)
self.width = width
if self.width == 48:
self.lsz = 2
else:
self.lsz = 3
self.latentsize = latentsize
self.enc = snt.nets.ConvNet2D([16, 32, 64, 128], [3, 3, 3, 3],
[2, 2, 2, 2], ['VALID'])
self.dec = self.enc.transpose()
self.lin1 = snt.Linear(output_size=512, name='lin1')
self.lin2 = snt.Linear(output_size=self.latentsize*2, name='lin2')
self.lin3 = snt.Linear(output_size=self.lsz *3*128, name='lin3')
self.f1 = snt.Linear(output_size=512, name='f1')
self.f2 = snt.Linear(output_size=512, name='f2')
self.f3 = snt.Linear(output_size=256, name='f3')
self.fc1 = snt.Linear(output_size=256, name='fc')
self.fc2 = snt.Linear(output_size=256, name='fc')
self.fc3 = snt.Linear(output_size=256, name='fc')
def _build(self, bs):
self.s1 = tf.placeholder(tf.float32, shape=[None, self.width, 64, 3])
self.s2 = tf.placeholder(tf.float32, shape=[None, self.width, 64, 3])
c1 = self.enc(self.s1)
c2 = self.enc(self.s2)
e1 = tf.reshape(c1, [-1, self.lsz *3*128])
e2 = tf.reshape(c2, [-1, self.lsz *3*128])
e = tf.concat([e1, e2], 1)
l1 = tf.nn.relu(self.lin1(e))
l2 = self.lin2(l1)
mu, std = l2[:, :self.latentsize], tf.nn.relu(l2[:, self.latentsize:])
n = tf.distributions.Normal(loc=[0.]*self.latentsize,
scale=[1.]*self.latentsize)
a = n.sample(bs)
self.z = mu + std * a
emb1 = tf.nn.relu(self.f1(e1))
emb2 = tf.nn.relu(self.f2(emb1))
emb3 = self.f3(emb2)
s2emb = tf.nn.relu(self.fc1(tf.concat([emb3, self.z], 1)))
s2emb = tf.nn.relu(self.fc2(s2emb))
s2emb = self.fc3(s2emb)
ll = self.lin3(emb3)
ll = tf.reshape(ll, [-1, self.lsz, 3, 128])
dec1_3 = self.dec(ll+c1)
rec = tf.nn.sigmoid(dec1_3)
rec = tf.clip_by_value(rec, 1e-5, 1 - 1e-5)
l3 = self.lin3(s2emb)
l3 = tf.reshape(l3, [-1, self.lsz, 3, 128])
dec2_3 = self.dec(l3+c1)
o = tf.nn.sigmoid(dec2_3)
o = tf.clip_by_value(o, 1e-5, 1 - 1e-5)
return o, rec, mu, std**2
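# Hypothetical usage sketch (not part of the original module), assuming a TF1
# session and 64x64x3 image batches im1 / im2:
#   model = ImageTransformSC(latentsize=8)
#   out, rec, mu, var = model(batch_size)
#   sess.run([out, rec], feed_dict={model.s1: im1, model.s2: im2})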
| apache-2.0 | -1,294,222,527,283,148,800 | 33.752809 | 74 | 0.628839 | false | 2.75423 | false | false | false |
a365541453/django | django/blog/docker_part/admin.py | 1 | 1206 | # coding=utf-8
from django.contrib import admin
from docker_part.models import docker_article
# Register your models here.
from django import forms
from django.forms import fields
# Register your models here.
# The form controls how fields are rendered; this code overrides the form's default display
class FlatPageForm(forms.ModelForm):
class Meta:
model = docker_article
exclude = []
widgets = {
"type": fields.Select(choices=[
(1, "主页"),
(2, "vmware"),
(3, "自动化"),
(4, "KVM"),
(5, "LInux"),
(6, "Docker"),
(7, "数据库"),
])
}
# FlatPageForm is a form class, so it cannot be registered together with the article model on its own;
# the customized form has to be attached to an admin class instead
class FlatPageAdmin(admin.ModelAdmin):
form = FlatPageForm
    # The lines below wire in the rich-text (KindEditor) editor
list_display = ('title', 'time', 'type')
class Media:
        # Add these JS files to the admin page HTML; each path gets STATIC_URL/ prepended
js = (
'kindeditor/kindeditor-all.js',
'kindeditor/lang/zh_CN.js',
'kindeditor/config.js',
)
# Finally, register the article model with the customized admin class
# (the model must be registered through the admin class)
admin.site.register(docker_article, FlatPageAdmin)
| gpl-3.0 | 5,791,728,908,724,285,000 | 19.291667 | 50 | 0.693018 | false | 2.037657 | false | false | false |
littmus/kutime_web | kutime/views.py | 1 | 2776 | # -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
from django.shortcuts import render
from django.http import HttpResponse
from django.core import serializers
from django.views.decorators.csrf import csrf_exempt
import watson
from models import *
def JsonResponse(json):
return HttpResponse(json, content_type='application/json')
def index(request):
list_col = College.objects.all()
list_col_major_anam = list_col.filter(type='M', campus='A')
list_col_major_sejong = list_col.filter(type='M', campus='S')
list_col_etc_anam = list_col.filter(type='E', campus='A')
list_col_etc_sejong = list_col.filter(type='E', campus='S')
return render(
request,
'index.html',
{
'cols_major_anam': list_col_major_anam,
'cols_major_sejong': list_col_major_sejong,
'cols_etc_anam': list_col_etc_anam,
'cols_etc_sejong': list_col_etc_sejong,
'timetable_range': range(1, 13),
}
)
@csrf_exempt
def dept(request, col_num):
data = None
if col_num is not None:
list_dept = Department.objects.filter(col__number=col_num)
data = serializers.serialize('json', list_dept, fields=('name', 'number'))
return JsonResponse(data)
@csrf_exempt
def lec(request, dept_num):
data = None
if dept_num is not None:
if dept_num[0] in ['A', 'S']:
campus = dept_num[0]
num = dept_num[1:]
list_lec = Lecture.objects.filter(col__campus=campus, dept__number=num)
else:
list_lec = Lecture.objects.filter(dept__number=dept_num)
data = serializers.serialize('json', list_lec)
return JsonResponse(data)
@csrf_exempt
def search(request):
if request.method == 'GET':
data = None
q = request.GET.get('q', None)
if q is not None:
if u'교시' in q:
pass
if 'LP' in q:
q = q.replace('LP', 'L-P')
if u'관' in q:
q = q.replace(u'관', '')
# for _q in q.split(','):
# if q.endswith(u'교시'):
#result = [s.object for s in watson.search(q)]
""" TODO
- 검색어 유형 따라 필터 적용
ex) 5교시 -> dayAndPeriod 에서만 검색
"""
result = watson.filter(Lecture, q)
data = serializers.serialize('json', result)
return JsonResponse(data)
else:
return HttpResponse(status=404)
"""
from selenium import webdriver
def capture(request):
if request.method == 'POST':
drvier = webdriver.PhantomJS()
else:
return HttpResponse(status=404)
"""
| mit | -1,254,733,528,694,133,200 | 26.836735 | 83 | 0.569648 | false | 3.190643 | false | false | false |
SnabbCo/neutron | neutron/tests/unit/test_l3_agent.py | 1 | 54667 | # Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import mock
from oslo.config import cfg
from testtools import matchers
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import interface
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import processutils
from neutron.openstack.common import uuidutils
from neutron.tests import base
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
class TestBasicRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestBasicRouterOperations, self).setUp()
self.conf = cfg.ConfigOpts()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(l3_agent.L3NATAgent.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_root_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.set_override('router_id', 'fake_id')
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.root_helper = 'sudo'
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.send_arp_p = mock.patch(
'neutron.agent.l3_agent.L3NATAgent._send_gratuitous_arp_packet')
self.send_arp = self.send_arp_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3_agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.Mock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
def test_router_info_create(self):
id = _uuid()
ri = l3_agent.RouterInfo(id, self.conf.root_helper,
self.conf.use_namespaces, None)
self.assertTrue(ri.ns_name.endswith(id))
def test_router_info_create_with_router(self):
id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
ri = l3_agent.RouterInfo(id, self.conf.root_helper,
self.conf.use_namespaces, router)
self.assertTrue(ri.ns_name.endswith(id))
self.assertEqual(ri.router, router)
def test_agent_create(self):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
def _test_internal_network_action(self, action):
port_id = _uuid()
router_id = _uuid()
network_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces, None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
cidr = '99.0.1.9/24'
mac = 'ca:fe:de:ad:be:ef'
interface_name = agent.get_internal_device_name(port_id)
if action == 'add':
self.device_exists.return_value = False
agent.internal_network_added(ri, network_id,
port_id, cidr, mac)
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
self.send_arp.assert_called_once_with(ri, interface_name,
'99.0.1.9')
elif action == 'remove':
self.device_exists.return_value = True
agent.internal_network_removed(ri, port_id, cidr)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def test_agent_add_internal_network(self):
self._test_internal_network_action('add')
def test_agent_remove_internal_network(self):
self._test_internal_network_action('remove')
def _test_external_gateway_action(self, action):
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces, None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
internal_cidrs = ['100.0.1.0/24', '200.74.0.0/16']
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self.device_exists.return_value = False
ri.router = mock.Mock()
ri.router.get.return_value = [{'floating_ip_address':
'192.168.1.34'}]
agent.external_gateway_added(ri, ex_gw_port,
interface_name, internal_cidrs)
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
self.send_arp.assert_called_once_with(ri, interface_name,
'20.0.0.30')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router_id,
'gateway': '20.0.0.1'}
self.mock_driver.init_l3.assert_called_with(interface_name,
['20.0.0.30/24'],
**kwargs)
elif action == 'remove':
self.device_exists.return_value = True
agent.external_gateway_removed(ri, ex_gw_port,
interface_name, internal_cidrs)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def test_agent_add_external_gateway(self):
self._test_external_gateway_action('add')
def _test_arping(self, namespace):
if not namespace:
self.conf.set_override('use_namespaces', False)
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces, None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
floating_ip = '20.0.0.101'
interface_name = agent.get_external_device_name(router_id)
agent._arping(ri, interface_name, floating_ip)
arping_cmd = ['arping', '-A',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
floating_ip]
self.mock_ip.netns.execute.assert_any_call(
arping_cmd, check_exit_code=True)
def test_arping_namespace(self):
self._test_arping(namespace=True)
def test_arping_no_namespace(self):
self._test_arping(namespace=False)
def test_agent_remove_external_gateway(self):
self._test_external_gateway_action('remove')
def _check_agent_method_called(self, agent, calls, namespace):
self.mock_ip.netns.execute.assert_has_calls(
[mock.call(call, check_exit_code=False) for call in calls],
any_order=True)
def _test_routing_table_update(self, namespace):
if not namespace:
self.conf.set_override('use_namespaces', False)
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces,
None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}
fake_route2 = {'destination': '135.207.111.111/32',
'nexthop': '1.2.3.4'}
agent._update_routing_table(ri, 'replace', fake_route1)
expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'delete', fake_route1)
expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'replace', fake_route2)
expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'delete', fake_route2)
expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
def test_agent_routing_table_updated(self):
self._test_routing_table_update(namespace=True)
def test_agent_routing_table_updated_no_namespace(self):
self._test_routing_table_update(namespace=False)
def test_routes_updated(self):
self._test_routes_updated(namespace=True)
def test_routes_updated_no_namespace(self):
self._test_routes_updated(namespace=False)
def _test_routes_updated(self, namespace=True):
if not namespace:
self.conf.set_override('use_namespaces', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces,
None)
ri.router = {}
fake_old_routes = []
fake_new_routes = [{'destination': "110.100.31.0/24",
'nexthop': "10.100.10.30"},
{'destination': "110.100.30.0/24",
'nexthop': "10.100.10.30"}]
ri.routes = fake_old_routes
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
'via', '10.100.10.30'],
['ip', 'route', 'replace', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
fake_new_routes = [{'destination': "110.100.30.0/24",
'nexthop': "10.100.10.30"}]
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
fake_new_routes = []
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
def _verify_snat_rules(self, rules, router, negate=False):
interfaces = router[l3_constants.INTERFACE_KEY]
source_cidrs = []
for interface in interfaces:
prefix = interface['subnet']['cidr'].split('/')[1]
source_cidr = "%s/%s" % (interface['fixed_ips'][0]['ip_address'],
prefix)
source_cidrs.append(source_cidr)
source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
expected_rules = [
'! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
(interface_name, interface_name)]
for source_cidr in source_cidrs:
value_dict = {'source_cidr': source_cidr,
'source_nat_ip': source_nat_ip}
expected_rules.append('-s %(source_cidr)s -j SNAT --to-source '
'%(source_nat_ip)s' % value_dict)
for r in rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
def _prepare_router_data(self, enable_snat=None, num_internal_ports=1):
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
int_ports = []
for i in range(num_internal_ports):
int_ports.append({'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '35.4.%s.4' % i,
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '35.4.%s.0/24' % i,
'gateway_ip': '35.4.%s.1' % i}})
router = {
'id': router_id,
l3_constants.INTERFACE_KEY: int_ports,
'routes': [],
'gw_port': ex_gw_port}
if enable_snat is not None:
router['enable_snat'] = enable_snat
return router
def test_process_router(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fake_fip_id = 'fake_fip_id'
agent.process_router_floating_ip_addresses = mock.Mock()
agent.process_router_floating_ip_nat_rules = mock.Mock()
agent.process_router_floating_ip_addresses.return_value = {
fake_fip_id: 'ACTIVE'}
agent.external_gateway_added = mock.Mock()
router = self._prepare_router_data()
fake_floatingips1 = {'floatingips': [
{'id': fake_fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid()}]}
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
# remove just the floating ips
del router[l3_constants.FLOATINGIP_KEY]
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
# now no ports so state is torn down
del router[l3_constants.INTERFACE_KEY]
del router['gw_port']
agent.process_router(ri)
self.assertEqual(self.send_arp.call_count, 1)
self.assertFalse(agent.process_router_floating_ip_addresses.called)
self.assertFalse(agent.process_router_floating_ip_nat_rules.called)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_add(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1'
}
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = []
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
device.addr.add.assert_called_once_with(4, '15.1.2.3/32', '15.1.2.3')
def test_process_router_floating_ip_nat_rules_add(self):
fip = {
'id': _uuid(), 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1'
}
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_nat_rules(ri)
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
rules = agent.floating_forward_rules('15.1.2.3', '192.168.0.1')
for chain, rule in rules:
nat.add_rule.assert_any_call(chain, rule, tag='floating_ip')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remove(self, IPDevice):
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
ri = mock.MagicMock()
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({}, fip_statuses)
device.addr.delete.assert_called_once_with(4, '15.1.2.3/32')
def test_process_router_floating_ip_nat_rules_remove(self):
ri = mock.MagicMock()
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_nat_rules(ri)
        nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remap(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
self.assertFalse(device.addr.add.called)
self.assertFalse(device.addr.delete.called)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_with_disabled_floating_ip(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
ri = mock.MagicMock()
ri.floating_ips = [fip]
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertIsNone(fip_statuses.get(fip_id))
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
IPDevice.return_value = device = mock.Mock()
device.addr.add.side_effect = processutils.ProcessExecutionError
device.addr.list.return_value = []
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR},
fip_statuses)
def test_process_router_snat_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data(enable_snat=True)
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Reprocess without NAT
router['enable_snat'] = False
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(len(nat_rules_delta), 2)
self._verify_snat_rules(nat_rules_delta, router)
self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_snat_enabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data(enable_snat=False)
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process without NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Reprocess with NAT
router['enable_snat'] = True
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(len(nat_rules_delta), 2)
self._verify_snat_rules(nat_rules_delta, router)
self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data()
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Add an interface and reprocess
router[l3_constants.INTERFACE_KEY].append(
{'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '35.4.1.4',
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '35.4.1.0/24',
'gateway_ip': '35.4.1.1'}})
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(len(nat_rules_delta), 1)
self._verify_snat_rules(nat_rules_delta, router)
# send_arp is called both times process_router is called
self.assertEqual(self.send_arp.call_count, 2)
def test_process_router_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data(num_internal_ports=2)
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
        # Remove an interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(len(nat_rules_delta), 1)
self._verify_snat_rules(nat_rules_delta, router, negate=True)
# send_arp is called both times process_router is called
self.assertEqual(self.send_arp.call_count, 2)
def test_process_router_internal_network_added_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data()
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
with mock.patch.object(
l3_agent.L3NATAgent,
'internal_network_added') as internal_network_added:
# raise RuntimeError to simulate that an unexpected exception
            # occurs
internal_network_added.side_effect = RuntimeError
self.assertRaises(RuntimeError, agent.process_router, ri)
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_network_added.side_effect = None
# _sync_routers_task finds out that _rpc_loop failed to process the
            # router last time, so it will retry in the next run.
agent.process_router(ri)
# We were able to add the port to ri.internal_ports
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data()
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# add an internal port
agent.process_router(ri)
with mock.patch.object(
l3_agent.L3NATAgent,
'internal_network_removed') as internal_net_removed:
# raise RuntimeError to simulate that an unexpected exception
            # occurs
internal_net_removed.side_effect = RuntimeError
ri.internal_ports[0]['admin_state_up'] = False
# The above port is set to down state, remove it.
self.assertRaises(RuntimeError, agent.process_router, ri)
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_net_removed.side_effect = None
# _sync_routers_task finds out that _rpc_loop failed to process the
            # router last time, so it will retry in the next run.
agent.process_router(ri)
# We were able to remove the port from ri.internal_ports
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_floatingip_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = self._prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
agent.process_router(ri)
            # Assert that the call for putting the floating IP up was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE})
mock_update_fip_status.reset_mock()
# Process the router again, this time without floating IPs
router[l3_constants.FLOATINGIP_KEY] = []
ri.router = router
agent.process_router(ri)
            # Assert that the call for putting the floating IP down was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_DOWN})
def test_process_router_floatingip_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_addresses = mock.Mock()
agent.process_router_floating_ip_addresses.side_effect = RuntimeError
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = self._prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
agent.process_router(ri)
            # Assert that the call for putting the floating IP into Error
            # was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
def test_handle_router_snat_rules_add_back_jump(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = mock.MagicMock()
port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
agent._handle_router_snat_rules(ri, port, [], "iface", "add_rules")
nat = ri.iptables_manager.ipv4['nat']
nat.empty_chain.assert_any_call('snat')
nat.add_rule.assert_any_call('snat', '-j $float-snat')
for call in nat.mock_calls:
name, args, kwargs = call
if name == 'add_rule':
self.assertEqual(args, ('snat', '-j $float-snat'))
self.assertEqual(kwargs, {})
break
def test_handle_router_snat_rules_add_rules(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3_agent.RouterInfo(_uuid(), self.conf.root_helper,
self.conf.use_namespaces, None)
ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
internal_cidrs = ['10.0.0.0/24']
agent._handle_router_snat_rules(ri, ex_gw_port, internal_cidrs,
"iface", "add_rules")
nat_rules = map(str, ri.iptables_manager.ipv4['nat'].rules)
wrap_name = ri.iptables_manager.wrap_name
jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
wrap_name)
internal_net_rule = ("-A %s-snat -s %s -j SNAT --to-source %s") % (
wrap_name, internal_cidrs[0],
ex_gw_port['fixed_ips'][0]['ip_address'])
self.assertIn(jump_float_rule, nat_rules)
self.assertIn(internal_net_rule, nat_rules)
self.assertThat(nat_rules.index(jump_float_rule),
matchers.LessThan(nat_rules.index(internal_net_rule)))
def test_process_router_delete_stale_internal_devices(self):
class FakeDev(object):
def __init__(self, name):
self.name = name
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qr-a1b2c3d4-e5'),
FakeDev('qr-b2c3d4e5-f6')]
stale_devnames = [dev.name for dev in stale_devlist]
get_devices_return = []
get_devices_return.extend(stale_devlist)
self.mock_ip.get_devices.return_value = get_devices_return
router = self._prepare_router_data(enable_snat=True,
num_internal_ports=1)
ri = l3_agent.RouterInfo(router['id'],
self.conf.root_helper,
self.conf.use_namespaces,
router=router)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
self.assertEqual(len(internal_ports), 1)
internal_port = internal_ports[0]
with contextlib.nested(mock.patch.object(l3_agent.L3NATAgent,
'internal_network_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'internal_network_added'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_added')
) as (internal_network_removed,
internal_network_added,
external_gateway_removed,
external_gateway_added):
agent.process_router(ri)
self.assertEqual(external_gateway_added.call_count, 1)
self.assertFalse(external_gateway_removed.called)
self.assertFalse(internal_network_removed.called)
internal_network_added.assert_called_once_with(
ri,
internal_port['network_id'],
internal_port['id'],
internal_port['ip_cidr'],
internal_port['mac_address'])
self.assertEqual(self.mock_driver.unplug.call_count,
len(stale_devnames))
calls = [mock.call(stale_devname,
namespace=ri.ns_name,
prefix=l3_agent.INTERNAL_DEV_PREFIX)
for stale_devname in stale_devnames]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def test_process_router_delete_stale_external_devices(self):
class FakeDev(object):
def __init__(self, name):
self.name = name
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = self._prepare_router_data(enable_snat=True,
num_internal_ports=1)
del router['gw_port']
ri = l3_agent.RouterInfo(router['id'],
self.conf.root_helper,
self.conf.use_namespaces,
router=router)
self.mock_ip.get_devices.return_value = stale_devlist
agent.process_router(ri)
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge="br-ex",
namespace=ri.ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
def test_router_deleted(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.router_deleted(None, FAKE_ID)
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.removed_routers)
def test_routers_updated(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.routers_updated(None, [FAKE_ID])
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.updated_routers)
def test_removed_from_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.removed_routers)
def test_added_to_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.router_added_to_agent(None, [FAKE_ID])
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.updated_routers)
def test_process_router_delete(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
agent._router_added(router['id'], router)
agent.router_deleted(None, router['id'])
agent._process_router_delete()
self.assertFalse(list(agent.removed_routers))
def test_destroy_router_namespace_skips_ns_removal(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.assertEqual(self.mock_ip.netns.delete.call_count, 0)
def test_destroy_router_namespace_removes_ns(self):
self.conf.set_override('router_delete_namespaces', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.mock_ip.netns.delete.assert_called_once_with("fakens")
def _configure_metadata_proxy(self, enableflag=True):
if not enableflag:
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router = {'id': _uuid(),
'external_gateway_info': {},
'routes': []}
with mock.patch.object(
agent, '_destroy_metadata_proxy') as destroy_proxy:
with mock.patch.object(
agent, '_spawn_metadata_proxy') as spawn_proxy:
agent._router_added(router_id, router)
if enableflag:
spawn_proxy.assert_called_with(mock.ANY, mock.ANY)
else:
self.assertFalse(spawn_proxy.call_count)
agent._router_removed(router_id)
if enableflag:
destroy_proxy.assert_called_with(mock.ANY, mock.ANY)
else:
self.assertFalse(destroy_proxy.call_count)
def test_enable_metadata_proxy(self):
self._configure_metadata_proxy()
def test_disable_metadata_proxy_spawn(self):
self._configure_metadata_proxy(enableflag=False)
def test_metadata_nat_rules(self):
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual([], agent.metadata_nat_rules())
self.conf.set_override('metadata_port', '8775')
self.conf.set_override('enable_metadata_proxy', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
rules = ('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT --to-port 8775')
self.assertEqual([rules], agent.metadata_nat_rules())
def test_router_id_specified_in_conf(self):
self.conf.set_override('use_namespaces', False)
self.conf.set_override('router_id', '')
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
self.conf.set_override('router_id', '1234')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(['1234'], agent._router_ids())
self.assertFalse(agent._clean_stale_namespaces)
def test_process_routers_with_no_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_routers_with_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.assertFalse(self.plugin_api.get_external_network_id.called)
def test_process_routers_with_stale_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'bbb'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_routers_with_no_ext_net_in_conf_and_two_net_plugin(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent.router_info = {}
self.plugin_api.get_external_network_id.side_effect = (
n_exc.TooManyExternalNetworks())
self.assertRaises(n_exc.TooManyExternalNetworks,
agent._process_routers,
routers)
self.assertNotIn(routers[0]['id'], agent.router_info)
def test_process_routers_with_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}},
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}]
agent.router_info = {}
self.conf.set_override('gateway_external_network_id', 'aaa')
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.assertNotIn(routers[1]['id'], agent.router_info)
def test_process_routers_with_no_bridge_no_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}},
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}]
agent.router_info = {}
self.conf.set_override('external_network_bridge', '')
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.assertIn(routers[1]['id'], agent.router_info)
def test_nonexistent_interface_driver(self):
self.conf.set_override('interface_driver', None)
with mock.patch.object(l3_agent, 'LOG') as log:
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
msg = 'An interface driver must be specified'
log.error.assert_called_once_with(msg)
self.conf.set_override('interface_driver', 'wrong_driver')
with mock.patch.object(l3_agent, 'LOG') as log:
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
msg = "Error importing interface driver 'wrong_driver'"
log.error.assert_called_once_with(msg)
def test_metadata_filter_rules(self):
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual([], agent.metadata_filter_rules())
self.conf.set_override('metadata_port', '8775')
self.conf.set_override('enable_metadata_proxy', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
rules = ('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 '
'-p tcp -m tcp --dport 8775 -j ACCEPT')
self.assertEqual([rules], agent.metadata_filter_rules())
def _cleanup_namespace_test(self,
stale_namespace_list,
router_list,
other_namespaces):
self.conf.set_override('router_delete_namespaces', True)
good_namespace_list = [l3_agent.NS_PREFIX + r['id']
for r in router_list]
self.mock_ip.get_namespaces.return_value = (stale_namespace_list +
good_namespace_list +
other_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertTrue(agent._clean_stale_namespaces)
pm = self.external_process.return_value
pm.reset_mock()
agent._destroy_router_namespace = mock.MagicMock()
agent._cleanup_namespaces(router_list)
self.assertEqual(pm.disable.call_count, len(stale_namespace_list))
self.assertEqual(agent._destroy_router_namespace.call_count,
len(stale_namespace_list))
expected_args = [mock.call(ns) for ns in stale_namespace_list]
agent._destroy_router_namespace.assert_has_calls(expected_args,
any_order=True)
self.assertFalse(agent._clean_stale_namespaces)
def test_cleanup_namespace(self):
self.conf.set_override('router_id', None)
stale_namespaces = [l3_agent.NS_PREFIX + 'foo',
l3_agent.NS_PREFIX + 'bar']
other_namespaces = ['unknown']
self._cleanup_namespace_test(stale_namespaces,
[],
other_namespaces)
def test_cleanup_namespace_with_registered_router_ids(self):
self.conf.set_override('router_id', None)
stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
l3_agent.NS_PREFIX + 'eeeee']
router_list = [{'id': 'foo'}, {'id': 'aaaa'}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_cleanup_namespace_with_conf_router_id(self):
self.conf.set_override('router_id', 'bbbbb')
stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
l3_agent.NS_PREFIX + 'eeeee',
l3_agent.NS_PREFIX + self.conf.router_id]
router_list = [{'id': 'foo'}, {'id': 'aaaa'}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
class TestL3AgentEventHandler(base.BaseTestCase):
def setUp(self):
super(TestL3AgentEventHandler, self).setUp()
cfg.CONF.register_opts(l3_agent.L3NATAgent.OPTS)
agent_config.register_interface_driver_opts_helper(cfg.CONF)
agent_config.register_use_namespaces_opts_helper(cfg.CONF)
cfg.CONF.set_override(
'interface_driver', 'neutron.agent.linux.interface.NullDriver'
)
cfg.CONF.set_override('use_namespaces', True)
agent_config.register_root_helper(cfg.CONF)
device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
device_exists_p.start()
utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
utils_exec_p.start()
drv_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = drv_cls_p.start()
mock_driver = mock.MagicMock()
mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = mock_driver
l3_plugin_p = mock.patch(
'neutron.agent.l3_agent.L3PluginApi')
l3_plugin_cls = l3_plugin_p.start()
l3_plugin_cls.return_value = mock.Mock()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager'
)
self.external_process_p.start()
looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
looping_call_p.start()
self.agent = l3_agent.L3NATAgent(HOSTNAME)
def test_spawn_metadata_proxy(self):
router_id = _uuid()
metadata_port = 8080
ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper'
cfg.CONF.set_override('metadata_port', metadata_port)
cfg.CONF.set_override('log_file', 'test.log')
cfg.CONF.set_override('debug', True)
self.external_process_p.stop()
ns = 'qrouter-' + router_id
try:
with mock.patch(ip_class_path) as ip_mock:
self.agent._spawn_metadata_proxy(router_id, ns)
ip_mock.assert_has_calls([
mock.call('sudo', ns),
mock.call().netns.execute([
'neutron-ns-metadata-proxy',
mock.ANY,
mock.ANY,
'--router_id=%s' % router_id,
mock.ANY,
'--metadata_port=%s' % metadata_port,
'--debug',
'--log-file=neutron-ns-metadata-proxy-%s.log' %
router_id
])
])
finally:
self.external_process_p.start()
| apache-2.0 | 4,219,393,043,147,675,600 | 42.044882 | 79 | 0.562021 | false | 3.71253 | true | false | false |
dblalock/bolt | experiments/python/compress.py | 1 | 10068 | #!/usr/bin/env python
import numpy as np
import numba
import zstandard as zstd # pip install zstandard
# ================================================================ Funcs
def nbits_cost(diffs, signed=True):
"""
>>> [nbits_cost(i) for i in [0, 1, 2, 3, 4, 5, 7, 8, 9]]
[0, 2, 3, 3, 4, 4, 4, 5, 5]
>>> [nbits_cost(i) for i in [-1, -2, -3, -4, -5, -7, -8, -9]]
[1, 2, 3, 3, 4, 4, 4, 5]
>>> nbits_cost([])
array([], dtype=int32)
>>> nbits_cost([0, 2, 1, 0])
array([0, 3, 2, 0], dtype=int32)
>>> nbits_cost([0, 2, 1, 3, 4, 0], signed=False)
array([0, 2, 1, 2, 3, 0], dtype=int32)
"""
if diffs is None:
return None
diffs = np.asarray(diffs, dtype=np.int32)
if diffs.size == 0:
return np.array([], dtype=np.int32)
if not signed:
assert np.all(diffs >= 0)
pos_idxs = diffs > 0
nbits = np.zeros(diffs.shape, dtype=np.int32)
nbits[pos_idxs] = np.floor(np.log2(diffs[pos_idxs])) + 1
nbits[~pos_idxs] = 0
return nbits
# shape = diffs.shape
# diffs = diffs.ravel()
# zero_idxs = (diffs == 0)
# # nbits[zero_idxs] = 0
# nbits = np.zeros(len(diffs), dtype=np.int32)
# diffs = diffs[~zero_idxs]
# equiv_diffs = np.abs(diffs) + (diffs >= 0).astype(np.int32) # +1 if < 0
# # assert np.all(np.abs(diffs) > 0)
# # assert np.all(equiv_diffs > 0)
# nbits[~zero_idxs] = np.ceil(np.log2(equiv_diffs)) + 1
# nbits = np.asarray(nbits, dtype=np.int32) # next line can't handle scalar
# assert np.all(nbits >= 0)
shape = diffs.shape
diffs = diffs.ravel()
equiv_diffs = np.abs(diffs) + (diffs >= 0).astype(np.int32) # +1 if < 0
nbits = np.ceil(np.log2(equiv_diffs)) + 1
nbits = np.asarray(nbits, dtype=np.int32) # next line can't handle scalar
nbits[diffs == 0] = 0
assert np.all(nbits >= 0)
return nbits.reshape(shape) if nbits.size > 1 else nbits[0] # unpack if scalar
@numba.njit(fastmath=True)
def zigzag_encode(x):
"""
>>> [zigzag_encode(i) for i in [0,1,-1,2,-2,3,-3]]
[0, 1, 2, 3, 4, 5, 6]
>>> zigzag_encode([0,1,-1,2,-2,3,-3])
array([0, 1, 2, 3, 4, 5, 6], dtype=int32)
"""
x = np.asarray(x, dtype=np.int32)
return (np.abs(x) << 1) - (x > 0).astype(np.int32)
@numba.njit(fastmath=True)
def zigzag_decode(x):
return np.bitwise_xor(x >> 1, -np.bitwise_and(x, 1))
def quantize(X, nbits=16, minval=None, maxval=None):
minval = np.min(X) if minval is None else minval
maxval = np.max(X) if maxval is None else maxval
unsigned_max = (1 << nbits) - 1
dtype_min = 1 << (nbits - 1)
scale = float(unsigned_max) / maxval
X = np.maximum(0, X - minval)
X = np.minimum(unsigned_max, X * scale)
X -= dtype_min # center at 0
dtype = {16: np.int16, 12: np.int16, 8: np.int8}[nbits]
return X.astype(dtype)
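# Illustrative check (not from the original repo): with the default nbits=8,
# quantize(np.array([0., 5., 10.])) rescales against maxval=10 and recenters,
# yielding int8 values [-128, 0, 127].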
# ================================================================
def zstd_compress(buff, comp=None):
comp = zstd.ZstdCompressor() if comp is None else comp
if isinstance(buff, str):
buff = bytes(buff, encoding='utf8')
return comp.compress(buff)
def zstd_decompress(buff, decomp=None):
decomp = zstd.ZstdDecompressor() if decomp is None else decomp
    return decomp.decompress(buff)
# ============================================================== sprintz
# except without the predictive coding part because we do that manually;
# we also omit the run-length encoding because the author says that's a
# huge pain to code and won't change the results much for our fast-changing
# time series; also we don't do the grouping thing since it only
# affects the decoding speed (it could affect the ratio slightly if the
# number of variables were really low and not a multiple of 8, but neither
# is the case for us)
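# Illustrative sketch (not from the original repo): the predictive/delta step
# is assumed to happen outside this module; one hypothetical way to use the
# size estimator defined below is to difference each column and hand the
# residuals to sprintz_packed_size() (zigzag coding of signed residuals is
# applied internally when needed):
#
#     X = np.cumsum(np.random.randint(-3, 4, size=(64, 8)), axis=0)
#     resids = np.diff(X, axis=0, prepend=X[:1]).astype(np.int16)
#     nbytes = sprintz_packed_size(resids, nbits=16)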
# def bitpack_vec(x, nbits_per_element):
# n = len(x)
# total_nbits = n * nbits_per_element
# bitvec = np.zeros(total_nbits, dtype=np.bool)
# for i, val in enumerate(x):
# start_idx = i * nbits_per_element
# for b in range(nbits_per_element):
# bit = (val >> b) & 1
# bitvec[start_idx + b] = bit
# return np.packbits(bitvec)
# def bitunpack(X, nbits_per_element):
# was_1d = X.ndim == 1
# X = np.atleast_2d(X)
# N, D = X.shape
# ret = np.unpackbits(X, axis=1)
# if was_1d:
# ret = ret.squeeze()
# return ret
# @numba.njit(fastmath=True)
def bitpack(X, nbits_per_element):
was_1d = X.ndim == 1
X = np.atleast_2d(X)
N, D = X.shape
# orig_elemsz = X.dtype.itemsize
orig_elemsz_bits = 8 * X.dtype.itemsize
    assert X.dtype in (np.uint8, np.uint16)
if nbits_per_element == orig_elemsz_bits:
ret = X
elif X.dtype == np.uint8:
# print("N, D, nbits: ", N, D, nbits_per_element)
# shape = X.shape
X = X.ravel()
# unpacked = np.unpackbits(X, count=nbits_per_element, bitorder='little', axis=-1)
unpacked = np.unpackbits(X, bitorder='little', axis=-1)
# print("unpacked initial shape: ", unpacked.shape)
unpacked = unpacked.reshape(N * D, 8)[:, :nbits_per_element]
# print("unpacked new shape: ", unpacked.shape)
ret = np.packbits(unpacked.reshape(N, -1), axis=1)
# ret = ret.reshape(N, -1)
# print("ret.shape: ", ret.shape)
else:
# X_low = (X & 0xff)[:, :, np.newaxis]
# X_high = ((X & 0xff00) >> 8)[:, :, np.newaxis]
# X_combined = np.concatenate([X_low, X_high], axis=-1)
# X = X[:, :, np.newaxis]
# X = np.concatenate([X, X], axis=-1)
# X[:, :, 0] = X[:, :, 0] & 0xff
# X[:, :, 1] = (X[:, :, 1] & 0xff00) >> 8
# X = X.reshape(N, 2 * D).astype(np.uint8)
X = np.ascontiguousarray(X).view(np.uint8).reshape(N, 2 * D)
# print("X shape: ", X.shape)
unpacked = np.unpackbits(X, axis=1, bitorder='little')
unpacked = unpacked.reshape(N, orig_elemsz_bits, D)
# unpacked = unpacked[:, ::-1, :] # low bits in low idxs
unpacked = np.ascontiguousarray(unpacked[:, :nbits_per_element])
ret = np.packbits(unpacked.reshape(N, -1))
# nbits_per_row = D * nbits_per_element
# bitmat = np.zeros((N, nbits_per_row), dtype=np.uint8)
# for j in range(D):
# col = X[:, j]
# start_idx = j * nbits_per_element
# for b in range(nbits_per_element):
# bit = (col >> b) & 1
# bitmat[:, start_idx + b] = bit
# ret = np.packbits(bitmat, axis=1)
if was_1d:
ret = ret.squeeze()
return ret
@numba.njit(fastmath=True)
def _sprintz_header_sz(headers, header_elem_nbits):
_, D = headers.shape
header_row_sz = int(np.ceil(D * header_elem_nbits / 8))
rows_total_nbits = headers.sum(axis=1)
# zero_rows = rows_total_nbits == 0
# header_sz = np.sum(nzero_rows) # one byte for run length
# pair_sums = zero_rows +
header_sz = 0
prev_was_zero = False
for row in rows_total_nbits:
is_zero = row == 0
if is_zero:
if prev_was_zero:
continue
else:
header_sz += 1 # start of run
else:
header_sz += header_row_sz
prev_was_zero = is_zero
return header_sz
# def sprintz_packed_size(X, nbits=None, just_return_sz=False, postproc='zstd'):
def sprintz_packed_size(X, nbits=None, just_return_sz=True, postproc=None):
if nbits is None:
nbits = {1: 8, 2: 16}.get(X.dtype.itemsize, 16)
unsigned_dtype = {8: np.uint8, 16: np.uint16}[nbits]
window_len = 8
    pad_nrows = X.shape[0] % window_len
    if pad_nrows != 0:
        # pad with zero rows up to the next multiple of window_len
        pad_rows = np.zeros((window_len - pad_nrows, X.shape[1]), dtype=X.dtype)
        X = np.vstack([X, pad_rows])
N, D = X.shape
if X.dtype.itemsize > 2: # basically just catching floats
# print("sprintz: quantizing X...WTF")
X = quantize(X, nbits=nbits)
if np.min(X) < 0:
# print("sprintz: zigzag_encoding X!")
X = zigzag_encode(X).astype(unsigned_dtype)
# else:
# print("sprintz: not zigzag_encoding X!")
header_elem_nbits = {8: 3, 16: 4}[nbits]
X_nbits = nbits_cost(X, signed=False)
X_nbits = np.asfarray(X_nbits).reshape(N // window_len, window_len, -1)
block_nbits = X_nbits.max(axis=1).astype(np.uint8)
block_nbits[block_nbits == (nbits - 1)] = nbits
headers = block_nbits
if just_return_sz:
payload_sz = int(block_nbits.sum() * window_len / 8)
header_sz = _sprintz_header_sz(headers, header_elem_nbits)
# print("header sz: ", header_sz)
return header_sz + payload_sz
nwindows = N // window_len
payloads = []
for i in range(nwindows):
start_idx = i * window_len
end_idx = start_idx + window_len
X_slice = X[start_idx:end_idx]
for j in range(D):
col = X_slice[:, j]
payloads.append(bitpack(col, headers[i, j]))
headers = bitpack(headers, header_elem_nbits)
payloads = np.hstack(payloads)
if postproc is None:
return headers.nbytes + payloads.nbytes
elif postproc == 'zstd':
return len(zstd_compress(headers)) + len(zstd_compress(payloads))
# # nbits_slice = nbits_cost(X_slice, signed=False)
# nbits_slice = X_nbits[start_idx:end_idx]
# max_nbits = nbits_slice.max(axis=0)
# headers[i] = np.minimum(max_nbits, nbits - 1) # 8->7, 16->15
# max_nbits[max_nbits == nbits - 1] = nbits # 7->8, 15->16
# for j in range(D):
# col = X_slice[:, j]
# payloads.append(bitpack(col, max_nbits[j]))
# headers = bitpack(headers, header_elem_nbits)
# payloads = np.hstack(payloads)
# header_bytes = headers.tobytes()
# # payload_bytes = headers.tobytes()
# blosc.compress(buff, typesize=elem_sz,
# cname=compressor, shuffle=shuffle)
#
if __name__ == '__main__':
import doctest
doctest.testmod()
| mpl-2.0 | -8,800,742,138,520,516,000 | 31.794788 | 90 | 0.562078 | false | 2.918261 | false | false | false |
NoBodyCam/TftpPxeBootBareMetal | nova/api/openstack/wsgi.py | 1 | 42066 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
from xml.dom import minidom
from xml.parsers import expat
from lxml import etree
import webob
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger(__name__)
# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'application/json',
'application/vnd.openstack.compute+xml': 'application/xml',
}
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
'application/xml',
'application/vnd.openstack.compute+xml',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
'application/vnd.openstack.compute+xml': 'xml',
'application/xml': 'xml',
'application/atom+xml': 'atom',
}
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_instances': {}}
def cache_db_instances(self, instances):
"""
Allow API methods to store instances from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_instances = self._extension_data['db_instances']
for instance in instances:
db_instances[instance['uuid']] = instance
def cache_db_instance(self, instance):
"""
Allow API methods to store an instance from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
self.cache_db_instances([instance])
def get_db_instances(self):
"""
Allow an API extension to get previously stored instances within
the same API request.
Note that the instance data will be slightly stale.
"""
return self._extension_data['db_instances']
def get_db_instance(self, instance_uuid):
"""
Allow an API extension to get a previously stored instance
within the same API request.
Note that the instance data will be slightly stale.
"""
return self._extension_data['db_instances'].get(instance_uuid)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
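    # Illustrative example (not part of the original module): a request for
    # "/v2/servers/detail.json" resolves to "application/json" via the URL
    # suffix check above, while "/v2/servers/detail" falls back to the Accept
    # header and finally to "application/json" when nothing matches.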
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if not "Content-Type" in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in SUPPORTED_CONTENT_TYPES:
raise exception.InvalidContentType(content_type=content_type)
return content_type
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def find_attribute_or_element(self, parent, name):
"""Get an attribute value; fallback to an element if not found"""
if parent.hasAttribute(name):
return parent.getAttribute(name)
node = self.find_first_child_named(parent, name)
if node:
return self.extract_text(node)
return None
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request"""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
return jsonutils.dumps(data)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
    #NOTE (ameade): the has_atom flag should be removed once all of the
    # xml serializers and view builders have been updated to the current
    # spec, which requires all responses to include xmlns:atom; for now the
    # flag prevents current tests from breaking
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
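# Illustrative sketch (not part of the original module): the three decorators
# above only set attributes on the wrapped method; a controller method can
# combine them to bind (de)serializers and a default status code.
# "ServerTemplate" and "ServerXMLDeserializer" are hypothetical names used
# only for this example.
#
#     class ServersController(object):
#         @response(202)
#         @serializers(xml=ServerTemplate)
#         @deserializers(xml=ServerXMLDeserializer)
#         def create(self, req, body):
#             ...
#             return {'server': {'id': '...'}}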
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, headers=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = value
response.headers['Content-Type'] = content_type
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
def action_peek_xml(body):
"""Determine action to invoke."""
dom = minidom.parseString(body)
action_node = dom.childNodes[0]
return action_node.tagName
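# Illustrative example (not part of the original module): for a server action
# request body of '{"reboot": {"type": "HARD"}}', action_peek_json() returns
# "reboot"; action_peek_xml() does the same from the root element name of
# '<reboot type="HARD"/>'.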
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
msg = unicode(ex_value)
raise Fault(webob.exc.HTTPForbidden(explanation=msg))
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code, explanation=unicode(ex_value)))
# Under python 2.6, TypeError's exception value is actually a string,
        # so test here via ex_type instead:
# http://bugs.python.org/issue7853
elif issubclass(ex_type, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_('Exception handling resource: %s') % ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, inherits=None,
**deserializers):
"""
:param controller: object that implement methods created by routes lib
:param action_peek: dictionary of routines for peeking into an action
request body to determine the desired action
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
default_deserializers = dict(xml=XMLDeserializer,
json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(xml=XMLDictSerializer,
json=JSONDictSerializer)
self.action_peek = dict(xml=action_peek_xml,
json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return None, ''
if not content_type:
LOG.debug(_("No Content-Type provided in request"))
return None, ''
if len(request.body) <= 0:
LOG.debug(_("Empty body provided in request"))
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
_set_request_id_header(request, resp_obj)
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
        except AttributeError as e:
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s") % msg_dict
LOG.info(msg)
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
return method(req=request, **action_args)
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
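# Illustrative usage (not part of the original code): a controller method marked
# with @action is collected by ControllerMetaclass (defined below) into the
# class's wsgi_actions mapping, keyed by the given name, e.g.:
#
#     class MyController(Controller):
#         @action('my-action')
#         def _my_action(self, req, id, body):
#             ...
#
# would give MyController.wsgi_actions == {'my-action': '_my_action'}.
# MyController, 'my-action' and _my_action are hypothetical names for
# illustration only.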
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
class Controller(object):
"""Default controller."""
__metaclass__ = ControllerMetaclass
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
fault_data = {
fault_name: {
'code': code,
'message': self.wrapped_exc.explanation}}
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
content_type = req.best_match_content_type()
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
_set_request_id_header(req, self.wrapped_exc.headers)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {
"overLimitFault": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""
Return the wrapped exception with a serialized body conforming to our
error format.
"""
content_type = request.best_match_content_type()
metadata = {"attributes": {"overLimitFault": "code"}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
return self.wrapped_exc
def _set_request_id_header(req, headers):
context = req.environ.get('nova.context')
if context:
headers['x-compute-request-id'] = context.request_id
| apache-2.0 | -310,886,773,118,716,200 | 34.113523 | 79 | 0.590429 | false | 4.671924 | false | false | false |
thtrieu/qclass_dl | lstm.py | 1 | 3699 | import tensorflow as tf
from tensorflow.models.rnn import rnn_cell, rnn
from tensorflow.models.rnn import seq2seq
from tensorflow.models.rnn.ptb import reader
import numpy as np
class lstm_class(object):
def __init__(
self, embedding_mat, non_static, lstm_type, hidden_unit, sequence_length, num_classes, vocab_size,
embedding_size, l2_reg_lambda=0.0):
# Placeholders for input, output and dropout
self.batch_size = tf.placeholder(tf.int32, name = "batch_size")
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
self.real_len = tf.placeholder(tf.int32, [None], name = "real_len")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Lookup
with tf.device('/cpu:0'), tf.name_scope("embedding"):
if not non_static:
W = tf.constant(embedding_mat, name = "W")
else:
                W = tf.Variable(embedding_mat, name = "W")
inputs = tf.nn.embedding_lookup(W, self.input_x)
# LSTM
if lstm_type == "gru":
lstm_cell = rnn_cell.GRUCell(num_units = hidden_unit, input_size = embedding_size)
else:
if lstm_type == "basic":
lstm_cell = rnn_cell.BasicLSTMCell(num_units = hidden_unit, input_size = embedding_size)
else:
lstm_cell = rnn_cell.LSTMCell(num_units = hidden_unit, input_size = embedding_size, use_peepholes = True)
lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob = self.dropout_keep_prob)
self._initial_state = lstm_cell.zero_state(self.batch_size, tf.float32)
inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, sequence_length, inputs)]
outputs, state = rnn.rnn(lstm_cell, inputs, initial_state=self._initial_state, sequence_length = self.real_len)
        # Collect the appropriate last words into variable output (dimension = batch x hidden_unit)
output = outputs[0]
one = tf.ones([1, hidden_unit], tf.float32)
with tf.variable_scope("Output"):
tf.get_variable_scope().reuse_variables()
for i in range(1,len(outputs)):
ind = self.real_len < (i+1)
ind = tf.to_float(ind)
ind = tf.expand_dims(ind, -1)
mat = tf.matmul(ind, one)
output = tf.add(tf.mul(output, mat),tf.mul(outputs[i], 1.0 - mat))
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
self.W = tf.Variable(tf.truncated_normal([hidden_unit, num_classes], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(self.W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(output, self.W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
        # Calculate mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
| gpl-3.0 | 4,280,998,755,685,231,600 | 48.32 | 121 | 0.600703 | false | 3.45056 | false | false | false |
Kentzo/Power | power/win32.py | 1 | 3384 | # coding=utf-8
"""
Implements PowerManagement functions using GetSystemPowerStatus.
Requires Windows XP+.
Observing is not supported
"""
from ctypes import Structure, wintypes, POINTER, windll, WinError, pointer, WINFUNCTYPE
import warnings
from power import common
# GetSystemPowerStatus
# Returns brief description of current system power status.
# Windows XP+
# REQUIRED.
GetSystemPowerStatus = None
try:
GetSystemPowerStatus = windll.kernel32.GetSystemPowerStatus
class SYSTEM_POWER_STATUS(Structure):
_fields_ = [
('ACLineStatus', wintypes.c_ubyte),
('BatteryFlag', wintypes.c_ubyte),
('BatteryLifePercent', wintypes.c_ubyte),
('Reserved1', wintypes.c_ubyte),
('BatteryLifeTime', wintypes.DWORD),
('BatteryFullLifeTime', wintypes.DWORD)
]
GetSystemPowerStatus.argtypes = [POINTER(SYSTEM_POWER_STATUS)]
GetSystemPowerStatus.restype = wintypes.BOOL
except AttributeError as e:
raise RuntimeError("Unable to load GetSystemPowerStatus."
"The system does not provide it (Win XP is required) or kernel32.dll is damaged.")
POWER_TYPE_MAP = {
0: common.POWER_TYPE_BATTERY,
1: common.POWER_TYPE_AC,
255: common.POWER_TYPE_AC
}
class PowerManagement(common.PowerManagementBase):
def get_providing_power_source_type(self):
"""
Returns GetSystemPowerStatus().ACLineStatus
        @raise: WindowsError if any underlying error occurs.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
return POWER_TYPE_MAP[power_status.ACLineStatus]
def get_low_battery_warning_level(self):
"""
Returns warning according to GetSystemPowerStatus().BatteryLifeTime/BatteryLifePercent
        @raise WindowsError if any underlying error occurs.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.LOW_BATTERY_WARNING_NONE
else:
if power_status.BatteryLifeTime != -1 and power_status.BatteryLifeTime <= 600:
return common.LOW_BATTERY_WARNING_FINAL
elif power_status.BatteryLifePercent <= 22:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
def get_time_remaining_estimate(self):
"""
Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
elif power_status.BatteryLifeTime == -1:
return common.TIME_REMAINING_UNKNOWN
else:
return float(power_status.BatteryLifeTime) / 60.0
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
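# Illustrative usage sketch (not part of the original module), assuming it runs
# on Windows where GetSystemPowerStatus is available:
#
#     pm = PowerManagement()
#     if pm.get_providing_power_source_type() == common.POWER_TYPE_BATTERY:
#         remaining_minutes = pm.get_time_remaining_estimate()  # minutes, or a TIME_REMAINING_* constant
#         warning_level = pm.get_low_battery_warning_level()    # a LOW_BATTERY_WARNING_* constant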
| mit | -1,949,248,720,823,125,000 | 33.886598 | 105 | 0.663121 | false | 4.141983 | false | false | false |
tata-antares/LHCb-topo-trigger | BBDTconvert.py | 1 | 3846 | from __future__ import division, absolute_import
__author__ = 'Tatiana Likhomanenko'
import sys
import struct
from scipy.special import expit
import numpy
from rep_ef.estimators._matrixnetapplier import MatrixnetClassifier
def unpack_formula(formula_stream, print_=True):
features = list() # feature names
bins_quantities = list() # bins quantity for each feature
bins = list() # list for bins for each feature
bytes = formula_stream.read(4)
features_quantity = struct.unpack('i', bytes)[0]
for index in range(0, features_quantity):
bytes = formula_stream.read(4)
factor_length = struct.unpack('i', bytes)[0]
features.append(formula_stream.read(factor_length))
bytes = formula_stream.read(4) # skip formula length
used_features_quantity = struct.unpack('I', formula_stream.read(4))[0]
bins_quantities = struct.unpack(
'I' * used_features_quantity,
formula_stream.read(4 * used_features_quantity)
)
bins_total = struct.unpack('I', formula_stream.read(4))[0]
if print_:
print bins_total
for index in range(used_features_quantity):
bins.append(
struct.unpack(
'f' * bins_quantities[index],
formula_stream.read(4 * bins_quantities[index])
)
)
if print_:
print str(features[index]) + " - " + str(bins_quantities[index])
for j in range(len(bins[index])):
print bins[index][j]
print "------------"
return features, bins_quantities, bins
def convert_lookup_index_to_bins(points_in_bins, lookup_indices):
result = numpy.zeros([len(lookup_indices), len(points_in_bins)], dtype=float)
lookup_indices = lookup_indices.copy()
for i, points_in_variable in list(enumerate(points_in_bins))[::-1]:
print(points_in_variable)
n_columns = len(points_in_variable)
result[:, i] = points_in_variable[lookup_indices % n_columns]
lookup_indices //= n_columns
assert numpy.prod([len(x) for x in points_in_bins]) == len(lookup_indices)
return result
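# Illustrative worked example (not part of the original script): the lookup index
# is decoded like a mixed-radix number, with the last feature acting as the least
# significant "digit". E.g. with points_in_bins = [[0.5, 1.5], [10., 20., 30.]]
# there are 2 * 3 = 6 combinations, and lookup index 4 decodes to
# 4 % 3 = 1 -> 20. for the second feature, then 4 // 3 = 1 -> 1.5 for the first,
# i.e. the row [1.5, 20.].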
def write_formula(inp_file, out_file, threshold):
with open(inp_file) as formula_stream:
features, bins_quantities, bins = unpack_formula(formula_stream, False)
with open(inp_file) as formula_stream:
mx = MatrixnetClassifier(formula_stream)
bins_quantities = list(bins_quantities)
for i in xrange(len(bins)):
bins[i] = sorted(list(bins[i]))
bins[i] = [-10 * abs(bins[i][0])] + bins[i]
bins_quantities[i] += 1
bins_quantities = numpy.array(bins_quantities)
count = numpy.prod(bins_quantities)
points_in_bins = []
for i in range(len(features)):
edges = numpy.array(bins[i])
points_in = (edges[1:] + edges[:-1]) / 2.
points_in = numpy.array(list(points_in) + [edges[-1] + 1.])
points_in_bins.append(points_in)
with open(out_file, "w") as output_stream:
print "Total event count: " + str(count)
output_stream.write(str(len(features)) + " # feature count\n")
output_stream.write(" ".join([str(f) for f in features]) + " # features\n")
output_stream.write(" ".join([str(b) for b in bins_quantities]) + "\n")
for fbins in bins:
output_stream.write(" ".join([str(b) for b in fbins]) + "\n")
fbins.append(abs(fbins[-1]) * 3)
divider = 10000
output_stream.write(str(divider) + "\n")
events = convert_lookup_index_to_bins(points_in_bins, lookup_indices=numpy.arange(count))
predictions = expit(mx.apply(events))
assert len(predictions) == count
for q, pred in enumerate(predictions):
if pred > threshold:
output_stream.write(str(q) + " " + str(int(pred * divider)) + "\n") | apache-2.0 | -4,683,562,495,968,357,000 | 35.638095 | 97 | 0.615965 | false | 3.518756 | false | false | false |
midnightercz/pulp_docker | pulp-dev.py | 1 | 5020 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import sys
from pulp.devel import environment
WARNING_COLOR = '\033[31m'
WARNING_RESET = '\033[0m'
DIRS = ('/var/lib/pulp/published/docker/web',)
#
# Str entry assumes same src and dst relative path.
# Tuple entry is explicit (src, dst)
#
# Please keep alphabetized and by subproject
# Standard directories
DIR_PLUGINS = '/usr/lib/pulp/plugins'
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
LINKS = (
('plugins/etc/httpd/conf.d/pulp_docker.conf', '/etc/httpd/conf.d/pulp_docker.conf'),
('plugins/etc/pulp/server/plugins.conf.d/docker_distributor.json',
'/etc/pulp/server/plugins.conf.d/docker_distributor.json'),
)
def parse_cmdline():
"""
Parse and validate the command line options.
"""
parser = optparse.OptionParser()
parser.add_option('-I', '--install',
action='store_true',
help='install pulp development files')
parser.add_option('-U', '--uninstall',
action='store_true',
help='uninstall pulp development files')
parser.add_option('-D', '--debug',
action='store_true',
help=optparse.SUPPRESS_HELP)
parser.set_defaults(install=False,
uninstall=False,
debug=True)
opts, args = parser.parse_args()
if opts.install and opts.uninstall:
parser.error('both install and uninstall specified')
if not (opts.install or opts.uninstall):
parser.error('neither install or uninstall specified')
return (opts, args)
def warning(msg):
print "%s%s%s" % (WARNING_COLOR, msg, WARNING_RESET)
def debug(opts, msg):
if not opts.debug:
return
sys.stderr.write('%s\n' % msg)
def create_dirs(opts):
for d in DIRS:
if os.path.exists(d) and os.path.isdir(d):
debug(opts, 'skipping %s exists' % d)
continue
debug(opts, 'creating directory: %s' % d)
os.makedirs(d, 0777)
def getlinks():
links = []
for l in LINKS:
if isinstance(l, (list, tuple)):
src = l[0]
dst = l[1]
else:
src = l
dst = os.path.join('/', l)
links.append((src, dst))
return links
def install(opts):
# Install the packages in developer mode
environment.manage_setup_pys('install', ROOT_DIR)
warnings = []
create_dirs(opts)
# Ensure the directory is owned by apache
os.system('chown -R apache:apache /var/lib/pulp/published/docker')
currdir = os.path.abspath(os.path.dirname(__file__))
for src, dst in getlinks():
warning_msg = create_link(opts, os.path.join(currdir, src), dst)
if warning_msg:
warnings.append(warning_msg)
if warnings:
print "\n***\nPossible problems: Please read below\n***"
for w in warnings:
warning(w)
return os.EX_OK
def uninstall(opts):
for src, dst in getlinks():
debug(opts, 'removing link: %s' % dst)
if not os.path.islink(dst):
debug(opts, '%s does not exist, skipping' % dst)
continue
os.unlink(dst)
# Uninstall the packages
environment.manage_setup_pys('uninstall', ROOT_DIR)
return os.EX_OK
def create_link(opts, src, dst):
if not os.path.lexists(dst):
return _create_link(opts, src, dst)
if not os.path.islink(dst):
return "[%s] is not a symbolic link as we expected, please adjust if this " \
"is not what you intended." % (dst)
if not os.path.exists(os.readlink(dst)):
warning('BROKEN LINK: [%s] attempting to delete and fix it to point to %s.' % (dst, src))
try:
os.unlink(dst)
return _create_link(opts, src, dst)
except:
msg = "[%s] was a broken symlink, failed to delete and relink to [%s], " \
"please fix this manually"\
% (dst, src)
return msg
debug(opts, 'verifying link: %s points to %s' % (dst, src))
dst_stat = os.stat(dst)
src_stat = os.stat(src)
if dst_stat.st_ino != src_stat.st_ino:
msg = "[%s] is pointing to [%s] which is different than the intended target [%s]"\
% (dst, os.readlink(dst), src)
return msg
def _create_link(opts, src, dst):
debug(opts, 'creating link: %s pointing to %s' % (dst, src))
try:
os.symlink(src, dst)
except OSError, e:
msg = "Unable to create symlink for [%s] pointing to [%s], received error: <%s>"\
% (dst, src, e)
return msg
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# TODO add something to check for permissions
opts, args = parse_cmdline()
if opts.install:
sys.exit(install(opts))
if opts.uninstall:
sys.exit(uninstall(opts))
| gpl-2.0 | -7,254,177,743,593,166,000 | 27.202247 | 97 | 0.566932 | false | 3.669591 | false | false | false |
hanyangii/SummThing | bptree.py | 1 | 18095 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import bisect
import itertools
import operator
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class _BNode(object):
__slots__ = ["tree", "contents", "children"]
def __init__(self, tree, contents=None, children=None):
self.tree = tree
self.contents = contents or []
self.children = children or []
if self.children:
assert len(self.contents) + 1 == len(self.children), \
"one more child than data item required"
def __repr__(self):
name = getattr(self, "children", 0) and "Branch" or "Leaf"
return "<%s %s>" % (name, ", ".join(map(str, self.contents)))
def lateral(self, parent, parent_index, dest, dest_index):
if parent_index > dest_index:
dest.contents.append(parent.contents[dest_index])
parent.contents[dest_index] = self.contents.pop(0)
if self.children:
dest.children.append(self.children.pop(0))
else:
dest.contents.insert(0, parent.contents[parent_index])
parent.contents[parent_index] = self.contents.pop()
if self.children:
dest.children.insert(0, self.children.pop())
def shrink(self, ancestors):
parent = None
if ancestors:
parent, parent_index = ancestors.pop()
# try to lend to the left neighboring sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, left_sib, parent_index - 1)
return
# try the right neighbor
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, right_sib, parent_index + 1)
return
center = len(self.contents) // 2
sibling, push = self.split()
if not parent:
parent, parent_index = self.tree.BRANCH(
self.tree, children=[self]), 0
self.tree._root = parent
# pass the median up to the parent
parent.contents.insert(parent_index, push)
parent.children.insert(parent_index + 1, sibling)
if len(parent.contents) > parent.tree.order:
parent.shrink(ancestors)
def grow(self, ancestors):
parent, parent_index = ancestors.pop()
minimum = self.tree.order // 2
left_sib = right_sib = None
# try to borrow from the right sibling
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) > minimum:
right_sib.lateral(parent, parent_index + 1, self, parent_index)
return
# try to borrow from the left sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) > minimum:
left_sib.lateral(parent, parent_index - 1, self, parent_index)
return
# consolidate with a sibling - try left first
if left_sib:
left_sib.contents.append(parent.contents[parent_index - 1])
left_sib.contents.extend(self.contents)
if self.children:
left_sib.children.extend(self.children)
parent.contents.pop(parent_index - 1)
parent.children.pop(parent_index)
else:
self.contents.append(parent.contents[parent_index])
self.contents.extend(right_sib.contents)
if self.children:
self.children.extend(right_sib.children)
parent.contents.pop(parent_index)
parent.children.pop(parent_index + 1)
if len(parent.contents) < minimum:
if ancestors:
# parent is not the root
parent.grow(ancestors)
elif not parent.contents:
# parent is root, and its now empty
self.tree._root = left_sib or self
def split(self):
center = len(self.contents) // 2
median = self.contents[center]
sibling = type(self)(
self.tree,
self.contents[center + 1:],
self.children[center + 1:])
self.contents = self.contents[:center]
self.children = self.children[:center + 1]
return sibling, median
def insert(self, index, item, ancestors):
self.contents.insert(index, item)
if len(self.contents) > self.tree.order:
self.shrink(ancestors)
def remove(self, index, ancestors):
minimum = self.tree.order // 2
if self.children:
# find the smallest in the right subtree, exchange the value with the current node
# then delete the smallest one, just like the idea in the binary search tree.
# Note: only if len(descendent.contents) > minimum, we do this way in order to avoid 'grow' operation.
# Or we will inspect the left tree and do it any way
# all internal nodes have both left and right subtree.
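            # Illustrative example (not from the original code): removing 20 from an
            # internal node with contents [10, 20] copies up its successor -- the
            # smallest key in the child just to the right of 20 -- into its place,
            # and the actual deletion then happens down in that leaf, where any
            # rebalancing ('grow') is cheap to handle.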
additional_ancestors = [(self, index + 1)]
descendent = self.children[index + 1]
while descendent.children:
additional_ancestors.append((descendent, 0))
descendent = descendent.children[0]
if len(descendent.contents) > minimum:
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[0]
descendent.remove(0, ancestors)
return
# fall back to the left child, and exchange with the biggest, then delete the biggest anyway.
additional_ancestors = [(self, index)]
descendent = self.children[index]
while descendent.children:
additional_ancestors.append(
(descendent, len(descendent.children) - 1))
descendent = descendent.children[-1]
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[-1]
descendent.remove(len(descendent.children) - 1, ancestors)
else:
self.contents.pop(index)
if len(self.contents) < minimum and ancestors:
self.grow(ancestors)
class _BPlusLeaf(_BNode):
__slots__ = ["tree", "contents", "data", "next"]
def __init__(self, tree, contents=None, data=None, next=None):
self.tree = tree
self.contents = contents or []
self.data = data or []
self.next = next
assert len(self.contents) == len(self.data), "one data per key"
def insert(self, index, key, data, ancestors):
self.contents.insert(index, key)
self.data.insert(index, data)
if len(self.contents) > self.tree.order:
self.shrink(ancestors)
def lateral(self, parent, parent_index, dest, dest_index):
if parent_index > dest_index:
dest.contents.append(self.contents.pop(0))
dest.data.append(self.data.pop(0))
parent.contents[dest_index] = self.contents[0]
else:
dest.contents.insert(0, self.contents.pop())
dest.data.insert(0, self.data.pop())
parent.contents[parent_index] = dest.contents[0]
def split(self):
center = len(self.contents) // 2
median = self.contents[center - 1]
sibling = type(self)(
self.tree,
self.contents[center:],
self.data[center:],
self.next)
self.contents = self.contents[:center]
self.data = self.data[:center]
self.next = sibling
return sibling, sibling.contents[0]
def remove(self, index, ancestors):
minimum = self.tree.order // 2
if index >= len(self.contents):
self, index = self.next, 0
key = self.contents[index]
# if any leaf that could accept the key can do so
# without any rebalancing necessary, then go that route
current = self
while current is not None and current.contents[0] == key:
if len(current.contents) > minimum:
if current.contents[0] == key:
index = 0
else:
index = bisect.bisect_left(current.contents, key)
current.contents.pop(index)
current.data.pop(index)
return
current = current.next
self.grow(ancestors)
def grow(self, ancestors):
minimum = self.tree.order // 2
parent, parent_index = ancestors.pop()
left_sib = right_sib = None
# try borrowing from a neighbor - try right first
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) > minimum:
right_sib.lateral(parent, parent_index + 1, self, parent_index)
return
# fallback to left
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) > minimum:
left_sib.lateral(parent, parent_index - 1, self, parent_index)
return
# join with a neighbor - try left first
if left_sib:
left_sib.contents.extend(self.contents)
left_sib.data.extend(self.data)
parent.remove(parent_index - 1, ancestors)
return
# fallback to right
self.contents.extend(right_sib.contents)
self.data.extend(right_sib.data)
parent.remove(parent_index, ancestors)
class BTree(object):
BRANCH = LEAF = _BNode
def __init__(self, order):
self.order = order
self._root = self._bottom = self.LEAF(self)
def _path_to(self, item):
"""
"""
current = self._root
ancestry = []
while getattr(current, "children", None):
index = bisect.bisect_left(current.contents, item)
ancestry.append((current, index))
if index < len(current.contents) \
and current.contents[index] == item:
return ancestry
current = current.children[index]
index = bisect.bisect_left(current.contents, item)
ancestry.append((current, index))
present = index < len(current.contents)
present = present and current.contents[index] == item
return ancestry
def _present(self, item, ancestors):
last, index = ancestors[-1]
return index < len(last.contents) and last.contents[index] == item
def insert(self, item):
current = self._root
ancestors = self._path_to(item)
node, index = ancestors[-1]
while getattr(node, "children", None):
node = node.children[index]
index = bisect.bisect_left(node.contents, item)
ancestors.append((node, index))
node, index = ancestors.pop()
node.insert(index, item, ancestors)
def remove(self, item):
current = self._root
ancestors = self._path_to(item)
if self._present(item, ancestors):
node, index = ancestors.pop()
node.remove(index, ancestors)
else:
raise ValueError("%r not in %s" % (item, self.__class__.__name__))
def __contains__(self, item):
return self._present(item, self._path_to(item))
def __iter__(self):
def _recurse(node):
if node.children:
for child, item in zip(node.children, node.contents):
for child_item in _recurse(child):
yield child_item
yield item
for child_item in _recurse(node.children[-1]):
yield child_item
else:
for item in node.contents:
yield item
for item in _recurse(self._root):
yield item
def __repr__(self):
def recurse(node, accum, depth):
accum.append((" " * depth) + repr(node))
for node in getattr(node, "children", []):
recurse(node, accum, depth + 1)
accum = []
recurse(self._root, accum, 0)
return "\n".join(accum)
@classmethod
def bulkload(cls, items, order):
tree = object.__new__(cls)
tree.order = order
leaves = tree._build_bulkloaded_leaves(items)
tree._build_bulkloaded_branches(leaves)
return tree
def _build_bulkloaded_leaves(self, items):
minimum = self.order // 2
leaves, seps = [[]], []
for item in items:
if len(leaves[-1]) < self.order:
leaves[-1].append(item)
else:
seps.append(item)
leaves.append([])
if len(leaves[-1]) < minimum and seps:
last_two = leaves[-2] + [seps.pop()] + leaves[-1]
leaves[-2] = last_two[:minimum]
leaves[-1] = last_two[minimum + 1:]
seps.append(last_two[minimum])
return [self.LEAF(self, contents=node) for node in leaves], seps
def _build_bulkloaded_branches(self, (leaves, seps)):
minimum = self.order // 2
levels = [leaves]
while len(seps) > self.order + 1:
items, nodes, seps = seps, [[]], []
for item in items:
if len(nodes[-1]) < self.order:
nodes[-1].append(item)
else:
seps.append(item)
nodes.append([])
if len(nodes[-1]) < minimum and seps:
last_two = nodes[-2] + [seps.pop()] + nodes[-1]
nodes[-2] = last_two[:minimum]
nodes[-1] = last_two[minimum + 1:]
seps.append(last_two[minimum])
offset = 0
for i, node in enumerate(nodes):
children = levels[-1][offset:offset + len(node) + 1]
nodes[i] = self.BRANCH(self, contents=node, children=children)
offset += len(node) + 1
levels.append(nodes)
self._root = self.BRANCH(self, contents=seps, children=levels[-1])
class BPlusTree(BTree):
LEAF = _BPlusLeaf
def _get(self, key):
node, index = self._path_to(key)[-1]
if index == len(node.contents):
if node.next:
node, index = node.next, 0
else:
return
while node.contents[index] == key:
yield node.data[index]
index += 1
if index == len(node.contents):
if node.next:
node, index = node.next, 0
else:
return
def _path_to(self, item):
path = super(BPlusTree, self)._path_to(item)
node, index = path[-1]
while hasattr(node, "children"):
node = node.children[index]
index = bisect.bisect_left(node.contents, item)
path.append((node, index))
return path
def get(self, key, default=None):
try:
return self._get(key).next()
except StopIteration:
return default
def getlist(self, key):
return list(self._get(key))
def insert(self, key, data):
path = self._path_to(key)
node, index = path.pop()
node.insert(index, key, data, path)
def remove(self, key):
path = self._path_to(key)
node, index = path.pop()
node.remove(index, path)
__getitem__ = get
__setitem__ = insert
__delitem__ = remove
def __contains__(self, key):
for item in self._get(key):
return True
return False
def iteritems(self):
node = self._root
while hasattr(node, "children"):
node = node.children[0]
while node:
for pair in itertools.izip(node.contents, node.data):
yield pair
node = node.next
def iterkeys(self):
return itertools.imap(operator.itemgetter(0), self.iteritems())
def itervalues(self):
return itertools.imap(operator.itemgetter(1), self.iteritems())
__iter__ = iterkeys
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def _build_bulkloaded_leaves(self, items):
minimum = self.order // 2
leaves, seps = [[]], []
for item in items:
if len(leaves[-1]) >= self.order:
seps.append(item)
leaves.append([])
leaves[-1].append(item)
if len(leaves[-1]) < minimum and seps:
last_two = leaves[-2] + leaves[-1]
leaves[-2] = last_two[:minimum]
leaves[-1] = last_two[minimum:]
seps.append(last_two[minimum])
leaves = [self.LEAF(
self,
contents=[p[0] for p in pairs],
data=[p[1] for p in pairs])
for pairs in leaves]
for i in xrange(len(leaves) - 1):
leaves[i].next = leaves[i + 1]
return leaves, [s[0] for s in seps]
def main():
bt = BTree(2)
l = range(20, 0, -1)
bt.insert('박씨')
bt.insert('정씨')
bt.insert('김씨')
bt.insert('이씨')
bt.insert('황씨')
    BP = BPlusTree(2)
BP.insert('박', '박씨')
for i in range(0,5):
print list(bt)[i]
print BP.values()[0]
print BP.keys()[0]
print BP.items()[0]
print BP.get('박씨')
#for i, item in enumerate(l):
# bt.insert(item)
# print list(bt)
if __name__ == '__main__':
#unittest.main()
main()
| apache-2.0 | -7,392,944,706,407,312,000 | 32.207721 | 114 | 0.543814 | false | 4.040483 | false | false | false |
authman/Python201609 | Nguyen_Ken/Assignments/Flask with MySQL/full_friends/server.py | 1 | 2098 | from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import MySQLConnector
app = Flask(__name__)
app.secret_key = 'secretsquirrel'
mysql = MySQLConnector(app, 'friendsdb')
@app.route('/')
def index():
showQuery = 'SELECT * FROM friends'
friends = mysql.query_db(showQuery)
return render_template('index.html', all_friends = friends)
@app.route('/friends/<friend_id>/edit')
def edit(friend_id):
friend_id = friend_id
return render_template('edit.html', friend_id = friend_id)
@app.route('/friends/<friend_id>', methods=['POST'])
def update(friend_id):
data = {
'first_name' : request.form['first_name'],
'last_name' : request.form['last_name'],
'occupation' : request.form['occupation'],
'id' : friend_id
}
updateQuery = "UPDATE friends SET first_name = :first_name, last_name = :last_name, occupation = :occupation WHERE id = :id"
mysql.query_db(updateQuery, data)
return redirect('/')
@app.route('/friends', methods=['POST'])
def create():
data = {
'first_name' : request.form['first_name'],
'last_name' : request.form['last_name'],
'occupation' : request.form['occupation']
}
createQuery = 'INSERT INTO friends (first_name, last_name, occupation, created_at, updated_at) VALUES (:first_name, :last_name, :occupation, NOW(), NOW())'
mysql.query_db(createQuery, data)
return redirect('/')
@app.route('/friends/<friend_id>/confirm')
def confirm(friend_id):
data = {
'id' : friend_id
}
friend_id = friend_id
singleFriendQuery = 'SELECT * FROM friends WHERE id = :id'
oneFriend = mysql.query_db(singleFriendQuery, data)
return render_template('delete.html', friend_id = friend_id, oneFriend = oneFriend)
@app.route('/friends/<friend_id>/delete', methods=['POST'])
def destroy(friend_id):
data = {'id' : friend_id}
deleteQuery = 'DELETE FROM friends WHERE id = :id'
mysql.query_db(deleteQuery, data)
return redirect('/')
app.run(debug=True)
| mit | -6,934,720,650,099,028,000 | 22.054945 | 159 | 0.63918 | false | 3.439344 | false | false | false |
vdv7/stap | tasks/stdio/pvt.py | 1 | 1754 | #!/usr/bin/env python3
'''Psychomotor Vigilance Task'''
#########################################################
# STAP constants and stdio
import json,sys
if 'raw_input' in vars(__builtins__): input = raw_input #Fix for Python 2.x raw_input
def send(d): print(json.dumps(d)); sys.stdout.flush()
def recv(): return json.loads(input())
CLEAR = None
def obj(id=None,content=NotImplemented,**options):
if id is not None: options['id']=id
if content is not NotImplemented: options['v']=content
return options
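# Illustrative example (not part of the original task): obj('Trial', 1, max=TRIALS)
# builds the dict {'id': 'Trial', 'v': 1, 'max': 10}, so send([obj('Trial', 1, max=TRIALS)])
# writes the JSON line [{"id": "Trial", "v": 1, "max": 10}] to stdout (key order may
# vary), and recv() parses one JSON line from stdin the same way.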
#########################################################
import random,statistics
TRIALS = 10
INSTRUCTIONS = 'Click a button when one appears here'
BUTTON = obj('Click Me',False,onin={'v':CLEAR})
def main():
log=[]
ums=0
#announce required options
send({'require':{'options':['U','onin']},'template':'[type="bin"][level="1"]{height:200px}'})
#display Trial and instructions containers; let user software know that any buttons inside the instructions container should be deleted once user-input (i.e. click) is detected
send([ obj('Trial',1,max=TRIALS),
obj(INSTRUCTIONS,[]) ])
#do trials
for trial in range(1,TRIALS+1):
#set random time for button appearance
buttonAppearanceTime=ums+random.randrange(2000,10000)
#update trial time, wait till buttonAppearanceTime, then add the 'Click me' button
send([ obj('Trial',trial),
obj(INSTRUCTIONS, [BUTTON], U=buttonAppearanceTime) ])
#get participant action
ums=recv()[0]
log.append(ums-buttonAppearanceTime)
send([ obj('Your response time is',log[-1],unit='ms') ])
#display goodbye message in popup
send([ CLEAR,
obj('Your mean response time is',statistics.mean(log)),
'Thank you for your participation.' ])
if __name__=='__main__': main()
| mit | 8,034,561,188,412,784,000 | 30.321429 | 177 | 0.657925 | false | 3.296992 | false | false | false |
cheery/pyllisp | compiler/__init__.py | 1 | 33018 | from rpython.rlib import jit
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.objectmodel import always_inline, specialize
import base
import reader
import space
class ProgramBody:
def __init__(self, blocks, functions, is_generator):
self.blocks = reverse_postorder(blocks[0])
self.functions = functions
self.is_generator = is_generator
self.tmpc = 0
allocate_tmp(self)
@specialize.argtype(0)
def reversed(seq):
for i in range(len(seq), 0, -1):
yield seq[i-1]
# Since the frame is virtualizable now, it copies everything
# from tmp to juggle them.
# Instead of using separate index for every temporary value,
# we can do some live range analysis and reuse the indices
# for items that are guaranteed not to be simultaneously live.
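# Illustrative sketch (not from the original code) of the reuse this enables:
# if temporary A is live over instructions 0..3 and temporary B only over 5..8,
# their live ranges do not overlap, so both can be assigned the same frame slot
# (op.i), and tmpc stays smaller than the total number of operations.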
def allocate_tmp(body):
index = 0
base = 0
for block in body.blocks:
block.base = base
block.index = index
block.depends = {}
index += 1
base += len(block)
done = False
while not done:
done = True
for block in reversed(body.blocks):
N = len(block.depends)
for succ in block.succ:
block.depends.update(succ.depends)
for op in reversed(block):
if op in block.depends:
block.depends.pop(op)
for use in op.uses():
block.depends[use] = None
M = len(block.depends)
if N < M:
done = False
live_ranges = {}
for block in body.blocks:
for op in block.depends:
plot_range(live_ranges, op, block.base)
for succ in block.succ:
assert succ.index >= 0
for op in succ.depends:
plot_range(live_ranges, op, block.base + len(block))
i = 0
for op in block:
plot_range(live_ranges, op, block.base+i)
for use in op.uses():
plot_range(live_ranges, use, block.base+i+1)
i += 1
starts = []
stops = []
avail = []
for op, (start, stop) in live_ranges.iteritems():
starts.append((start, stop, op))
sort_starts(starts).sort()
for current, stop, op in starts:
assert current <= stop
if len(avail) > 0:
op.i = avail.pop()
else:
op.i = body.tmpc
body.tmpc += 1
stops.append((stop, op))
sort_ends(stops).sort()
while len(stops) > 0 and stops[0][0] < current:
_, exp = stops.pop(0)
assert exp.i not in avail
avail.append(exp.i)
sort_starts = make_timsort_class(lt=lambda x, y: x[0] < y[0])
sort_ends = make_timsort_class(lt=lambda x, y: x[0] < y[0])
# This is just here in case my register alloc func messes up
# But I will require better tools for debugging my dumps.
# if True:
# tab = {}
# def opval_repr(op):
# return "%s:r%d" % (tab[op], op.i)
# for block in body.blocks:
# i = block.base
# for op in block:
# tab[op] = i
# i += 1
# for block in body.blocks:
# i = block.base
# for op in block:
# if op.start:
# print ("%s:" % op.start.repr()).ljust(8),
# else:
# print "".ljust(8)
# if isinstance(op, Constant):
# print "%4i: r%d = const %s" % (i, op.i, op.value.repr())
# elif isinstance(op, Variable):
# print "%4i: r%d = var %s" % (i, op.i, op.name)
# elif isinstance(op, SetLocal):
# print "%4i: r%d = var %s <- %s" % (i, op.i, op.name, opval_repr(op.value))
# else:
# print "%4i: r%d = %s (%s)" % (i, op.i, op.__class__.__name__, ' '.join(map(opval_repr, op.uses())))
# i += 1
# print "TMPC %d" % body.tmpc
def plot_range(ranges, key, pos):
if key not in ranges:
ranges[key] = (pos, pos)
else:
start, stop = ranges[key]
ranges[key] = (min(start, pos), max(stop, pos))
def reverse_postorder(entry):
seq = postorder_visit([], entry)
seq.reverse()
return seq
def postorder_visit(sequence, block):
if block.visited:
return
block.visited = True
for succ in block.succ:
postorder_visit(sequence, succ)
sequence.append(block)
return sequence
class ActivationRecord:
_immutable_fields_ = ['module', 'parent']
def __init__(self, module, parent):
self.var = {}
self.module = module
self.parent = parent
class Program(space.Object):
_immutable_fields_ = ['body']
def __init__(self, body):
self.body = body
def call(self, argv):
if len(argv) != 1:
raise space.Error(u"program expects module as an argument")
module = argv[0]
assert isinstance(module, space.Module)
frame = ActivationRecord(module, None)
return interpret(self.body, frame)
class Closure(space.Object):
_immutable_fields_ = ['frame', 'func']
def __init__(self, frame, func):
self.frame = frame
self.func = func
def call(self, argv):
argc = len(self.func.args)
if len(argv) < argc:
raise space.Error(u"closure requires %d arguments" % argc)
frame = ActivationRecord(self.frame.module, self.frame)
for i in range(argc):
frame.var[self.func.args[i]] = argv[i]
return interpret(self.func.body, frame)
class Generator(space.Object):
_immutable_fields_ = ['tmp', 'frame']
def __init__(self, block, tmp, frame, loop_break, op_i):
self.block = block
self.tmp = tmp
self.frame = frame
self.loop_break = loop_break
self.op_i = op_i
def iter(self):
return self
@Generator.builtin_method
def next(argv):
self = argv[0]
assert isinstance(self, Generator)
if len(argv) > 1:
self.tmp[self.op_i] = argv[1]
else:
self.tmp[self.op_i] = space.null
try:
interpret_body(self.block, self.tmp, self.frame, self.loop_break)
raise StopIteration()
except YieldIteration as yi:
self.block = yi.block
self.loop_break = yi.loop_break
self.op_i = yi.op_i
return yi.value
class YieldIteration(Exception):
_immutable_fields_ = ['block', 'loop_break', 'op_i', 'value']
def __init__(self, block, loop_break, op_i, value):
self.block = block
self.loop_break = loop_break
self.op_i = op_i
self.value = value
class Block:
_immutable_fields_ = ['index', 'contents[*]', 'succ']
def __init__(self, index, contents, succ):
self.index = index
self.contents = None
self.contents_mut = []
self.succ = succ
self.visited = False
def __iter__(self):
return iter(self.contents)
def __getitem__(self, index):
return self.contents[index]
def __len__(self):
return len(self.contents)
def append(self, op):
assert isinstance(op, Op)
self.contents_mut.append(op)
def freeze(self):
self.contents = self.contents_mut[:]
self.contents_mut = None
# def label(self):
# return "b" + str(self.index)
#
# def repr(self):
# out = "b" + str(self.index) + ":"
# for op in self:
# out += '\n '
# out += op.repr()
# return out
class Scope:
def __init__(self, parent=None):
self.blocks = []
self.block = self.new_block()
self.capture_catch = []
self.functions = []
self.bodies = []
self.chain = []
self.start = None
self.stop = None
self.is_generator = False
self.loop_stack = []
def new_block(self):
block = Block(-1, [], [])
self.blocks.append(block)
return block
def new_function(self, argv, body):
func = Function(argv)
self.functions.append(func)
self.bodies.append(body)
return self.add(func)
def new_label(self):
if len(self.block.contents_mut) > 0:
exit = self.new_block()
self.add(Jump(exit))
self.block = exit
return self.block
def add(self, op):
self.block.append(op)
op.start = self.start
op.stop = self.stop
return op
def capture(self, exp):
if len(self.capture_catch) == 0:
raise space.Error(u"%s: expecting capture" % exp.start.repr())
cap = self.capture_catch
self.capture_catch = []
return cap
def pull_chain(self):
chain = self.chain
self.chain = []
return chain
def close(self):
for block in self.blocks:
block.freeze()
for op in block:
if isinstance(op, Cond):
block.succ.extend([op.then, op.exit])
if isinstance(op, Jump):
block.succ.extend([op.exit])
if isinstance(op, SetBreak):
block.succ.extend([op.block])
if isinstance(op, Yield):
block.succ.extend([op.block])
return ProgramBody(self.blocks, self.functions, self.is_generator)
class Op:
_immutable_fields_ = ['i', 'start', 'stop', 'then', 'exit', 'value', 'body', 'args[*]', 'values[*]', 'name', 'cond', 'dst', 'src', 'it', 'block', 'upscope', 'ref']
i = 0
start = None
stop = None
# def repr(self):
# return str(self.__class__.__name__) + " " + self.args_str()
#
# def args_str(self):
# return "..."
def uses(self):
return []
class Assert(Op):
_immutable_fields_ = ['i', 'start', 'stop', 'value']
def __init__(self, value):
self.value = value
def uses(self):
return [self.value]
class ValuedOp(Op):
pass
# def repr(self):
# return str(self.i) + " = " + str(self.__class__.__name__) + " " + self.args_str()
class Function(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'args', 'body']
def __init__(self, args):
self.args = args
self.body = None
class Call(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'callee', 'args[*]']
def __init__(self, callee, args):
self.callee = callee
self.args = args[:]
def uses(self):
return [self.callee] + self.args
#
# def args_str(self):
# out = str(self.callee.i)
# for a in self.args:
# out += ", " + str(a.i)
# return out
class Cond(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'cond', 'then', 'exit']
def __init__(self, cond):
self.cond = cond
self.then = None
self.exit = None
def uses(self):
return [self.cond]
#
# def args_str(self):
# return str(self.cond.i) + ", " + self.then.label() + ", " + self.exit.label()
class Merge(Op):
_immutable_fields_ = ['i', 'start', 'stop', 'dst', 'src']
def __init__(self, dst, src):
self.dst = dst
self.src = src
def uses(self):
return [self.dst, self.src]
#
# def args_str(self):
# return str(self.dst.i) + ", " + str(self.src.i)
class Jump(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'exit']
def __init__(self, exit):
self.exit = exit
class Iter(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value']
def __init__(self, value):
self.value = value
def uses(self):
return [self.value]
# It could be that the 'next' should be like 'iter', and that this
# operation should supply contents of SetBreak instead.
class Next(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'it']
def __init__(self, it):
self.it = it
def uses(self):
return [self.it]
class SetBreak(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'block']
def __init__(self, block):
self.block = block
# def args_str(self):
# return self.exit.label()
class Constant(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value']
def __init__(self, value):
self.value = value
class MakeList(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'values[*]']
def __init__(self, values):
self.values = values[:]
def uses(self):
return self.values
class GetAttr(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value', 'name']
def __init__(self, value, name):
self.value = value
self.name = name
def uses(self):
return [self.value]
class GetItem(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value', 'index']
def __init__(self, value, index):
self.value = value
self.index = index
def uses(self):
return [self.value, self.index]
class Variable(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'name']
def __init__(self, name):
self.name = name
# def args_str(self):
# return self.name
class Yield(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value', 'block']
def __init__(self, value, block):
self.value = value
self.block = block
def uses(self):
return [self.value]
class SetAttr(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'name', 'value']
def __init__(self, obj, name, value):
self.obj = obj
self.name = name
self.value = value
def uses(self):
return [self.obj, self.value]
class SetItem(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'index', 'value']
def __init__(self, obj, index, value):
self.obj = obj
self.index = index
self.value = value
def uses(self):
return [self.obj, self.index, self.value]
class SetLocal(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'name', 'value', 'upscope']
def __init__(self, name, value, upscope):
assert isinstance(name, unicode)
assert isinstance(value, ValuedOp)
self.name = name
self.value = value
self.upscope = upscope
def uses(self):
return [self.value]
class Return(Op):
_immutable_fields_ = ['i', 'start', 'stop', 'ref']
def __init__(self, ref):
self.ref = ref
def uses(self):
return [self.ref]
class Frame:
_virtualizable_ = ['tmp[*]'] # XXX
def __init__(self, tmp):
self = jit.hint(self, access_directly=True, fresh_virtualizable=True)
self.tmp = tmp
@always_inline
def store(self, index, value):
assert index >= 0
self.tmp[index] = value
@always_inline
def load(self, index):
assert index >= 0
return self.tmp[index]
def interpret(prog, frame):
block = prog.blocks[0]
tmp = []
for i in range(prog.tmpc):
tmp.append(space.null)
#for blk in prog.blocks:
# print blk.repr()
if prog.is_generator:
return Generator(block, tmp, frame, None, 0)
return interpret_body(block, tmp, frame, None)
def get_printable_location(pc, block, loop_break, cl_frame_module):
if loop_break is None:
return "pc=%d block=%d cl_frame_module=%s" % (pc, block.index, cl_frame_module.repr().encode('utf-8'))
return "pc=%d block=%d loop_break=%d cl_frame_module=%s" % (pc, block.index, loop_break.index, cl_frame_module.repr().encode('utf-8'))
#
#def get_printable_location(pc, block, loop_break):
# if loop_break is None:
# return "pc=%d block=%d" % (pc, block.index)
# return "pc=%d block=%d loop_break=%d" % (pc, block.index, loop_break.index)
jitdriver = jit.JitDriver(
greens=['pc', 'block', 'loop_break', 'module'],
reds=['cl_frame', 'frame'],
virtualizables = ['frame'], # XXX
get_printable_location=get_printable_location)
def interpret_body(block, t, cl_frame, loop_break):
frame = Frame(t)
pc = 0
module = jit.promote(cl_frame.module)
try:
while pc < len(block):
try:
jitdriver.jit_merge_point(
pc=pc, block=block, loop_break=loop_break, module=module,
cl_frame=cl_frame, frame=frame)
op = block[pc]
pc += 1
if isinstance(op, Call):
do_call(frame, op)
elif isinstance(op, Assert):
if space.is_false(frame.load(op.value.i)):
raise space.Error(u"Assertion error")
elif isinstance(op, Cond):
pc = 0
if space.is_false(frame.load(op.cond.i)):
block = op.exit
else:
block = op.then
elif isinstance(op, Jump):
pc = 0
block = op.exit
elif isinstance(op, Next):
frame.store(op.i, frame.load(op.it.i).callattr(u'next', []))
elif isinstance(op, Yield):
raise YieldIteration(op.block, loop_break, op.i, frame.load(op.value.i))
elif isinstance(op, SetBreak):
loop_break = op.block
elif isinstance(op, Iter):
frame.store(op.i, frame.load(op.value.i).iter())
elif isinstance(op, Constant):
frame.store(op.i, op.value)
elif isinstance(op, Variable):
frame.store(op.i, lookup(module, cl_frame, op.name))
elif isinstance(op, Merge):
frame.store(op.dst.i, frame.load(op.src.i))
elif isinstance(op, Function):
frame.store(op.i, Closure(cl_frame, op))
elif isinstance(op, MakeList):
contents = []
for val in op.values:
contents.append(frame.load(val.i))
frame.store(op.i, space.List(contents))
elif isinstance(op, GetAttr):
frame.store(op.i, frame.load(op.value.i).getattr(op.name))
elif isinstance(op, GetItem):
frame.store(op.i, frame.load(op.value.i).getitem(frame.load(op.index.i)))
elif isinstance(op, SetAttr):
frame.store(op.i, frame.load(op.obj.i).setattr(op.name, frame.load(op.value.i)))
elif isinstance(op, SetItem):
frame.store(op.i, frame.load(op.obj.i).setitem(
frame.load(op.index.i),
frame.load(op.value.i)))
elif isinstance(op, SetLocal):
frame.store(op.i, set_local(module, cl_frame, op.name, frame.load(op.value.i), op.upscope))
elif isinstance(op, Return):
return frame.load(op.ref.i)
else:
raise space.Error(u"spaced out")
except StopIteration as stopiter:
if loop_break is not None:
block = loop_break
loop_break = None
continue
op = block[pc-1]
error = space.Error(u"stop iteration")
error.stacktrace.append((cl_frame, op.start, op.stop))
raise error
raise space.Error(u"crappy compiler")
except space.Error as e:
op = block[pc-1]
e.stacktrace.append((cl_frame, op.start, op.stop))
raise e
@jit.unroll_safe
def do_call(frame, op):
callee = frame.load(op.callee.i)
argv = []
for arg in op.args:
argv.append(frame.load(arg.i))
frame.store(op.i, callee.call(argv))
def lookup(module, frame, name):
if frame.parent is None:
return module.getattr(name)
if name in frame.var:
return frame.var[name]
return lookup(module, frame.parent, name)
def set_local(module, frame, name, value, upscope):
if frame.parent is None:
return module.setattr(name, value)
elif upscope:
if name in frame.var:
frame.var[name] = value
return value
else:
return set_local(module, frame.parent, name, value, upscope)
else:
frame.var[name] = value
return value
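# Scope note: lookup() resolves a name by walking the .parent chain of
# closure frames and finally falling back to a module attribute, while
# set_local() with upscope=True rebinds the nearest enclosing frame that
# already holds the name (or the module at the top) and otherwise writes
# into the current frame's own var dict.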
def assert_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
val = translate(env, exp.exps[1])
env.add(Assert(val))
return val
def func_macro(env, exp):
argv = []
for i in range(1, len(exp.exps)):
arg = exp.exps[i]
if isinstance(arg, reader.Literal) and arg.name == u'symbol':
argv.append(arg.value)
else:
raise space.Error(u"%s: expected symbol inside func" % arg.start.repr())
body = env.capture(exp)
return env.new_function(argv, body)
def if_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
chain = env.pull_chain()
cond = Cond(translate(env, exp.exps[1]))
env.add(cond)
cond.then = env.block = env.new_block()
cond.exit = exit = env.new_block()
val = translate_flow(env, env.capture(exp))
env.add(Merge(cond, val))
env.add(Jump(exit))
if len(chain) > 0:
first = chain[0]
if len(chain) > 1 and macro_name(first.exps[0]) != u'else' and len(first.exps) != 1:
raise space.Error(u"%s: non-else longer chains not supported" % exp.start.repr())
env.block, exit = exit, env.new_block()
val = translate_flow(env, first.capture)
env.add(Merge(cond, val))
env.add(Jump(exit))
env.block = exit
return cond
def return_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
return env.add(Return(translate(env, exp.exps[1])))
def while_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
current_loop = (loop, exit, _) = (env.new_label(), env.new_block(), False)
env.loop_stack.append(current_loop)
loop = env.new_label()
cond = env.add(Cond(translate(env, exp.exps[1])))
cond.then = env.block = env.new_block()
cond.exit = env.new_block()
val = translate_flow(env, env.capture(exp))
env.add(Merge(cond, val))
env.add(Jump(loop))
env.block = cond.exit
loop_exit(env)
return cond
def and_macro(env, exp):
if len(exp.exps) != 3:
raise space.Error(u"no translation for %s with length != 3" % exp.name)
val = translate(env, exp.exps[1])
cond = env.add(Cond(val))
cond.then = env.block = env.new_block()
cond.exit = env.new_block()
env.add(Merge(val, translate(env, exp.exps[2])))
env.add(Jump(cond.exit))
env.block = cond.exit
return val
def or_macro(env, exp):
if len(exp.exps) != 3:
raise space.Error(u"no translation for %s with length != 3" % exp.name)
val = translate(env, exp.exps[1])
cond = env.add(Cond(val))
cond.exit = env.block = env.new_block()
cond.then = env.new_block()
env.add(Merge(val, translate(env, exp.exps[2])))
env.add(Jump(cond.then))
env.block = cond.then
return val
def syntax_chain(env, exp):
if len(exp.exps) < 3:
raise space.Error(u"no translation for %s with length < 3" % exp.name)
and_ = Variable(u'and')
if len(exp.exps) > 3:
env.add(and_)
lhs = translate(env, exp.exps[0])
op = translate(env, exp.exps[1])
rhs = translate(env, exp.exps[2])
res = env.add(Call(op, [lhs, rhs]))
i = 3
while i < len(exp.exps):
lhs = rhs
op = translate(env, exp.exps[i])
rhs = translate(env, exp.exps[i+1])
res = env.add(Call(and_, [
res,
env.add(Call(op, [lhs, rhs]))]))
i += 2
return res
def for_macro(env, exp):
if len(exp.exps) != 3:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
var = exp.exps[1]
if not isinstance(var, reader.Literal):
raise space.Error(u"%s: format: for variable exp" % exp.start.repr())
it = env.add(Iter(translate(env, exp.exps[2])))
current_loop = (loop, exit, _) = (env.new_block(), env.new_block(), True)
env.loop_stack.append(current_loop)
cond = env.add(SetBreak(exit))
env.add(Jump(loop))
env.block = loop
env.add(SetLocal(var.value, env.add(Next(it)), False))
val = translate_flow(env, env.capture(exp))
env.add(Merge(cond, val))
env.add(Jump(loop))
env.block = exit
loop_exit(env)
return cond
def loop_exit(env):
_, exit, _ = env.loop_stack.pop(-1)
if len(env.loop_stack) > 0 and env.loop_stack[-1][2]:
env.add(SetBreak(env.loop_stack[-1][1]))
def break_macro(env, exp):
if len(exp.exps) != 1:
raise space.Error(u"%s: format: break" % exp.start.repr())
if len(env.loop_stack) == 0:
raise space.Error(u"%s: not inside a loop" % exp.start.repr())
return env.add(Jump(env.loop_stack[-1][1]))
def continue_macro(env, exp):
if len(exp.exps) != 1:
raise space.Error(u"%s: format: continue" % exp.start.repr())
if len(env.loop_stack) == 0:
raise space.Error(u"%s: not inside a loop" % exp.start.repr())
return env.add(Jump(env.loop_stack[-1][0]))
def yield_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"%s: format: yield expr" % exp.start.repr())
env.is_generator = True
val = translate(env, exp.exps[1])
yield_ = env.add(Yield(val, env.new_block()))
env.block = yield_.block
return yield_
def from_macro(env, exp):
if len(exp.exps) == 2:
exp.exps.extend(env.capture(exp))
if len(exp.exps) <= 2:
raise space.Error(u"%s: format: from expr symbol..." % exp.start.repr())
val = translate(env, exp.exps[1])
for attr in exp.exps[2:]:
if isinstance(attr, reader.Literal) and attr.name == u'symbol':
var = env.add(GetAttr(val, attr.value))
env.add(SetLocal(attr.value, var, False))
else:
raise space.Error(u"%s: expected symbol" % attr.start.repr())
return val
macros = {
u'break': break_macro,
u'continue': continue_macro,
u'assert': assert_macro,
u'func': func_macro,
u'for': for_macro,
u'if': if_macro,
u'return': return_macro,
u'while': while_macro,
u'and': and_macro,
u'or': or_macro,
u'yield': yield_macro,
u'from': from_macro,
}
chain_macros = [u'else']
def macro_name(exp):
if isinstance(exp, reader.Expr):
if exp.name == u'form' and len(exp.exps) > 0:
first = exp.exps[0]
if isinstance(first, reader.Literal) and first.name == u'symbol':
return first.value
return u""
def translate_flow(env, exps):
val = None
for chain in chains(exps):
val = translate_chain(env, chain)
assert val is not None
return val
def translate_map(env, exps):
res = []
for chain in chains(exps):
res.append(translate_chain(env, chain))
return res
def chains(exps):
out = []
chain = None
for exp in exps:
if chain is None:
chain = [exp]
elif macro_name(exp) in chain_macros:
chain.append(exp)
else:
out.append(chain)
chain = [exp]
if chain is not None:
out.append(chain)
return out
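# chains() groups a flat expression list so that any expression whose macro
# name appears in chain_macros (currently only u'else') is attached to the
# expression before it; translate_chain() then exposes that tail through
# env.chain, which is how if_macro picks up its else-branches via
# env.pull_chain().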
def translate_chain(env, chain):
chain_above = env.chain
exp = chain.pop(0)
env.chain = chain
val = translate(env, exp)
if len(env.chain) > 0:
raise space.Error(u"%s: chain without receiver" % exp.start.repr())
env.chain = chain_above
return val
def translate(env, exp):
start, stop = env.start, env.stop
env.start, env.stop = exp.start, exp.stop
res = translate_(env, exp)
env.start, env.stop = start, stop
return res
def translate_(env, exp):
if isinstance(exp, reader.Literal):
if exp.name == u'string':
return env.add(Constant(space.from_ustring(exp.value)))
elif exp.name == u'int':
return env.add(Constant(space.Integer(int(exp.value.encode('utf-8')))))
elif exp.name == u'hex':
return env.add(Constant(space.Integer(int(exp.value[2:].encode('utf-8'), 16))))
elif exp.name == u'float':
return env.add(Constant(space.Float(float(exp.value.encode('utf-8')))))
elif exp.name == u'symbol':
return env.add(Variable(exp.value))
raise space.Error(u"no translation for " + exp.name)
assert isinstance(exp, reader.Expr), exp.__class__.__name__
if exp.name == u'form' and len(exp.exps) > 0:
if macro_name(exp) in macros:
cc = env.capture_catch
if len(exp.capture) > 0:
env.capture_catch = exp.capture
res = macros[macro_name(exp)](env, exp)
if len(exp.capture) > 0 and len(env.capture_catch) > 0:
raise space.Error(u"%s: capture without receiver" % exp.start.repr())
env.capture_catch = cc
return res
# callattr goes here, if it'll be needed
args = translate_map(env, exp.exps)
callee = args.pop(0)
args.extend(translate_map(env, exp.capture))
return env.add(Call(callee, args))
elif exp.name == u'list':
return env.add(MakeList(translate_map(env, exp.exps)))
elif exp.name == u'attr' and len(exp.exps) == 2:
lhs, name = exp.exps
lhs = translate(env, lhs)
if not isinstance(name, reader.Literal):
raise space.Error(u"%s: bad attribute expr" % exp.repr())
return env.add(GetAttr(lhs, name.value))
elif exp.name == u'index' and len(exp.exps) == 2:
lhs, rhs = exp.exps
lhs = translate(env, lhs)
rhs = translate(env, rhs)
return env.add(GetItem(lhs, rhs))
elif exp.name == u'let' or exp.name == u'set':
lhs, rhs = exp.exps
rhs = translate(env, rhs)
return store_value(env, lhs, rhs, exp.name == u'set')
elif exp.name == u'aug' and len(exp.exps) == 3:
aug, lhs, rhs = exp.exps
if not isinstance(aug, reader.Literal):
raise space.Error(u"%s: bad augmented expr" % exp.repr())
rhs = translate(env, rhs)
return store_aug_value(env, aug, lhs, rhs)
elif exp.name == u'chain':
return syntax_chain(env, exp)
raise space.Error(u"no translation for " + exp.name)
def store_value(env, lhs, value, upscope):
if isinstance(lhs, reader.Literal) and lhs.name == u'symbol':
return env.add(SetLocal(lhs.value, value, upscope))
elif isinstance(lhs, reader.Expr) and lhs.name == u'attr' and len(lhs.exps) == 2:
obj, name = lhs.exps
obj = translate(env, obj)
assert isinstance(name, reader.Literal)
return env.add(SetAttr(obj, name.value, value))
elif isinstance(lhs, reader.Expr) and lhs.name == u'index' and len(lhs.exps) == 2:
obj, index = lhs.exps
obj = translate(env, obj)
index = translate(env, index)
return env.add(SetItem(obj, index, value))
else:
raise space.Error(u"no translation for " + lhs.name)
def store_aug_value(env, aug, lhs, value):
aug = env.add(Variable(aug.value))
if isinstance(lhs, reader.Literal) and lhs.name == u'symbol':
name = lhs.value
value = env.add(Call(aug, [env.add(Variable(name)), value]))
return env.add(SetLocal(name, value, True))
elif isinstance(lhs, reader.Expr) and lhs.name == u'attr' and len(lhs.exps) == 2:
obj, name = lhs.exps
assert isinstance(name, reader.Literal)
obj = translate(env, obj)
value = env.add(Call(aug, [env.add(GetAttr(obj, name.value)), value]))
return env.add(SetAttr(obj, name.value, value))
elif isinstance(lhs, reader.Expr) and lhs.name == u'index' and len(lhs.exps) == 2:
obj, index = lhs.exps
obj = translate(env, obj)
index = translate(env, index)
value = env.add(Call(aug, [env.add(GetItem(obj, index)), value]))
return env.add(SetItem(obj, index, value))
else:
raise space.Error(u"no translation for " + lhs.name)
def build_closures(parent):
for i in range(len(parent.functions)):
env = Scope(parent)
func = parent.functions[i]
translate_flow(env, parent.bodies[i])
w = env.add(Constant(space.null))
env.add(Return(w))
build_closures(env)
func.body = env.close()
def to_program(exps):
env = Scope()
if len(exps) == 0:
env.add(Return(env.add(Constant(space.null))))
return Program(env.close())
value = translate_flow(env, exps)
env.add(Return(value))
build_closures(env)
return Program(env.close())
| agpl-3.0 | -1,058,595,052,669,584,400 | 31.952096 | 167 | 0.557847 | false | 3.370215 | false | false | false |
start-jsk/jsk_apc | demos/baxtergv6_apc2016/node_scripts/json_saver.py | 1 | 7469 | #!/usr/bin/env python
import datetime
from jsk_arc2017_common.msg import Content
from jsk_arc2017_common.msg import ContentArray
from jsk_arc2017_common.srv import UpdateJSON
from jsk_arc2017_common.srv import UpdateJSONResponse
import json
import os
import os.path as osp
import rospy
import shutil
from std_msgs.msg import String
from std_srvs.srv import Trigger
from std_srvs.srv import TriggerResponse
import threading
class JSONSaver(threading.Thread):
def __init__(self):
super(JSONSaver, self).__init__(target=self._run_services)
json_dir = rospy.get_param('~json_dir', None)
output_dir = rospy.get_param('~output_dir', None)
if json_dir is None:
rospy.logerr('must set json dir path to ~json_dir')
return
if output_dir is None:
rospy.logerr('must set output dir path to ~output_dir')
return
now = datetime.datetime.now()
output_dir = osp.join(output_dir, now.strftime('%Y%m%d_%H%M%S'))
if not osp.exists(output_dir):
os.makedirs(output_dir)
location_path = osp.join(json_dir, 'item_location_file.json')
self.output_json_path = osp.join(
output_dir, 'item_location_file.json')
if osp.exists(location_path):
shutil.copy(location_path, self.output_json_path)
with open(location_path) as location_f:
data = json.load(location_f)
else:
            rospy.logerr(
                'item_location_file.json does not exist in {}'.format(
                    location_path))
            return
self.bin_contents = {}
for bin_ in data['bins']:
self.bin_contents[bin_['bin_id']] = bin_['contents']
self.tote_contents = data['tote']['contents']
self.cardboard_contents = {}
self.cardboard_ids = {}
# this is for pick task
# order file is only used in pick task
order_path = osp.join(json_dir, 'order_file.json')
if osp.exists(order_path):
output_order_path = osp.join(output_dir, 'order_file.json')
shutil.copy(order_path, output_order_path)
order_path = osp.join(json_dir, 'order_file.json')
with open(order_path) as order_f:
orders = json.load(order_f)['orders']
for order in orders:
size_id = order['size_id']
if len(order['contents']) == 2:
cardboard_id = 'A'
elif len(order['contents']) == 3:
cardboard_id = 'B'
else: # len(order['contents']) == 5
cardboard_id = 'C'
self.cardboard_ids[cardboard_id] = size_id
cardboard_contents = {}
for box in data['boxes']:
size_id = box['size_id']
cardboard_contents[size_id] = box['contents']
for key in 'ABC':
size_id = self.cardboard_ids[key]
self.cardboard_contents[key] = cardboard_contents[size_id]
# publish stamped json_dir
self.pub = rospy.Publisher('~output/json_dir', String, queue_size=1)
self.pub_bin = rospy.Publisher(
'~output/bin_contents',
ContentArray,
queue_size=1)
rate = rospy.get_param('~rate', 1)
self.timer_pub = rospy.Timer(rospy.Duration(1. / rate), self._cb_pub)
self.lock = threading.Lock()
self.daemon = True
def _cb_pub(self, event):
self.pub.publish(String(data=osp.dirname(self.output_json_path)))
contents_msg = ContentArray()
contents = []
for idx_ in range(0, len(self.bin_contents)):
bin_ = chr(ord('A') + idx_)
msg = Content()
msg.bin = bin_
msg.items = self.bin_contents[bin_]
contents.append(msg)
contents_msg.header.stamp = rospy.Time.now()
contents_msg.contents = contents
self.pub_bin.publish(contents_msg)
def _run_services(self):
self.services = []
self.services.append(rospy.Service(
'~update_json', UpdateJSON, self._update))
self.services.append(rospy.Service(
'~save_json', Trigger, self._save))
def _update(self, req):
is_updated = self._update_location(req)
is_saved = self._save_json()
is_updated = is_saved and is_updated
return UpdateJSONResponse(updated=is_updated)
def _save(self, req):
is_saved = self._save_json()
return TriggerResponse(success=is_saved)
def _save_json(self):
separators = (',', ': ')
self.lock.acquire()
is_saved = True
boxes = []
bins = []
if len(self.cardboard_contents.keys()) > 0:
for key in 'ABC':
boxes.append({
'size_id': self.cardboard_ids[key],
'contents': self.cardboard_contents[key]
})
for idx_ in range(0, len(self.bin_contents)):
bin_ = chr(ord('A') + idx_)
bins.append({
'bin_id': bin_,
'contents': self.bin_contents[bin_]
})
location = {
'bins': bins,
'boxes': boxes,
'tote': {
'contents': self.tote_contents,
}
}
try:
with open(self.output_json_path, 'w+') as f:
json.dump(
location, f, sort_keys=True,
indent=4, separators=separators)
except Exception:
rospy.logerr('could not save json in {}'
.format(self.output_json_path))
is_saved = False
self.lock.release()
return is_saved
def _update_location(self, req):
is_updated = True
self.lock.acquire()
item = req.item
src = req.src
dst = req.dst
if src[:3] == 'bin':
src = src[4]
try:
self.bin_contents[src].remove(item)
except Exception:
rospy.logerr('{0} does not exist in bin {1}'.format(item, src))
self.lock.release()
return False
elif src[:9] == 'cardboard':
src = src[10]
try:
self.cardboard_contents[src].remove(item)
except Exception:
rospy.logerr('{0} does not exist in bin {1}'.format(item, src))
self.lock.release()
return False
elif src == 'tote':
try:
self.tote_contents.remove(item)
except Exception:
rospy.logerr('{} does not exist in tote'.format(item))
self.lock.release()
return False
else:
            rospy.logerr('Invalid src request {}'.format(src))
is_updated = False
if dst[:3] == 'bin':
dst = dst[4]
self.bin_contents[dst].append(item)
elif dst[:9] == 'cardboard':
dst = dst[10]
self.cardboard_contents[dst].append(item)
elif dst == 'tote':
self.tote_contents.append(item)
else:
            rospy.logerr('Invalid dst request {}'.format(dst))
is_updated = False
self.lock.release()
return is_updated
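# Illustrative only: the ~update_json service takes an UpdateJSON request
# with 'item', 'src' and 'dst' fields, where src/dst are 'bin_<ID>',
# 'cardboard_<ID>' or 'tote'. Assuming the node name 'json_saver' and an
# example item name, a call could look something like:
#
#   rosservice call /json_saver/update_json \
#       "{item: 'avery_binder', src: 'bin_A', dst: 'tote'}"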
if __name__ == '__main__':
rospy.init_node('json_saver')
json_saver = JSONSaver()
json_saver.start()
rospy.spin()
| bsd-3-clause | 6,995,504,888,770,529,000 | 33.419355 | 79 | 0.528585 | false | 3.877985 | false | false | false |
iw3hxn/LibrERP | account_financial_report_aeroo_xls/report/common_report_header.py | 1 | 6668 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) BrowseInfo (http://browseinfo.in)
# Copyright (C) Didotech SRL
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
# Mixin to use with rml_parse, so self.pool will be defined.
class common_report_header(object):
def _sum_debit(self, period_id=False, journal_id=False):
if journal_id and isinstance(journal_id, int):
journal_id = [journal_id]
if period_id and isinstance(period_id, int):
period_id = [period_id]
if not journal_id:
journal_id = self.journal_ids
if not period_id:
period_id = self.period_ids
if not (period_id and journal_id):
return 0.0
self.cr.execute('SELECT SUM(debit) FROM account_move_line l '
'WHERE period_id IN %s AND journal_id IN %s ' + self.query_get_clause + ' ',
(tuple(period_id), tuple(journal_id)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, period_id=False, journal_id=False):
if journal_id and isinstance(journal_id, int):
journal_id = [journal_id]
if period_id and isinstance(period_id, int):
period_id = [period_id]
if not journal_id:
journal_id = self.journal_ids
if not period_id:
period_id = self.period_ids
if not (period_id and journal_id):
return 0.0
self.cr.execute('SELECT SUM(credit) FROM account_move_line l '
'WHERE period_id IN %s AND journal_id IN %s ' + self.query_get_clause + '',
(tuple(period_id), tuple(journal_id)))
return self.cr.fetchone()[0] or 0.0
def _get_start_date(self, data):
if data.get('form', False) and data['form'].get('date_from', False):
return data['form']['date_from']
return ''
def _get_target_move(self, data):
if data.get('form', False) and data['form'].get('target_move', False):
if data['form']['target_move'] == 'all':
return _('All Entries')
return _('All Posted Entries')
return ''
def _get_end_date(self, data):
if data.get('form', False) and data['form'].get('date_to', False):
return data['form']['date_to']
return ''
def get_start_period(self, data):
if data.get('form', False) and data['form'].get('period_from', False):
return self.pool['account.period'].browse(self.cr, self.uid, data['form']['period_from'][0], self.context).name
return ''
def get_end_period(self, data):
if data.get('form', False) and data['form'].get('period_to', False):
return self.pool['account.period'].browse(self.cr, self.uid, data['form']['period_to'][0], self.context).name
return ''
def _get_account(self, data):
if data.get('form', False) and data['form'].get('chart_account_id', False):
return self.pool['account.account'].browse(self.cr, self.uid, data['form']['chart_account_id'][0], self.context).name
return ''
def _get_sortby(self, data):
        raise NotImplementedError(_('Not implemented.'))
def _get_filter(self, data):
if data.get('form', False) and data['form'].get('filter', False):
if data['form']['filter'] == 'filter_date':
return self._translate('Date')
elif data['form']['filter'] == 'filter_period':
return self._translate('Periods')
return self._translate('No Filters')
def _sum_debit_period(self, period_id, journal_id=None):
journals = journal_id or self.journal_ids
if not journals:
return 0.0
self.cr.execute('SELECT SUM(debit) FROM account_move_line l '
'WHERE period_id=%s AND journal_id IN %s ' + self.query_get_clause + '',
(period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit_period(self, period_id, journal_id=None):
journals = journal_id or self.journal_ids
if not journals:
return 0.0
self.cr.execute('SELECT SUM(credit) FROM account_move_line l '
'WHERE period_id=%s AND journal_id IN %s ' + self.query_get_clause + ' ',
(period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
def _get_fiscalyear(self, data):
if data.get('form', False) and data['form'].get('fiscalyear_id', False):
return self.pool['account.fiscalyear'].browse(self.cr, self.uid, data['form']['fiscalyear_id'][0], self.context).name
return ''
def _get_company(self, data):
if data.get('form', False) and data['form'].get('chart_account_id', False):
return self.pool['account.account'].browse(self.cr, self.uid,
data['form']['chart_account_id'][0], self.context).company_id.name
return ''
def _get_journal(self, data):
codes = []
if data.get('form', False) and data['form'].get('journal_ids', False):
self.cr.execute('select code from account_journal where id IN %s', (tuple(data['form']['journal_ids']),))
codes = [x for x, in self.cr.fetchall()]
return codes
def _get_currency(self, data):
if data.get('form', False) and data['form'].get('chart_account_id', False):
return self.pool['account.account'].browse(self.cr, self.uid, data['form'][
'chart_account_id'][0], self.context).company_id.currency_id.symbol
return ''
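# Usage note (not part of the original file): this mixin is meant to be
# combined with a report_sxw parser, e.g. something like
#   class Parser(report_sxw.rml_parse, common_report_header): ...
# so that self.cr, self.uid and self.pool exist; the concrete parser is also
# expected to provide self.journal_ids, self.period_ids and
# self.query_get_clause before the _sum_* helpers are called.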
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,271,005,539,179,233,300 | 44.054054 | 129 | 0.574985 | false | 3.819015 | false | false | false |
goodmami/pydelphin | delphin/mrs/__init__.py | 1 | 4262 | # -*- coding: utf-8 -*-
"""
This module contains classes and methods related to Minimal Recursion
Semantics [MRS]_. In addition to MRS, there are the related formalisms
Robust Minimal Recursion Semantics [RMRS]_, Elementary Dependency
Structures [EDS]_, and Dependency Minimal Recursion Semantics [DMRS]_.
As a convenience, \*MRS refers to the collection of MRS and related
formalisms (so "MRS" then refers to the original formalism), and
PyDelphin accordingly defines :class:`~delphin.mrs.xmrs.Xmrs` as the
common subclass for the various formalisms.
Users will interact mostly with :class:`~delphin.mrs.xmrs.Xmrs`
objects, but will not often instantiate them directly. Instead, they
are created by serializing one of the various formats (such as
:mod:`delphin.mrs.simplemrs`, :mod:`delphin.mrs.mrx`, or
:mod:`delphin.mrs.dmrx`). No matter what serialization format (or
formalism) is used to load a \*MRS structure, it will be stored the
same way in memory, so any queries or actions taken on these structures
will use the same methods.
.. [MRS] Copestake, Ann, Dan Flickinger, Carl Pollard,
and Ivan A. Sag. "Minimal recursion semantics: An introduction."
Research on language and computation 3, no. 2-3 (2005): 281-332.
.. [RMRS] Copestake, Ann. "Report on the design of RMRS."
DeepThought project deliverable (2003).
.. [EDS] Stephan Oepen, Dan Flickinger, Kristina Toutanova, and
Christopher D Manning. Lingo Redwoods. Research on Language and
Computation, 2(4):575–596, 2004.;
Stephan Oepen and Jan Tore Lønning. Discriminant-based MRS
banking. In Proceedings of the 5th International Conference on
Language Resources and Evaluation, pages 1250–1255, 2006.
.. [DMRS] Copestake, Ann. Slacker Semantics: Why superficiality,
dependency and avoidance of commitment can be the right way to go.
In Proceedings of the 12th Conference of the European Chapter of
the Association for Computational Linguistics, pages 1–9.
Association for Computational Linguistics, 2009.
"""
# these may be order-sensitive
from .components import (
Lnk, Node, ElementaryPredication,
HandleConstraint, Pred, Link
)
from .xmrs import Xmrs, Mrs, Rmrs, Dmrs
from delphin.util import deprecated
__all__ = ['Lnk', 'Node', 'ElementaryPredication',
'HandleConstraint', 'Pred', 'Link', 'Xmrs', 'Mrs', 'Dmrs']
@deprecated(final_version='1.0.0', alternative='delphin.commands.convert()')
def convert(txt, src_fmt, tgt_fmt, single=True, **kwargs):
"""
    Convert a textual representation of \*MRS from the src_fmt
representation to the tgt_fmt representation. By default, only
read and convert a single \*MRS object (e.g. for `mrx` this
starts at <mrs> and not <mrs-list>), but changing the `mode`
argument to `corpus` (alternatively: `list`) reads and converts
multiple \*MRSs.
Args:
txt: A string of semantic data.
src_fmt: The original representation format of txt.
tgt_fmt: The representation format to convert to.
single: If True, assume txt represents a single \*MRS, otherwise
read it as a corpus (or list) of \*MRSs.
kwargs: Any other keyword arguments to pass to the serializer
of the target format. See Notes.
Returns:
A string in the target format.
Notes:
src_fmt and tgt_fmt may be one of the following:
| format | description |
| --------- | ---------------------------- |
| simplemrs | The popular SimpleMRS format |
| mrx | The XML format of MRS |
| dmrx | The XML format of DMRS |
Additional keyword arguments for the serializer may include:
| option | description |
| ------------ | ----------------------------------- |
| pretty_print | print with newlines and indentation |
| color | print with syntax highlighting |
"""
from importlib import import_module
reader = import_module('{}.{}'.format('delphin.mrs', src_fmt.lower()))
writer = import_module('{}.{}'.format('delphin.mrs', tgt_fmt.lower()))
return writer.dumps(
reader.loads(txt, single=single),
single=single,
**kwargs
)
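# Illustrative usage sketch (not part of the original module), assuming a
# SimpleMRS string is held in `text`; note the function itself is deprecated
# in favour of delphin.commands.convert():
#
#   from delphin.mrs import convert
#   dmrx = convert(text, 'simplemrs', 'dmrx', pretty_print=True)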
| mit | -4,533,146,085,247,655,000 | 43.789474 | 76 | 0.671445 | false | 3.578638 | false | false | false |
DavidPurcell/murano_temp | murano/cfapi/cfapi.py | 1 | 13455 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_config import cfg
from oslo_log import log as logging
import retrying
import six
from webob import response
from murano.common.i18n import _LI, _LW
from murano.common import auth_utils # noqa
from murano.common import wsgi
from murano.db.services import cf_connections as db_cf
import muranoclient.client as muranoclient
from muranoclient.common import exceptions
from muranoclient.glance import client as glare_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Controller(object):
"""WSGI controller for application catalog resource in Murano v1 API"""
def _package_to_service(self, package):
srv = {}
srv['id'] = package.id
srv['name'] = package.name
if len(package.description) > 256:
srv['description'] = u"{0} ...".format(package.description[:253])
else:
srv['description'] = package.description
srv['bindable'] = True
srv['tags'] = []
for tag in package.tags:
srv['tags'].append(tag)
plan = {'id': package.id + '-1',
'name': 'default',
'description': 'Default plan for the service {name}'.format(
name=package.name)}
srv['plans'] = [plan]
return srv
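    # For reference, the dict built above is what ends up in the Cloud
    # Foundry catalog: {'id': ..., 'name': ..., 'description': ...,
    # 'bindable': True, 'tags': [...], 'plans': [{'id': '<package-id>-1',
    # 'name': 'default', 'description': ...}]}, i.e. one default plan per
    # Murano package.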
def _make_service(self, name, package, plan_id):
id = uuid.uuid4().hex
return {"name": name,
"?": {plan_id: {"name": package.name},
"type": package.fully_qualified_name,
"id": id}}
def _get_service(self, env, service_id):
for service in env.services:
if service['?']['id'] == service_id:
return service
return None
def list(self, req):
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
kwargs = {'type': 'Application'}
packages = m_cli.packages.filter(**kwargs)
services = []
for package in packages:
services.append(self._package_to_service(package))
resp = {'services': services}
return resp
def provision(self, req, body, instance_id):
"""Here is the example of request body given us from Cloud Foundry:
{
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"organization_guid": "org-guid-here",
"space_guid": "space-guid-here",
"parameters": {"param1": "value1",
"param2": "value2"}
}
"""
data = json.loads(req.body)
space_guid = data['space_guid']
org_guid = data['organization_guid']
plan_id = data['plan_id']
service_id = data['service_id']
parameters = data['parameters']
self.current_session = None
# Here we'll take an entry for CF org and space from db. If we
# don't have any entries we will create it from scratch.
try:
tenant = db_cf.get_tenant_for_org(org_guid)
except AttributeError:
tenant = req.headers['X-Project-Id']
db_cf.set_tenant_for_org(org_guid, tenant)
LOG.info(_LI("Cloud Foundry {org_id} mapped to tenant "
"{tenant_name}").format(org_id=org_guid,
tenant_name=tenant))
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
def _set_new_environment_for_space(space_guid, log_msg):
body = {'name': 'my_{uuid}'.format(uuid=uuid.uuid4().hex)}
env = m_cli.environments.create(body)
db_cf.set_environment_for_space(space_guid, env.id)
LOG.info(log_msg.format(space_id=space_guid,
environment_id=env.id))
return env.id
try:
environment_id = db_cf.get_environment_for_space(space_guid)
# NOTE: Check that environment which was previously linked with
# CF space still exist, reset a new environment for space.
try:
env = m_cli.environments.get(environment_id)
except exceptions.HTTPNotFound:
msg = (_LI("Can not find environment_id {environment_id}, "
"will create a new one."
).format(environment_id=environment_id))
LOG.info(msg)
env = {}
if not env:
log_msg = (_LI("Cloud Foundry {space_id} remapped to "
"{environment_id}"))
environment_id = _set_new_environment_for_space(
space_guid, log_msg)
except AttributeError:
log_msg = (_LI("Cloud Foundry {space_id} mapped to "
"{environment_id}"))
environment_id = _set_new_environment_for_space(
space_guid, log_msg)
package = m_cli.packages.get(service_id)
LOG.debug('Adding service {name}'.format(name=package.name))
service = self._make_service(space_guid, package, plan_id)
db_cf.set_instance_for_service(instance_id, service['?']['id'],
environment_id, tenant)
# NOTE(Kezar): Here we are going through JSON and add ids where
# it's necessary. Before that we need to drop '?' key from parameters
# dictionary as far it contains murano package related info which is
# necessary in our scenario
if '?' in parameters.keys():
parameters.pop('?', None)
LOG.warning(_LW("Incorrect input parameters. Package related "
"parameters shouldn't be passed through Cloud "
"Foundry"))
params = [parameters]
while params:
a = params.pop()
for k, v in six.iteritems(a):
if isinstance(v, dict):
params.append(v)
if k == '?':
v['id'] = uuid.uuid4().hex
service.update(parameters)
# Now we need to obtain session to modify the env
session_id = create_session(m_cli, environment_id)
m_cli.services.post(environment_id,
path='/',
data=service,
session_id=session_id)
m_cli.sessions.deploy(environment_id, session_id)
self.current_session = session_id
return response.Response(status=202, json_body={})
def deprovision(self, req, instance_id):
service = db_cf.get_service_for_instance(instance_id)
if not service:
return {}
service_id = service.service_id
environment_id = service.environment_id
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
session_id = create_session(m_cli, environment_id)
m_cli.services.delete(environment_id, '/' + service_id, session_id)
m_cli.sessions.deploy(environment_id, session_id)
return response.Response(status=202, json_body={})
def bind(self, req, body, instance_id, app_id):
db_service = db_cf.get_service_for_instance(instance_id)
if not db_service:
return {}
service_id = db_service.service_id
environment_id = db_service.environment_id
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
session_id = create_session(m_cli, environment_id)
env = m_cli.environments.get(environment_id, session_id)
LOG.debug('Got environment {0}'.format(env))
service = self._get_service(env, service_id)
LOG.debug('Got service {0}'.format(service))
# NOTE(starodubcevna): Here we need to find an action which will return
# us needed credentials. By default we will looking for getCredentials
# action.
result = {}
try:
actions = service['?']['_actions']
for action_id in list(actions):
if 'getCredentials' in action_id:
@retrying.retry(retry_on_exception=lambda e: isinstance(e,
TypeError),
wait_random_min=1000,
wait_random_max=10000,
stop_max_delay=30000)
def _get_creds(client, task_id, environment_id):
result = m_cli.actions.get_result(environment_id,
task_id)['result']
return result
task_id = m_cli.actions.call(environment_id, action_id)
result = _get_creds(m_cli, task_id, environment_id)
if not result:
LOG.warning(_LW("This application doesn't have action "
"getCredentials"))
return response.Response(status=500)
except KeyError:
# NOTE(starodubcevna): In CF service broker API spec return
# code for failed bind is not present, so we will return 500.
LOG.warning(_LW("This application doesn't have actions at all"))
return response.Response(status=500)
if 'credentials' in list(result):
return result
else:
return {'credentials': result}
def unbind(self, req, instance_id, app_id):
"""Unsupported functionality
murano doesn't support this kind of functionality, so we just need
to create a stub where the call will come. We can't raise something
like NotImplementedError because we will have problems on Cloud Foundry
side. The best way now it to return empty dict which will be correct
answer for Cloud Foundry.
"""
return {}
def get_last_operation(self, req, instance_id):
service = db_cf.get_service_for_instance(instance_id)
# NOTE(freerunner): Prevent code 500 if requested environment
# already doesn't exist.
if not service:
            LOG.warning(_LW('Requested service for instance {} is not '
                            'found').format(instance_id))
body = {}
resp = response.Response(status=410, json_body=body)
return resp
env_id = service.environment_id
token = req.headers["X-Auth-Token"]
m_cli = _get_muranoclient(token, req)
# NOTE(starodubcevna): we can track only environment status. it's
# murano API limitation.
m_environment = m_cli.environments.get(env_id)
if m_environment.status == 'ready':
body = {'state': 'succeeded',
'description': 'operation succeed'}
resp = response.Response(status=200, json_body=body)
elif m_environment.status in ['pending', 'deleting', 'deploying']:
body = {'state': 'in progress',
'description': 'operation in progress'}
resp = response.Response(status=202, json_body=body)
elif m_environment.status in ['deploy failure', 'delete failure']:
body = {'state': 'failed',
'description': '{0}. Please correct it manually'.format(
m_environment.status)}
resp = response.Response(status=200, json_body=body)
return resp
def _get_muranoclient(token_id, req):
artifacts_client = None
if CONF.cfapi.packages_service in ['glance', 'glare']:
artifacts_client = _get_glareclient(token_id, req)
murano_url = CONF.murano.url or req.endpoints.get('murano')
if not murano_url:
LOG.error('No murano url is specified and no "application-catalog" '
'service is registered in keystone.')
return muranoclient.Client(1, murano_url, token=token_id,
artifacts_client=artifacts_client)
def _get_glareclient(token_id, req):
glare_settings = CONF.glare
url = glare_settings.url or req.endpoints.get('glare')
if not url:
LOG.error('No glare url is specified and no "artifact" '
'service is registered in keystone.')
return glare_client.Client(
endpoint=url, token=token_id,
insecure=glare_settings.insecure,
key_file=glare_settings.key_file or None,
ca_file=glare_settings.ca_file or None,
cert_file=glare_settings.cert_file or None,
type_name='murano',
type_version=1)
def create_session(client, environment_id):
id = client.sessions.configure(environment_id).id
return id
def create_resource():
return wsgi.Resource(Controller(),
serializer=wsgi.ServiceBrokerResponseSerializer())
| apache-2.0 | -3,214,018,305,001,191,000 | 38.690265 | 79 | 0.570197 | false | 4.146379 | false | false | false |