the-stack_106_27608 | import pytplot
import numpy as np
import copy
def avg_res_data(tvar,res,new_tvar=None):
"""
Averages the variable over a specified period of time.
Parameters:
tvar : str
Name of tplot variable.
res : int/float
The new data resolution
new_tvar : str
Name of new tvar for averaged data. If not set, then the data in tvar is replaced.
Returns:
None
Examples:
>>> #Average the data over every two seconds
>>> pytplot.store_data('d', data={'x':[2,5,8,11,14,17,21], 'y':[[1,1,50],[2,2,3],[100,4,47],[4,90,5],[5,5,99],[6,6,25],[7,7,-5]]})
>>> pytplot.avg_res_data('d',2,'d2res')
>>> print(pytplot.data_quants['d2res'].values)
"""
tvar_new = pytplot.data_quants[tvar].coarsen(time=res, boundary='trim').mean()
tvar_new.name = pytplot.data_quants[tvar].name
tvar_new.attrs = copy.deepcopy(pytplot.data_quants[tvar].attrs)
if new_tvar is None:
pytplot.data_quants[tvar] = tvar_new
else:
if 'spec_bins' in pytplot.data_quants[tvar].coords:
pytplot.store_data(new_tvar, data={'x': tvar_new.coords['time'].values, 'y': tvar_new.values,
'v': tvar_new.coords['spec_bins'].values})
else:
pytplot.store_data(new_tvar, data={'x': tvar_new.coords['time'].values, 'y': tvar_new.values})
pytplot.data_quants[new_tvar].attrs = copy.deepcopy(pytplot.data_quants[tvar].attrs)
return
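# Worked example (an addition, not in the original module): with the docstring
# data above, coarsen(time=2, boundary='trim').mean() averages consecutive pairs
# and drops the trailing odd sample, so the 7 input times [2,5,8,11,14,17,21]
# collapse to 3 rows; the first row of 'd2res' is the mean of [1,1,50] and
# [2,2,3], i.e. [1.5, 1.5, 26.5].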
|
the-stack_106_27610 | # Bundles for JS/CSS Minification
PIPELINE_JS = {
"common": {
"source_filenames": (
"sumo/js/i18n.js",
"underscore/underscore.js",
"moment/moment.js",
"jquery/dist/jquery.min.js",
"jquery/jquery-migrate.js",
"sumo/js/libs/jquery.cookie.js",
"sumo/js/libs/jquery.placeholder.js",
"sumo/js/templates/macros.js",
"sumo/js/templates/search-results-list.js",
"sumo/js/templates/search-results.js",
"nunjucks/browser/nunjucks-slim.js",
"sumo/js/nunjucks.js",
"sumo/js/cached_xhr.js",
"sumo/js/search_utils.es6",
"sumo/js/browserdetect.js",
"sumo/js/libs/uitour.js",
"sumo/js/kbox.js",
"sumo/js/main.js",
"sumo/js/format.js",
"sumo/js/libs/modernizr-custom-build.js",
"sumo/js/geoip-locale.js",
"mailcheck/src/mailcheck.js",
"sumo/js/ui.js",
"sumo/js/analytics.js",
"sumo/js/instant_search.es6",
"sumo/js/responsive-nav-toggle.js",
"sumo/js/profile-avatars.js",
"protocol/js/protocol-base.js",
"protocol/js/protocol-utils.js",
"protocol/js/protocol-supports.js",
"protocol/js/protocol-details.js",
"protocol/js/protocol-footer.js",
"protocol/js/protocol-menu.js",
"protocol/js/protocol-modal.js",
"protocol/js/protocol-navigation.js",
"protocol/js/protocol-newsletter.js",
"protocol/js/protocol-notification-bar.js",
"protocol/js/protocol-lang-switcher.js",
"sumo/js/protocol-nav.es6",
"sumo/js/protocol-details-init.js",
"sumo/js/protocol-modal-init.es6",
"sumo/js/protocol-notification-init.js",
"sumo/js/protocol-language-switcher-init.js",
"sumo/js/sumo-tabs.es6",
"sumo/js/sumo-close-this.es6",
),
"output_filename": "build/common-min.js",
},
"common.fx.download": {
"source_filenames": ("sumo/js/show-fx-download.js",),
"output_filename": "build/show-fx-download.js",
},
"community": {
"source_filenames": (
"jquery/jquery.min.js",
"jquery/jquery-migrate.js",
"community/js/community.js",
"community/js/select.js",
),
"output_filename": "build/community-min.js",
},
"community.metrics": {
"source_filenames": ("kpi/js/kpi.browserify.js",),
"output_filename": "build/kpi.dashboard-min.js",
},
"jqueryui": {
"source_filenames": ("sumo/js/jquery-ui-custom.js",),
"output_filename": "build/jqueryui-min.js",
},
"questions": {
"source_filenames": (
"sumo/js/markup.js",
"sumo/js/ajaxvote.js",
"sumo/js/ajaxpreview.js",
"sumo/js/remote.js",
"sumo/js/aaq.js",
"sumo/js/questions.js",
"sumo/js/libs/jquery.tokeninput.js",
"sumo/js/tags.filter.js",
"sumo/js/tags.js",
"sumo/js/reportabuse.js",
"sumo/js/questions.metrics.js",
"sumo/js/libs/jquery.ajaxupload.js",
"sumo/js/upload.js",
),
"output_filename": "build/questions-min.js",
},
"questions.metrics": {
"source_filenames": ("sumo/js/questions.metrics-dashboard.js",),
"output_filename": "build/questions.metrics-min.js",
},
"products": {
"source_filenames": (
"sumo/js/compare_versions.js",
"sumo/js/products.js",
),
"output_filename": "build/products-min.js",
},
"search": {
"source_filenames": ("sumo/js/search.js",),
"output_filename": "build/search-min.js",
},
"forums": {
"source_filenames": (
"sumo/js/markup.js",
"sumo/js/ajaxpreview.js",
"sumo/js/forums.js",
"sumo/js/reportabuse.js",
),
"output_filename": "build/forums-min.js",
},
"gallery": {
"source_filenames": (
"sumo/js/libs/jquery.ajaxupload.js",
"sumo/js/gallery.js",
),
"output_filename": "build/gallery-min.js",
},
"wiki": {
"source_filenames": (
"sumo/js/markup.js",
"sumo/js/libs/django/urlify.js",
"sumo/js/libs/django/prepopulate.js",
"sumo/js/libs/jquery.lazyload.js",
"sumo/js/libs/jquery.tokeninput.js",
"sumo/js/users.autocomplete.js",
"sumo/js/screencast.js",
"sumo/js/showfor.js",
"sumo/js/ajaxvote.js",
"sumo/js/ajaxpreview.js",
"sumo/js/wiki.js",
"sumo/js/tags.js",
"sumo/js/dashboards.js",
"sumo/js/editable.js",
"sumo/js/wiki.metrics.js",
"sumo/js/templates/wiki-related-doc.js",
"sumo/js/templates/wiki-search-results.js",
"sumo/js/wiki_search.js",
),
"output_filename": "build/wiki-min.js",
},
"rickshaw": {
"source_filenames": (
"d3/d3.js",
"sumo/js/libs/d3.layout.min.js",
"sumo/js/libs/rickshaw.js",
"sumo/js/rickshaw_utils.js",
),
"output_filename": "build/rickshaw-min.js",
},
"wiki.history": {
"source_filenames": ("sumo/js/historycharts.js",),
"output_filename": "build/wiki.history-min.js",
},
"wiki.diff": {
"source_filenames": (
"sumo/js/libs/diff_match_patch_uncompressed.js",
"sumo/js/diff.js",
),
"output_filename": "build/wiki.diff-min.js",
},
"wiki.editor": {
"source_filenames": (
"codemirror/lib/codemirror.js",
"codemirror/addon/mode/simple.js",
"codemirror/addon/hint/show-hint.js",
"sumo/js/codemirror.sumo-hint.js",
"sumo/js/codemirror.sumo-mode.js",
),
"output_filename": "build/wiki.editor-min.js",
},
"wiki.dashboard": {
"source_filenames": ("sumo/js/wiki.dashboard.js",),
"output_filename": "build/wiki.dashboard-min.js",
},
"users": {
"source_filenames": (
"sumo/js/users.js",
"sumo/js/reportabuse.js",
),
"output_filename": "build/users-min.js",
},
"messages": {
"source_filenames": (
"sumo/js/markup.js",
"sumo/js/libs/jquery.autoresize.js",
"sumo/js/libs/jquery.tokeninput.js",
"sumo/js/users.autocomplete.js",
"sumo/js/ajaxpreview.js",
"sumo/js/messages.js",
),
"output_filename": "build/messages-min.js",
},
"groups": {
"source_filenames": (
"sumo/js/libs/jquery.tokeninput.js",
"sumo/js/users.autocomplete.js",
"sumo/js/markup.js",
"sumo/js/groups.js",
"sumo/js/editable.js",
),
"output_filename": "build/groups-min.js",
},
"kpi.dashboard": {
"source_filenames": (
"d3/d3.js",
"kpi/js/kpi.browserify.js",
),
"output_filename": "build/kpi.dashboard-min.js",
},
"gtm-snippet": {
"source_filenames": (
"sumo/js/dnt-helper.js",
"sumo/js/gtm-snippet.js",
),
"output_filename": "build/gtm-snippet-min.js",
},
}
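# Usage note (an addition, not part of the original settings file): with
# django-pipeline, a bundle defined above is typically pulled into a template via
#     {% load pipeline %}
#     {% javascript 'common' %}
# which renders the compressed build/common-min.js when compression is enabled.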
|
the-stack_106_27612 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PartnerVO(object):
def __init__(self):
self._biz_status = None
self._partner_id = None
self._partner_name = None
self._short_code = None
@property
def biz_status(self):
return self._biz_status
@biz_status.setter
def biz_status(self, value):
self._biz_status = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def partner_name(self):
return self._partner_name
@partner_name.setter
def partner_name(self, value):
self._partner_name = value
@property
def short_code(self):
return self._short_code
@short_code.setter
def short_code(self, value):
self._short_code = value
def to_alipay_dict(self):
params = dict()
if self.biz_status:
if hasattr(self.biz_status, 'to_alipay_dict'):
params['biz_status'] = self.biz_status.to_alipay_dict()
else:
params['biz_status'] = self.biz_status
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.partner_name:
if hasattr(self.partner_name, 'to_alipay_dict'):
params['partner_name'] = self.partner_name.to_alipay_dict()
else:
params['partner_name'] = self.partner_name
if self.short_code:
if hasattr(self.short_code, 'to_alipay_dict'):
params['short_code'] = self.short_code.to_alipay_dict()
else:
params['short_code'] = self.short_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PartnerVO()
if 'biz_status' in d:
o.biz_status = d['biz_status']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'partner_name' in d:
o.partner_name = d['partner_name']
if 'short_code' in d:
o.short_code = d['short_code']
return o
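# Illustrative round trip (an addition, not part of the original SDK module);
# the field values below are made-up placeholders.
if __name__ == '__main__':
    vo = PartnerVO()
    vo.partner_id = '2088000000000000'
    vo.partner_name = 'Example Partner'
    vo.biz_status = 'NORMAL'
    vo.short_code = 'EXMP'
    payload = vo.to_alipay_dict()              # plain dict, safe to json.dumps()
    restored = PartnerVO.from_alipay_dict(payload)
    assert restored.partner_name == vo.partner_name
    print(json.dumps(payload))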
|
the-stack_106_27613 | import torch
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torch.Tensor.unsqueeze')
@tensorrt_converter('torch.unsqueeze')
def convert_unsqueeze(ctx):
input = ctx.method_args[0]
dim = get_arg(ctx, 'dim', pos=1, default=None)
if dim < 0:
dim = len(input.shape) + dim + 1
input_trt = trt_(ctx.network, input)
shape_trt = ctx.network.add_shape(input_trt).get_output(0)
unsqueeze_trt = trt_(ctx.network, input.new_ones((1), dtype=torch.int32))
output = ctx.method_return
shape1_trt = None
shape2_trt = None
if dim == 0:
shape2_trt = shape_trt
elif dim == len(input.shape):
shape1_trt = shape_trt
else:
slice1_start = [0]
slice1_size = [dim]
slice1_stride = [1]
shape1_trt = ctx.network.add_slice(shape_trt, slice1_start,
slice1_size,
slice1_stride).get_output(0)
slice2_start = [dim]
slice2_size = [len(input.shape) - dim]
slice2_stride = [1]
shape2_trt = ctx.network.add_slice(shape_trt, slice2_start,
slice2_size,
slice2_stride).get_output(0)
if shape1_trt is None:
new_shape_trt = ctx.network.add_concatenation(
[unsqueeze_trt, shape2_trt]).get_output(0)
elif shape2_trt is None:
new_shape_trt = ctx.network.add_concatenation(
[shape1_trt, unsqueeze_trt]).get_output(0)
else:
new_shape_trt = ctx.network.add_concatenation(
[shape1_trt, unsqueeze_trt, shape2_trt]).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_shape_trt)
output._trt = layer.get_output(0)
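# Illustration (an addition, not part of the original converter): unsqueeze at
# `dim` rebuilds the shape as shape[:dim] + (1,) + shape[dim:], which is what the
# slice/concatenation layers above assemble on the TensorRT side.
if __name__ == '__main__':
    x = torch.randn(2, 3, 4)
    dim = 1
    expected = x.shape[:dim] + (1,) + x.shape[dim:]
    assert torch.unsqueeze(x, dim).shape == expected  # torch.Size([2, 1, 3, 4])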
|
the-stack_106_27614 | import datetime
from decimal import Decimal
import django_filters
import pytz
from django.db import transaction
from django.db.models import F, Prefetch, Q
from django.db.models.functions import Coalesce, Concat
from django.http import FileResponse
from django.shortcuts import get_object_or_404
from django.utils.timezone import make_aware, now
from django.utils.translation import ugettext as _
from django_filters.rest_framework import DjangoFilterBackend, FilterSet
from rest_framework import mixins, serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import (
APIException, NotFound, PermissionDenied, ValidationError,
)
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import CreateModelMixin
from rest_framework.response import Response
from pretix.api.models import OAuthAccessToken
from pretix.api.serializers.order import (
InvoiceSerializer, OrderCreateSerializer, OrderPaymentSerializer,
OrderPositionSerializer, OrderRefundCreateSerializer,
OrderRefundSerializer, OrderSerializer, PriceCalcSerializer,
)
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedTicket, Device, Event, Invoice, InvoiceAddress,
Order, OrderPayment, OrderPosition, OrderRefund, Quota, TeamAPIToken,
generate_position_secret, generate_secret,
)
from pretix.base.payment import PaymentException
from pretix.base.services import tickets
from pretix.base.services.invoices import (
generate_cancellation, generate_invoice, invoice_pdf, invoice_qualified,
regenerate_invoice,
)
from pretix.base.services.mail import SendMailException
from pretix.base.services.orders import (
OrderChangeManager, OrderError, approve_order, cancel_order, deny_order,
extend_order, mark_order_expired, mark_order_refunded,
)
from pretix.base.services.pricing import get_price
from pretix.base.services.tickets import generate
from pretix.base.signals import (
order_modified, order_placed, register_ticket_outputs,
)
from pretix.base.templatetags.money import money_filter
class OrderFilter(FilterSet):
email = django_filters.CharFilter(field_name='email', lookup_expr='iexact')
code = django_filters.CharFilter(field_name='code', lookup_expr='iexact')
status = django_filters.CharFilter(field_name='status', lookup_expr='iexact')
modified_since = django_filters.IsoDateTimeFilter(field_name='last_modified', lookup_expr='gte')
created_since = django_filters.IsoDateTimeFilter(field_name='datetime', lookup_expr='gte')
class Meta:
model = Order
fields = ['code', 'status', 'email', 'locale', 'testmode', 'require_approval']
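# Illustrative client-side helper (an addition, not part of the original module):
# the OrderFilter above backs query parameters such as ?status=, ?email=, ?code=,
# ?modified_since= and ?created_since= on the order list endpoint. The base URL,
# token and the 'p' (paid) status code are assumptions here.
def _example_list_paid_orders(base_url, token):
    import requests
    return requests.get(
        base_url.rstrip('/') + '/orders/',
        headers={'Authorization': 'Token {}'.format(token)},
        params={'status': 'p', 'created_since': '2020-01-01T00:00:00Z'},
    ).json()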
class OrderViewSet(viewsets.ModelViewSet):
serializer_class = OrderSerializer
queryset = Order.objects.none()
filter_backends = (DjangoFilterBackend, OrderingFilter)
ordering = ('datetime',)
ordering_fields = ('datetime', 'code', 'status', 'last_modified')
filterset_class = OrderFilter
lookup_field = 'code'
permission = 'can_view_orders'
write_permission = 'can_change_orders'
def get_serializer_context(self):
ctx = super().get_serializer_context()
ctx['event'] = self.request.event
return ctx
def get_queryset(self):
qs = self.request.event.orders.prefetch_related(
'fees', 'payments', 'refunds', 'refunds__payment'
).select_related(
'invoice_address'
)
if self.request.query_params.get('pdf_data', 'false') == 'true':
qs = qs.prefetch_related(
Prefetch(
'positions',
OrderPosition.objects.all().prefetch_related(
'checkins', 'item', 'variation', 'answers', 'answers__options', 'answers__question',
'item__category', 'addon_to',
Prefetch('addons', OrderPosition.objects.select_related('item', 'variation'))
)
)
)
else:
qs = qs.prefetch_related(
Prefetch(
'positions',
OrderPosition.objects.all().prefetch_related(
'checkins', 'item', 'variation', 'answers', 'answers__options', 'answers__question',
)
)
)
return qs
def _get_output_provider(self, identifier):
responses = register_ticket_outputs.send(self.request.event)
for receiver, response in responses:
prov = response(self.request.event)
if prov.identifier == identifier:
return prov
raise NotFound('Unknown output provider.')
def list(self, request, **kwargs):
date = serializers.DateTimeField().to_representation(now())
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
resp = self.get_paginated_response(serializer.data)
resp['X-Page-Generated'] = date
return resp
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data, headers={'X-Page-Generated': date})
@action(detail=True, url_name='download', url_path='download/(?P<output>[^/]+)')
def download(self, request, output, **kwargs):
provider = self._get_output_provider(output)
order = self.get_object()
if order.status != Order.STATUS_PAID:
raise PermissionDenied("Downloads are not available for unpaid orders.")
ct = CachedCombinedTicket.objects.filter(
order=order, provider=provider.identifier, file__isnull=False
).last()
if not ct or not ct.file:
generate.apply_async(args=('order', order.pk, provider.identifier))
raise RetryException()
else:
resp = FileResponse(ct.file.file, content_type=ct.type)
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}{}"'.format(
self.request.event.slug.upper(), order.code,
provider.identifier, ct.extension
)
return resp
@action(detail=True, methods=['POST'])
def mark_paid(self, request, **kwargs):
order = self.get_object()
if order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED):
ps = order.pending_sum
try:
p = order.payments.get(
state__in=(OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED),
provider='manual',
amount=ps
)
except OrderPayment.DoesNotExist:
order.payments.filter(state__in=(OrderPayment.PAYMENT_STATE_PENDING,
OrderPayment.PAYMENT_STATE_CREATED)) \
.update(state=OrderPayment.PAYMENT_STATE_CANCELED)
p = order.payments.create(
state=OrderPayment.PAYMENT_STATE_CREATED,
provider='manual',
amount=ps,
fee=None
)
try:
p.confirm(auth=self.request.auth,
user=self.request.user if request.user.is_authenticated else None,
count_waitinglist=False)
except Quota.QuotaExceededException as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
except PaymentException as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
except SendMailException:
pass
return self.retrieve(request, [], **kwargs)
return Response(
{'detail': 'The order is not pending or expired.'},
status=status.HTTP_400_BAD_REQUEST
)
@action(detail=True, methods=['POST'])
def mark_canceled(self, request, **kwargs):
send_mail = request.data.get('send_email', True)
cancellation_fee = request.data.get('cancellation_fee', None)
if cancellation_fee:
try:
cancellation_fee = float(Decimal(cancellation_fee))
except:
cancellation_fee = None
order = self.get_object()
if not order.cancel_allowed():
return Response(
{'detail': 'The order is not allowed to be canceled.'},
status=status.HTTP_400_BAD_REQUEST
)
try:
cancel_order(
order,
user=request.user if request.user.is_authenticated else None,
api_token=request.auth if isinstance(request.auth, TeamAPIToken) else None,
device=request.auth if isinstance(request.auth, Device) else None,
oauth_application=request.auth.application if isinstance(request.auth, OAuthAccessToken) else None,
send_mail=send_mail,
cancellation_fee=cancellation_fee
)
except OrderError as e:
return Response(
{'detail': str(e)},
status=status.HTTP_400_BAD_REQUEST
)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def approve(self, request, **kwargs):
send_mail = request.data.get('send_email', True)
order = self.get_object()
try:
approve_order(
order,
user=request.user if request.user.is_authenticated else None,
auth=request.auth if isinstance(request.auth, (Device, TeamAPIToken, OAuthAccessToken)) else None,
send_mail=send_mail,
)
except Quota.QuotaExceededException as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
except OrderError as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def deny(self, request, **kwargs):
send_mail = request.data.get('send_email', True)
comment = request.data.get('comment', '')
order = self.get_object()
try:
deny_order(
order,
user=request.user if request.user.is_authenticated else None,
auth=request.auth if isinstance(request.auth, (Device, TeamAPIToken, OAuthAccessToken)) else None,
send_mail=send_mail,
comment=comment,
)
except OrderError as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def mark_pending(self, request, **kwargs):
order = self.get_object()
if order.status != Order.STATUS_PAID:
return Response(
{'detail': 'The order is not paid.'},
status=status.HTTP_400_BAD_REQUEST
)
order.status = Order.STATUS_PENDING
order.save(update_fields=['status'])
order.log_action(
'pretix.event.order.unpaid',
user=request.user if request.user.is_authenticated else None,
auth=request.auth,
)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def mark_expired(self, request, **kwargs):
order = self.get_object()
if order.status != Order.STATUS_PENDING:
return Response(
{'detail': 'The order is not pending.'},
status=status.HTTP_400_BAD_REQUEST
)
mark_order_expired(
order,
user=request.user if request.user.is_authenticated else None,
auth=request.auth,
)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def mark_refunded(self, request, **kwargs):
order = self.get_object()
if order.status != Order.STATUS_PAID:
return Response(
{'detail': 'The order is not paid.'},
status=status.HTTP_400_BAD_REQUEST
)
mark_order_refunded(
order,
user=request.user if request.user.is_authenticated else None,
auth=(request.auth if isinstance(request.auth, (TeamAPIToken, OAuthAccessToken, Device)) else None),
)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def create_invoice(self, request, **kwargs):
order = self.get_object()
has_inv = order.invoices.exists() and not (
order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
and order.invoices.filter(is_cancellation=True).count() >= order.invoices.filter(is_cancellation=False).count()
)
if self.request.event.settings.get('invoice_generate') not in ('admin', 'user', 'paid', 'True') or not invoice_qualified(order):
return Response(
{'detail': _('You cannot generate an invoice for this order.')},
status=status.HTTP_400_BAD_REQUEST
)
elif has_inv:
return Response(
{'detail': _('An invoice for this order already exists.')},
status=status.HTTP_400_BAD_REQUEST
)
inv = generate_invoice(order)
order.log_action(
'pretix.event.order.invoice.generated',
user=self.request.user,
auth=self.request.auth,
data={
'invoice': inv.pk
}
)
return Response(
InvoiceSerializer(inv).data,
status=status.HTTP_201_CREATED
)
@action(detail=True, methods=['POST'])
def resend_link(self, request, **kwargs):
order = self.get_object()
if not order.email:
return Response({'detail': 'There is no email address associated with this order.'}, status=status.HTTP_400_BAD_REQUEST)
try:
order.resend_link(user=self.request.user, auth=self.request.auth)
except SendMailException:
return Response({'detail': _('There was an error sending the mail. Please try again later.')}, status=status.HTTP_503_SERVICE_UNAVAILABLE)
return Response(
status=status.HTTP_204_NO_CONTENT
)
@action(detail=True, methods=['POST'])
@transaction.atomic
def regenerate_secrets(self, request, **kwargs):
order = self.get_object()
order.secret = generate_secret()
for op in order.all_positions.all():
op.secret = generate_position_secret()
op.save()
order.save(update_fields=['secret'])
CachedTicket.objects.filter(order_position__order=order).delete()
CachedCombinedTicket.objects.filter(order=order).delete()
tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk,
'order': order.pk})
order.log_action(
'pretix.event.order.secret.changed',
user=self.request.user,
auth=self.request.auth,
)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def extend(self, request, **kwargs):
new_date = request.data.get('expires', None)
force = request.data.get('force', False)
if not new_date:
return Response(
{'detail': 'New date is missing.'},
status=status.HTTP_400_BAD_REQUEST
)
df = serializers.DateField()
try:
new_date = df.to_internal_value(new_date)
except:
return Response(
{'detail': 'New date is invalid.'},
status=status.HTTP_400_BAD_REQUEST
)
tz = pytz.timezone(self.request.event.settings.timezone)
new_date = make_aware(datetime.datetime.combine(
new_date,
datetime.time(hour=23, minute=59, second=59)
), tz)
order = self.get_object()
try:
extend_order(
order,
new_date=new_date,
force=force,
user=request.user if request.user.is_authenticated else None,
auth=request.auth,
)
return self.retrieve(request, [], **kwargs)
except OrderError as e:
return Response(
{'detail': str(e)},
status=status.HTTP_400_BAD_REQUEST
)
def create(self, request, *args, **kwargs):
serializer = OrderCreateSerializer(data=request.data, context=self.get_serializer_context())
serializer.is_valid(raise_exception=True)
with transaction.atomic():
self.perform_create(serializer)
order = serializer.instance
serializer = OrderSerializer(order, context=serializer.context)
order.log_action(
'pretix.event.order.placed',
user=request.user if request.user.is_authenticated else None,
auth=request.auth,
)
order_placed.send(self.request.event, order=order)
gen_invoice = invoice_qualified(order) and (
(order.event.settings.get('invoice_generate') == 'True') or
(order.event.settings.get('invoice_generate') == 'paid' and order.status == Order.STATUS_PAID)
) and not order.invoices.last()
if gen_invoice:
generate_invoice(order, trigger_pdf=True)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def update(self, request, *args, **kwargs):
partial = kwargs.get('partial', False)
if not partial:
return Response(
{"detail": "Method \"PUT\" not allowed."},
status=status.HTTP_405_METHOD_NOT_ALLOWED,
)
return super().update(request, *args, **kwargs)
def perform_update(self, serializer):
with transaction.atomic():
if 'comment' in self.request.data and serializer.instance.comment != self.request.data.get('comment'):
serializer.instance.log_action(
'pretix.event.order.comment',
user=self.request.user,
auth=self.request.auth,
data={
'new_comment': self.request.data.get('comment')
}
)
if 'checkin_attention' in self.request.data and serializer.instance.checkin_attention != self.request.data.get('checkin_attention'):
serializer.instance.log_action(
'pretix.event.order.checkin_attention',
user=self.request.user,
auth=self.request.auth,
data={
'new_value': self.request.data.get('checkin_attention')
}
)
if 'email' in self.request.data and serializer.instance.email != self.request.data.get('email'):
serializer.instance.log_action(
'pretix.event.order.contact.changed',
user=self.request.user,
auth=self.request.auth,
data={
'old_email': serializer.instance.email,
'new_email': self.request.data.get('email'),
}
)
if 'locale' in self.request.data and serializer.instance.locale != self.request.data.get('locale'):
serializer.instance.log_action(
'pretix.event.order.locale.changed',
user=self.request.user,
auth=self.request.auth,
data={
'old_locale': serializer.instance.locale,
'new_locale': self.request.data.get('locale'),
}
)
if 'invoice_address' in self.request.data:
serializer.instance.log_action(
'pretix.event.order.modified',
user=self.request.user,
auth=self.request.auth,
data={
'invoice_data': self.request.data.get('invoice_address'),
}
)
serializer.save()
if 'invoice_address' in self.request.data:
order_modified.send(sender=serializer.instance.event, order=serializer.instance)
def perform_create(self, serializer):
serializer.save()
def perform_destroy(self, instance):
if not instance.testmode:
raise PermissionDenied('Only test mode orders can be deleted.')
with transaction.atomic():
self.get_object().gracefully_delete(user=self.request.user if self.request.user.is_authenticated else None, auth=self.request.auth)
class OrderPositionFilter(FilterSet):
order = django_filters.CharFilter(field_name='order', lookup_expr='code__iexact')
has_checkin = django_filters.rest_framework.BooleanFilter(method='has_checkin_qs')
attendee_name = django_filters.CharFilter(method='attendee_name_qs')
search = django_filters.CharFilter(method='search_qs')
def search_qs(self, queryset, name, value):
return queryset.filter(
Q(secret__istartswith=value)
| Q(attendee_name_cached__icontains=value)
| Q(addon_to__attendee_name_cached__icontains=value)
| Q(attendee_email__icontains=value)
| Q(addon_to__attendee_email__icontains=value)
| Q(order__code__istartswith=value)
| Q(order__invoice_address__name_cached__icontains=value)
| Q(order__email__icontains=value)
)
def has_checkin_qs(self, queryset, name, value):
return queryset.filter(checkins__isnull=not value)
def attendee_name_qs(self, queryset, name, value):
return queryset.filter(Q(attendee_name_cached__iexact=value) | Q(addon_to__attendee_name_cached__iexact=value))
class Meta:
model = OrderPosition
fields = {
'item': ['exact', 'in'],
'variation': ['exact', 'in'],
'secret': ['exact'],
'order__status': ['exact', 'in'],
'addon_to': ['exact', 'in'],
'subevent': ['exact', 'in'],
'pseudonymization_id': ['exact'],
'voucher__code': ['exact'],
'voucher': ['exact'],
}
class OrderPositionViewSet(mixins.DestroyModelMixin, viewsets.ReadOnlyModelViewSet):
serializer_class = OrderPositionSerializer
queryset = OrderPosition.objects.none()
filter_backends = (DjangoFilterBackend, OrderingFilter)
ordering = ('order__datetime', 'positionid')
ordering_fields = ('order__code', 'order__datetime', 'positionid', 'attendee_name', 'order__status',)
filterset_class = OrderPositionFilter
permission = 'can_view_orders'
write_permission = 'can_change_orders'
ordering_custom = {
'attendee_name': {
'_order': F('display_name').asc(nulls_first=True),
'display_name': Coalesce('attendee_name_cached', 'addon_to__attendee_name_cached')
},
'-attendee_name': {
'_order': F('display_name').asc(nulls_last=True),
'display_name': Coalesce('attendee_name_cached', 'addon_to__attendee_name_cached')
},
}
def get_queryset(self):
qs = OrderPosition.objects.filter(order__event=self.request.event)
if self.request.query_params.get('pdf_data', 'false') == 'true':
qs = qs.prefetch_related(
'checkins', 'answers', 'answers__options', 'answers__question',
Prefetch('addons', OrderPosition.objects.select_related('item', 'variation')),
Prefetch('order', Order.objects.select_related('invoice_address').prefetch_related(
Prefetch(
'event',
Event.objects.select_related('organizer')
),
Prefetch(
'positions',
OrderPosition.objects.prefetch_related(
'checkins', 'item', 'variation', 'answers', 'answers__options', 'answers__question',
)
)
))
).select_related(
'item', 'variation', 'item__category', 'addon_to'
)
else:
qs = qs.prefetch_related(
'checkins', 'answers', 'answers__options', 'answers__question'
).select_related(
'item', 'order', 'order__event', 'order__event__organizer'
)
return qs
def _get_output_provider(self, identifier):
responses = register_ticket_outputs.send(self.request.event)
for receiver, response in responses:
prov = response(self.request.event)
if prov.identifier == identifier:
return prov
raise NotFound('Unknown output provider.')
@action(detail=True, methods=['POST'], url_name='price_calc')
def price_calc(self, request, *args, **kwargs):
"""
This calculates the price assuming a change of product or subevent. This endpoint
is deliberately not documented and considered a private API, only to be used by
pretix' web interface.
Sample input:
{
"item": 2,
"variation": null,
"subevent": 3
}
Sample output:
{
"gross": "2.34",
"gross_formatted": "2,34",
"net": "2.34",
"tax": "0.00",
"rate": "0.00",
"name": "VAT"
}
"""
serializer = PriceCalcSerializer(data=request.data, event=request.event)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
pos = self.get_object()
try:
ia = pos.order.invoice_address
except InvoiceAddress.DoesNotExist:
ia = InvoiceAddress()
kwargs = {
'item': pos.item,
'variation': pos.variation,
'voucher': pos.voucher,
'subevent': pos.subevent,
'addon_to': pos.addon_to,
'invoice_address': ia,
}
if data.get('item'):
item = data.get('item')
kwargs['item'] = item
if item.has_variations:
variation = data.get('variation') or pos.variation
if not variation:
raise ValidationError('No variation given')
if variation.item != item:
raise ValidationError('Variation does not belong to item')
kwargs['variation'] = variation
else:
variation = None
kwargs['variation'] = None
if pos.voucher and not pos.voucher.applies_to(item, variation):
kwargs['voucher'] = None
if data.get('subevent'):
kwargs['subevent'] = data.get('subevent')
price = get_price(**kwargs)
with language(data.get('locale') or self.request.event.settings.locale):
return Response({
'gross': price.gross,
'gross_formatted': money_filter(price.gross, self.request.event.currency, hide_currency=True),
'net': price.net,
'rate': price.rate,
'name': str(price.name),
'tax': price.tax,
})
@action(detail=True, url_name='download', url_path='download/(?P<output>[^/]+)')
def download(self, request, output, **kwargs):
provider = self._get_output_provider(output)
pos = self.get_object()
if pos.order.status != Order.STATUS_PAID:
raise PermissionDenied("Downloads are not available for unpaid orders.")
if not pos.generate_ticket:
raise PermissionDenied("Downloads are not enabled for this product.")
ct = CachedTicket.objects.filter(
order_position=pos, provider=provider.identifier, file__isnull=False
).last()
if not ct or not ct.file:
generate.apply_async(args=('orderposition', pos.pk, provider.identifier))
raise RetryException()
else:
resp = FileResponse(ct.file.file, content_type=ct.type)
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}{}"'.format(
self.request.event.slug.upper(), pos.order.code, pos.positionid,
provider.identifier, ct.extension
)
return resp
def perform_destroy(self, instance):
try:
ocm = OrderChangeManager(
instance.order,
user=self.request.user if self.request.user.is_authenticated else None,
auth=self.request.auth,
notify=False
)
ocm.cancel(instance)
ocm.commit()
except OrderError as e:
raise ValidationError(str(e))
except Quota.QuotaExceededException as e:
raise ValidationError(str(e))
class PaymentViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = OrderPaymentSerializer
queryset = OrderPayment.objects.none()
permission = 'can_view_orders'
write_permission = 'can_change_orders'
lookup_field = 'local_id'
def get_queryset(self):
order = get_object_or_404(Order, code=self.kwargs['order'], event=self.request.event)
return order.payments.all()
@action(detail=True, methods=['POST'])
def confirm(self, request, **kwargs):
payment = self.get_object()
force = request.data.get('force', False)
if payment.state not in (OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED):
return Response({'detail': 'Invalid state of payment'}, status=status.HTTP_400_BAD_REQUEST)
try:
payment.confirm(user=self.request.user if self.request.user.is_authenticated else None,
auth=self.request.auth,
count_waitinglist=False,
force=force)
except Quota.QuotaExceededException as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
except PaymentException as e:
return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
except SendMailException:
pass
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def refund(self, request, **kwargs):
payment = self.get_object()
amount = serializers.DecimalField(max_digits=10, decimal_places=2).to_internal_value(
request.data.get('amount', str(payment.amount))
)
if 'mark_refunded' in request.data:
mark_refunded = request.data.get('mark_refunded', False)
else:
mark_refunded = request.data.get('mark_canceled', False)
if payment.state != OrderPayment.PAYMENT_STATE_CONFIRMED:
return Response({'detail': 'Invalid state of payment.'}, status=status.HTTP_400_BAD_REQUEST)
full_refund_possible = payment.payment_provider.payment_refund_supported(payment)
partial_refund_possible = payment.payment_provider.payment_partial_refund_supported(payment)
available_amount = payment.amount - payment.refunded_amount
if amount <= 0:
return Response({'amount': ['Invalid refund amount.']}, status=status.HTTP_400_BAD_REQUEST)
if amount > available_amount:
return Response(
{'amount': ['Invalid refund amount, only {} are available to refund.'.format(available_amount)]},
status=status.HTTP_400_BAD_REQUEST)
if amount != payment.amount and not partial_refund_possible:
return Response({'amount': ['Partial refund not available for this payment method.']},
status=status.HTTP_400_BAD_REQUEST)
if amount == payment.amount and not full_refund_possible:
return Response({'amount': ['Full refund not available for this payment method.']},
status=status.HTTP_400_BAD_REQUEST)
r = payment.order.refunds.create(
payment=payment,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=OrderRefund.REFUND_STATE_CREATED,
amount=amount,
provider=payment.provider
)
try:
r.payment_provider.execute_refund(r)
except PaymentException as e:
r.state = OrderRefund.REFUND_STATE_FAILED
r.save()
return Response({'detail': 'External error: {}'.format(str(e))},
status=status.HTTP_400_BAD_REQUEST)
else:
payment.order.log_action('pretix.event.order.refund.created', {
'local_id': r.local_id,
'provider': r.provider,
}, user=self.request.user if self.request.user.is_authenticated else None, auth=self.request.auth)
if payment.order.pending_sum > 0:
if mark_refunded:
mark_order_refunded(payment.order,
user=self.request.user if self.request.user.is_authenticated else None,
auth=self.request.auth)
else:
payment.order.status = Order.STATUS_PENDING
payment.order.set_expires(
now(),
payment.order.event.subevents.filter(
id__in=payment.order.positions.values_list('subevent_id', flat=True))
)
payment.order.save(update_fields=['status', 'expires'])
return Response(OrderRefundSerializer(r).data, status=status.HTTP_200_OK)
@action(detail=True, methods=['POST'])
def cancel(self, request, **kwargs):
payment = self.get_object()
if payment.state not in (OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED):
return Response({'detail': 'Invalid state of payment'}, status=status.HTTP_400_BAD_REQUEST)
with transaction.atomic():
payment.state = OrderPayment.PAYMENT_STATE_CANCELED
payment.save()
payment.order.log_action('pretix.event.order.payment.canceled', {
'local_id': payment.local_id,
'provider': payment.provider,
}, user=self.request.user if self.request.user.is_authenticated else None, auth=self.request.auth)
return self.retrieve(request, [], **kwargs)
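# Example request body (an illustration, not part of the original module) for the
# `refund` action of PaymentViewSet above; only `amount` plus the optional
# `mark_canceled` (or legacy `mark_refunded`) flag are read from the payload:
#     POST .../payments/<local_id>/refund/
#     {"amount": "10.00", "mark_canceled": false}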
class RefundViewSet(CreateModelMixin, viewsets.ReadOnlyModelViewSet):
serializer_class = OrderRefundSerializer
queryset = OrderRefund.objects.none()
permission = 'can_view_orders'
write_permission = 'can_change_orders'
lookup_field = 'local_id'
def get_queryset(self):
order = get_object_or_404(Order, code=self.kwargs['order'], event=self.request.event)
return order.refunds.all()
@action(detail=True, methods=['POST'])
def cancel(self, request, **kwargs):
refund = self.get_object()
if refund.state not in (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT,
OrderRefund.REFUND_STATE_EXTERNAL):
return Response({'detail': 'Invalid state of refund'}, status=status.HTTP_400_BAD_REQUEST)
with transaction.atomic():
refund.state = OrderRefund.REFUND_STATE_CANCELED
refund.save()
refund.order.log_action('pretix.event.order.refund.canceled', {
'local_id': refund.local_id,
'provider': refund.provider,
}, user=self.request.user if self.request.user.is_authenticated else None, auth=self.request.auth)
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def process(self, request, **kwargs):
refund = self.get_object()
if refund.state != OrderRefund.REFUND_STATE_EXTERNAL:
return Response({'detail': 'Invalid state of refund'}, status=status.HTTP_400_BAD_REQUEST)
refund.done(user=self.request.user if self.request.user.is_authenticated else None, auth=self.request.auth)
if 'mark_refunded' in request.data:
mark_refunded = request.data.get('mark_refunded', False)
else:
mark_refunded = request.data.get('mark_canceled', False)
if mark_refunded:
mark_order_refunded(refund.order, user=self.request.user if self.request.user.is_authenticated else None,
auth=self.request.auth)
elif not (refund.order.status == Order.STATUS_PAID and refund.order.pending_sum <= 0):
refund.order.status = Order.STATUS_PENDING
refund.order.set_expires(
now(),
refund.order.event.subevents.filter(
id__in=refund.order.positions.values_list('subevent_id', flat=True))
)
refund.order.save(update_fields=['status', 'expires'])
return self.retrieve(request, [], **kwargs)
@action(detail=True, methods=['POST'])
def done(self, request, **kwargs):
refund = self.get_object()
if refund.state not in (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT):
return Response({'detail': 'Invalid state of refund'}, status=status.HTTP_400_BAD_REQUEST)
refund.done(user=self.request.user if self.request.user.is_authenticated else None, auth=self.request.auth)
return self.retrieve(request, [], **kwargs)
def get_serializer_context(self):
ctx = super().get_serializer_context()
ctx['order'] = get_object_or_404(Order, code=self.kwargs['order'], event=self.request.event)
return ctx
def create(self, request, *args, **kwargs):
if 'mark_refunded' in request.data:
mark_refunded = request.data.pop('mark_refunded', False)
else:
mark_refunded = request.data.pop('mark_canceled', False)
serializer = OrderRefundCreateSerializer(data=request.data, context=self.get_serializer_context())
serializer.is_valid(raise_exception=True)
with transaction.atomic():
self.perform_create(serializer)
r = serializer.instance
serializer = OrderRefundSerializer(r, context=serializer.context)
r.order.log_action(
'pretix.event.order.refund.created', {
'local_id': r.local_id,
'provider': r.provider,
},
user=request.user if request.user.is_authenticated else None,
auth=request.auth
)
if mark_refunded:
mark_order_refunded(
r.order,
user=request.user if request.user.is_authenticated else None,
auth=(request.auth if request.auth else None),
)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
serializer.save()
class InvoiceFilter(FilterSet):
refers = django_filters.CharFilter(method='refers_qs')
number = django_filters.CharFilter(method='nr_qs')
order = django_filters.CharFilter(field_name='order', lookup_expr='code__iexact')
def refers_qs(self, queryset, name, value):
return queryset.annotate(
refers_nr=Concat('refers__prefix', 'refers__invoice_no')
).filter(refers_nr__iexact=value)
def nr_qs(self, queryset, name, value):
return queryset.filter(nr__iexact=value)
class Meta:
model = Invoice
fields = ['order', 'number', 'is_cancellation', 'refers', 'locale']
class RetryException(APIException):
status_code = 409
default_detail = 'The requested resource is not ready, please retry later.'
default_code = 'retry_later'
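# Client-side sketch (an addition, not part of this module): the ticket and
# invoice download endpoints above respond with RetryException (HTTP 409) while
# the file is still being generated, so callers are expected to poll until a
# non-409 status arrives. URL and token here are placeholders.
def _example_download_with_retry(url, token, attempts=10):
    import time
    import requests
    for _ in range(attempts):
        resp = requests.get(url, headers={'Authorization': 'Token {}'.format(token)})
        if resp.status_code != 409:
            return resp
        time.sleep(1)
    raise RuntimeError('resource not ready after {} attempts'.format(attempts))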
class InvoiceViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = InvoiceSerializer
queryset = Invoice.objects.none()
filter_backends = (DjangoFilterBackend, OrderingFilter)
ordering = ('nr',)
ordering_fields = ('nr', 'date')
filterset_class = InvoiceFilter
permission = 'can_view_orders'
lookup_url_kwarg = 'number'
lookup_field = 'nr'
write_permission = 'can_change_orders'
def get_queryset(self):
return self.request.event.invoices.prefetch_related('lines').select_related('order', 'refers').annotate(
nr=Concat('prefix', 'invoice_no')
)
@action(detail=True, )
def download(self, request, **kwargs):
invoice = self.get_object()
if not invoice.file:
invoice_pdf(invoice.pk)
invoice.refresh_from_db()
if invoice.shredded:
raise PermissionDenied('The invoice file is no longer stored on the server.')
if not invoice.file:
raise RetryException()
resp = FileResponse(invoice.file.file, content_type='application/pdf')
resp['Content-Disposition'] = 'attachment; filename="{}.pdf"'.format(invoice.number)
return resp
@action(detail=True, methods=['POST'])
    def regenerate(self, request, **kwargs):
inv = self.get_object()
if inv.canceled:
raise ValidationError('The invoice has already been canceled.')
elif inv.shredded:
raise PermissionDenied('The invoice file is no longer stored on the server.')
else:
inv = regenerate_invoice(inv)
inv.order.log_action(
'pretix.event.order.invoice.regenerated',
data={
'invoice': inv.pk
},
user=self.request.user,
auth=self.request.auth,
)
return Response(status=204)
@action(detail=True, methods=['POST'])
    def reissue(self, request, **kwargs):
inv = self.get_object()
if inv.canceled:
raise ValidationError('The invoice has already been canceled.')
elif inv.shredded:
raise PermissionDenied('The invoice file is no longer stored on the server.')
else:
c = generate_cancellation(inv)
if inv.order.status != Order.STATUS_CANCELED:
inv = generate_invoice(inv.order)
else:
inv = c
inv.order.log_action(
'pretix.event.order.invoice.reissued',
data={
'invoice': inv.pk
},
user=self.request.user,
auth=self.request.auth,
)
return Response(status=204)
|
the-stack_106_27616 | from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.urls import reverse
from .models import *
def notice(request):
notices = {'notices': Notice.objects.all()}
return render(request, 'notice.html', notices)
def notice_post(request):
if request.method == "POST":
author = request.POST['author']
title = request.POST['title']
content = request.POST['content']
notice = Notice(author=author, title=title, content=content)
notice.save()
return HttpResponseRedirect(reverse('notice'))
else:
return render(request, 'notice_post.html')
def notice_detail(request, id):
try:
notice = Notice.objects.get(pk=id)
except Notice.DoesNotExist:
raise Http404("Does not exist!")
    return render(request, 'notice_detail.html', {'notice': notice})
|
the-stack_106_27618 | # -*- coding: utf-8 -*-
"""
flaskๆไพไปฃ็็ๅขๅ ๆฅๅฃ
้่ฆnginx้จ็ฝฒ๏ผไฟ่ฏredis่ฝ่ขซ่ฎฟ้ฎ๏ผๆ่ฝ่ฟ่กๅฏนๅบ็ๅขๅ ๆไฝ
"""
from flask import Flask, g
from flask import request
from redis_server.db import RedisClient
__all__ = ['app']
app = Flask(__name__)
def get_conn():
if not hasattr(g, 'redis'): # ็จไบๅคๆญๅฏน่ฑกๆฏๅฆๅ
ๅซๅฏนๅบ็ๅฑๆง
g.redis = RedisClient()
return g.redis
@app.route('/')
def index():
return '<h1><p style="width: 100%;height: 45px;display: block;line-height: 45px;text-align: center;">ๆฌข่ฟๆฅๅฐๅไธ็ไปฃ็ๆฑ ็ณป็ป</p></h1>'
@app.route('/put')
def upload_proxy():
""" ๅฐproxyไธไผ ๅฐredisๆฐๆฎๅบไธญ """
conn = get_conn()
proxy = request.args.get("proxy")
name = request.args.get("name")
# remote_ip = request.remote_addr
# port = proxy.split(":")[1]
proxy = "{}".format(proxy)
if not proxy:
return "ไธไผ ไปฃ็ไธ่ฝไธบ็ฉบ"
conn.add(name, proxy)
return "ๅทฒๆๅไธไผ ไปฃ็: {}".format(proxy)
@app.route('/remove')
def remove_proxy():
""" ๅ ้คredisไธญ็ไปฃ็ """
conn = get_conn()
name = request.args.get("name")
if not name:
return "้ฎไธ่ฝไธบ็ฉบ"
conn.remove(name)
return "ๅทฒๆๅๅ ้คไปฃ็:{}".format(name)
@app.route('/random')
def random_proxy():
"""
้ๆบ่ทๅๅฏ็จไปฃ็
:return: redis้้ข็ไปฃ็IP
"""
connection = get_conn()
return connection.random()
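# Illustrative client (an addition, not part of the original file); base_url
# assumes the default Flask development server address.
def _example_client(base_url='http://127.0.0.1:5000'):
    import requests
    requests.get(base_url + '/put', params={'name': 'p1', 'proxy': '1.2.3.4:8080'})
    print(requests.get(base_url + '/random').text)
    requests.get(base_url + '/remove', params={'name': 'p1'})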
if __name__ == '__main__':
    app.run()
|
the-stack_106_27619 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.debugger_v2.types import controller
from google.cloud.debugger_v2.types import data
from .transports.base import Controller2Transport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import Controller2GrpcAsyncIOTransport
from .client import Controller2Client
class Controller2AsyncClient:
"""The Controller service provides the API for orchestrating a
collection of debugger agents to perform debugging tasks. These
agents are each attached to a process of an application which may
include one or more replicas.
The debugger agents register with the Controller to identify the
application being debugged, the Debuggee. All agents that register
with the same data, represent the same Debuggee, and are assigned
the same ``debuggee_id``.
The debugger agents call the Controller to retrieve the list of
active Breakpoints. Agents with the same ``debuggee_id`` get the
same breakpoints list. An agent that can fulfill the breakpoint
request updates the Controller with the breakpoint result. The
controller selects the first result received and discards the rest
of the results. Agents that poll again for active breakpoints will
no longer have the completed breakpoint in the list and should
remove that breakpoint from their attached process.
The Controller service does not provide a way to retrieve the
results of a completed breakpoint. This functionality is available
using the Debugger service.
"""
_client: Controller2Client
DEFAULT_ENDPOINT = Controller2Client.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = Controller2Client.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(
Controller2Client.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
Controller2Client.parse_common_billing_account_path
)
common_folder_path = staticmethod(Controller2Client.common_folder_path)
parse_common_folder_path = staticmethod(Controller2Client.parse_common_folder_path)
common_organization_path = staticmethod(Controller2Client.common_organization_path)
parse_common_organization_path = staticmethod(
Controller2Client.parse_common_organization_path
)
common_project_path = staticmethod(Controller2Client.common_project_path)
parse_common_project_path = staticmethod(
Controller2Client.parse_common_project_path
)
common_location_path = staticmethod(Controller2Client.common_location_path)
parse_common_location_path = staticmethod(
Controller2Client.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
Controller2AsyncClient: The constructed client.
"""
return Controller2Client.from_service_account_info.__func__(Controller2AsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
Controller2AsyncClient: The constructed client.
"""
return Controller2Client.from_service_account_file.__func__(Controller2AsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> Controller2Transport:
"""Returns the transport used by the client instance.
Returns:
Controller2Transport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(Controller2Client).get_transport_class, type(Controller2Client)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, Controller2Transport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the controller2 client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.Controller2Transport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = Controller2Client(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def register_debuggee(
self,
request: Union[controller.RegisterDebuggeeRequest, dict] = None,
*,
debuggee: data.Debuggee = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> controller.RegisterDebuggeeResponse:
r"""Registers the debuggee with the controller service.
All agents attached to the same application must call this
method with exactly the same request content to get back the
same stable ``debuggee_id``. Agents should call this method
again whenever ``google.rpc.Code.NOT_FOUND`` is returned from
any controller method.
This protocol allows the controller service to disable
debuggees, recover from data loss, or change the ``debuggee_id``
format. Agents must handle ``debuggee_id`` value changing upon
re-registration.
Args:
request (Union[google.cloud.debugger_v2.types.RegisterDebuggeeRequest, dict]):
The request object. Request to register a debuggee.
debuggee (:class:`google.cloud.debugger_v2.types.Debuggee`):
Required. Debuggee information to register. The fields
``project``, ``uniquifier``, ``description`` and
``agent_version`` of the debuggee must be set.
This corresponds to the ``debuggee`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.debugger_v2.types.RegisterDebuggeeResponse:
Response for registering a debuggee.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([debuggee])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = controller.RegisterDebuggeeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if debuggee is not None:
request.debuggee = debuggee
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.register_debuggee,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_active_breakpoints(
self,
request: Union[controller.ListActiveBreakpointsRequest, dict] = None,
*,
debuggee_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> controller.ListActiveBreakpointsResponse:
r"""Returns the list of all active breakpoints for the debuggee.
The breakpoint specification (``location``, ``condition``, and
``expressions`` fields) is semantically immutable, although the
field values may change. For example, an agent may update the
location line number to reflect the actual line where the
breakpoint was set, but this doesn't change the breakpoint
semantics.
This means that an agent does not need to check if a breakpoint
has changed when it encounters the same breakpoint on a
successive call. Moreover, an agent should remember the
breakpoints that are completed until the controller removes them
from the active list to avoid setting those breakpoints again.
Args:
request (Union[google.cloud.debugger_v2.types.ListActiveBreakpointsRequest, dict]):
The request object. Request to list active breakpoints.
debuggee_id (:class:`str`):
Required. Identifies the debuggee.
This corresponds to the ``debuggee_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.debugger_v2.types.ListActiveBreakpointsResponse:
Response for listing active
breakpoints.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([debuggee_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = controller.ListActiveBreakpointsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if debuggee_id is not None:
request.debuggee_id = debuggee_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_active_breakpoints,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
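    # Hedged polling sketch (illustrative only): as the docstring above notes,
    # an agent may remember completed breakpoints and skip them until the
    # controller drops them from the active list. `completed_ids` and
    # `install_breakpoint` are assumed agent-side helpers, not module APIs.
    #
    #   completed_ids = set()
    #
    #   async def poll_once(client, debuggee_id):
    #       response = await client.list_active_breakpoints(debuggee_id=debuggee_id)
    #       for bp in response.breakpoints:
    #           if bp.id not in completed_ids:
    #               install_breakpoint(bp)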
async def update_active_breakpoint(
self,
request: Union[controller.UpdateActiveBreakpointRequest, dict] = None,
*,
debuggee_id: str = None,
breakpoint_: data.Breakpoint = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> controller.UpdateActiveBreakpointResponse:
r"""Updates the breakpoint state or mutable fields. The entire
Breakpoint message must be sent back to the controller service.
Updates to active breakpoint fields are only allowed if the new
value does not change the breakpoint specification. Updates to
the ``location``, ``condition`` and ``expressions`` fields
should not alter the breakpoint semantics. These may only make
changes such as canonicalizing a value or snapping the location
to the correct line of code.
Args:
request (Union[google.cloud.debugger_v2.types.UpdateActiveBreakpointRequest, dict]):
The request object. Request to update an active
breakpoint.
debuggee_id (:class:`str`):
Required. Identifies the debuggee
being debugged.
This corresponds to the ``debuggee_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
breakpoint_ (:class:`google.cloud.debugger_v2.types.Breakpoint`):
Required. Updated breakpoint information. The field
``id`` must be set. The agent must echo all Breakpoint
specification fields in the update.
This corresponds to the ``breakpoint_`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.debugger_v2.types.UpdateActiveBreakpointResponse:
Response for updating an active
breakpoint. The message is defined to
allow future extensions.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([debuggee_id, breakpoint_])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = controller.UpdateActiveBreakpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if debuggee_id is not None:
request.debuggee_id = debuggee_id
if breakpoint_ is not None:
request.breakpoint_ = breakpoint_
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_active_breakpoint,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
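    # Hedged sketch (illustrative): when a breakpoint completes, the agent
    # echoes the full Breakpoint message back with its captured state; the
    # specification fields stay semantically unchanged. `captured` is assumed
    # to be a google.cloud.debugger_v2.types.Breakpoint built by the agent.
    #
    #   async def report_hit(client, debuggee_id, captured):
    #       captured.is_final_state = True
    #       await client.update_active_breakpoint(
    #           debuggee_id=debuggee_id, breakpoint_=captured,
    #       )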
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
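# Hedged usage sketch: the async client can be used as an async context
# manager, which closes its transport on exit. The `my_debuggee` value is an
# assumed google.cloud.debugger_v2.types.Debuggee instance.
#
#   async with Controller2AsyncClient() as client:
#       await client.register_debuggee(debuggee=my_debuggee)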
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-debugger-client",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("Controller2AsyncClient",)
|
the-stack_106_27620 | """Config flow to configure Xiaomi Miio."""
import logging
from re import search
from micloud import MiCloud
from micloud.micloudexception import MiCloudAccessDenied
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_CLOUD_COUNTRY,
CONF_CLOUD_PASSWORD,
CONF_CLOUD_SUBDEVICES,
CONF_CLOUD_USERNAME,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MAC,
CONF_MANUAL,
CONF_MODEL,
DEFAULT_CLOUD_COUNTRY,
DOMAIN,
MODELS_ALL,
MODELS_ALL_DEVICES,
MODELS_GATEWAY,
SERVER_COUNTRY_CODES,
AuthException,
SetupException,
)
from .device import ConnectXiaomiDevice
_LOGGER = logging.getLogger(__name__)
DEVICE_SETTINGS = {
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
}
DEVICE_CONFIG = vol.Schema({vol.Required(CONF_HOST): str}).extend(DEVICE_SETTINGS)
DEVICE_MODEL_CONFIG = vol.Schema({vol.Required(CONF_MODEL): vol.In(MODELS_ALL)})
DEVICE_CLOUD_CONFIG = vol.Schema(
{
vol.Optional(CONF_CLOUD_USERNAME): str,
vol.Optional(CONF_CLOUD_PASSWORD): str,
vol.Optional(CONF_CLOUD_COUNTRY, default=DEFAULT_CLOUD_COUNTRY): vol.In(
SERVER_COUNTRY_CODES
),
vol.Optional(CONF_MANUAL, default=False): bool,
}
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if user_input is not None:
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if use_cloud and (
not cloud_username or not cloud_password or not cloud_country
):
errors["base"] = "cloud_credentials_incomplete"
# trigger re-auth flow
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data=self.config_entry.data,
)
)
if not errors:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CLOUD_SUBDEVICES,
default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False),
): bool
}
)
return self.async_show_form(
step_id="init", data_schema=settings_schema, errors=errors
)
class XiaomiMiioFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Xiaomi Miio config flow."""
VERSION = 1
def __init__(self):
"""Initialize."""
self.host = None
self.mac = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices = {}
@staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_reauth(self, user_input=None):
"""Perform reauth upon an authentication error or missing cloud credentials."""
self.host = user_input[CONF_HOST]
self.token = user_input[CONF_TOKEN]
self.mac = user_input[CONF_MAC]
self.model = user_input.get(CONF_MODEL)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Dialog that informs the user that reauth is required."""
if user_input is not None:
return await self.async_step_cloud()
return self.async_show_form(
step_id="reauth_confirm", data_schema=vol.Schema({})
)
async def async_step_import(self, conf: dict):
"""Import a configuration from config.yaml."""
self.host = conf[CONF_HOST]
self.token = conf[CONF_TOKEN]
self.name = conf.get(CONF_NAME)
self.model = conf.get(CONF_MODEL)
self.context.update(
{"title_placeholders": {"name": f"YAML import {self.host}"}}
)
return await self.async_step_connect()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_cloud()
async def async_step_zeroconf(
self, discovery_info: zeroconf.ZeroconfServiceInfo
) -> FlowResult:
"""Handle zeroconf discovery."""
name = discovery_info.name
self.host = discovery_info.host
self.mac = discovery_info.properties.get("mac")
if self.mac is None:
poch = discovery_info.properties.get("poch", "")
if (result := search(r"mac=\w+", poch)) is not None:
self.mac = result.group(0).split("=")[1]
if not name or not self.host or not self.mac:
return self.async_abort(reason="not_xiaomi_miio")
self.mac = format_mac(self.mac)
# Check which device is discovered.
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"Gateway {self.host}"}}
)
return await self.async_step_cloud()
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"{device_model} {self.host}"}}
)
return await self.async_step_cloud()
# Discovered device is not yet supported
_LOGGER.debug(
"Not yet supported Xiaomi Miio device '%s' discovered with host %s",
name,
self.host,
)
return self.async_abort(reason="not_xiaomi_miio")
def extract_cloud_info(self, cloud_device_info):
"""Extract the cloud info."""
if self.host is None:
self.host = cloud_device_info["localip"]
if self.mac is None:
self.mac = format_mac(cloud_device_info["mac"])
if self.model is None:
self.model = cloud_device_info["model"]
if self.name is None:
self.name = cloud_device_info["name"]
self.token = cloud_device_info["token"]
async def async_step_cloud(self, user_input=None):
"""Configure a xiaomi miio device through the Miio Cloud."""
errors = {}
if user_input is not None:
if user_input[CONF_MANUAL]:
return await self.async_step_manual()
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if not cloud_username or not cloud_password or not cloud_country:
errors["base"] = "cloud_credentials_incomplete"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
miio_cloud = MiCloud(cloud_username, cloud_password)
try:
if not await self.hass.async_add_executor_job(miio_cloud.login):
errors["base"] = "cloud_login_error"
except MiCloudAccessDenied:
errors["base"] = "cloud_login_error"
if errors:
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
devices_raw = await self.hass.async_add_executor_job(
miio_cloud.get_devices, cloud_country
)
if not devices_raw:
errors["base"] = "cloud_no_devices"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
self.cloud_devices = {}
for device in devices_raw:
if not device.get("parent_id"):
name = device["name"]
model = device["model"]
list_name = f"{name} - {model}"
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if self.host is not None:
for device in self.cloud_devices.values():
cloud_host = device.get("localip")
if cloud_host == self.host:
self.extract_cloud_info(device)
return await self.async_step_connect()
if len(self.cloud_devices) == 1:
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return await self.async_step_connect()
return await self.async_step_select()
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
async def async_step_select(self, user_input=None):
"""Handle multiple cloud devices found."""
errors = {}
if user_input is not None:
cloud_device = self.cloud_devices[user_input["select_device"]]
self.extract_cloud_info(cloud_device)
return await self.async_step_connect()
select_schema = vol.Schema(
{vol.Required("select_device"): vol.In(list(self.cloud_devices))}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
async def async_step_manual(self, user_input=None):
"""Configure a xiaomi miio device Manually."""
errors = {}
if user_input is not None:
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return await self.async_step_connect()
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id="manual", data_schema=schema, errors=errors)
async def async_step_connect(self, user_input=None):
"""Connect to a xiaomi miio device."""
errors = {}
if self.host is None or self.token is None:
return self.async_abort(reason="incomplete_info")
if user_input is not None:
self.model = user_input[CONF_MODEL]
# Try to connect to a Xiaomi Device.
connect_device_class = ConnectXiaomiDevice(self.hass)
try:
await connect_device_class.async_connect_device(self.host, self.token)
except AuthException:
if self.model is None:
errors["base"] = "wrong_token"
except SetupException:
if self.model is None:
errors["base"] = "cannot_connect"
device_info = connect_device_class.device_info
if self.model is None and device_info is not None:
self.model = device_info.model
if self.model is None and not errors:
errors["base"] = "cannot_connect"
if errors:
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
if self.mac is None and device_info is not None:
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = await self.async_set_unique_id(
unique_id, raise_on_progress=False
)
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if (
self.cloud_username is not None
and self.cloud_password is not None
and self.cloud_country is not None
):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
self.hass.config_entries.async_update_entry(existing_entry, data=data)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
if self.name is None:
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if flow_type is None:
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if flow_type is not None:
return self.async_create_entry(
title=self.name,
data={
CONF_FLOW_TYPE: flow_type,
CONF_HOST: self.host,
CONF_TOKEN: self.token,
CONF_MODEL: self.model,
CONF_MAC: self.mac,
CONF_CLOUD_USERNAME: self.cloud_username,
CONF_CLOUD_PASSWORD: self.cloud_password,
CONF_CLOUD_COUNTRY: self.cloud_country,
},
)
errors["base"] = "unknown_device"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
|
the-stack_106_27623 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "Joรฃo Magalhรฃes <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import netius.servers
def app(environ, start_response):
status = "200 OK"
contents = "Hello World"
content_l = len(contents)
headers = (
("Content-Length", content_l),
("Content-type", "text/plain"),
("Connection", "keep-alive")
)
start_response(status, headers)
yield contents
if __name__ == "__main__":
server = netius.servers.WSGIServer(app = app)
server.serve(env = True)
else:
__path__ = []
|
the-stack_106_27624 | import os
print("XLang 0.0.1 Alpha")
print("type `help@' for more info")
while True:
request = input(">>> ")
if request.startswith("PRINT >"):
print = request.replace("PRINT >","",1)
print(print)
elif request.startswith("external@"):
com = request.replace("external@","",1)
import os
os.system(com)
|
the-stack_106_27625 | from neuron import h, crxd as rxd, gui
import numpy
import sys
import time
import itertools
npar = len(sys.argv)
# if(npar<2):
# print "usage: python wave1d.py <nseg> <nsubseg>"
# sys.exit(0)
# rxd.options.nsubseg =int(sys.argv[2])
rxd.options.subseg_interpolation = 0
rxd.options.subseg_averaging = 0
sec = h.Section()
L = 100
# sec.diam = 1
sec.nseg = 25
# h.pt3dadd(0, 0, 0, 1)
# h.pt3dadd(L, 0, 0, 10)
sec.Ra = 150
Rm = 25370
dend = sec
for myseg in dend:
myseg.v = -64
for myseg in dend:
myseg.cm = 1.41
dend.insert("pas")
for myseg in dend:
myseg.pas.g = 1.0 / Rm
for myseg in dend:
myseg.pas.e = -64
dend.insert("cal") # insert L-type Ca channel
for myseg in dend:
myseg.cal.gcalbar = 1.0e-6
h.pt3dadd(0, 0, 0, 1)
h.pt3dadd(L, 0, 0, 10)
nsubseg = 5
nstim = 5
st_dur = 2
st_interv = 50
st_start = 1000
stims = []
for i in range(nstim):
stim = h.IClamp(0.5, sec=sec)
stim.delay = st_start + i * (st_interv)
stim.dur = st_dur
stim.amp = 1.0 / 8000 * 1200
stims.append(stim)
h.CVode().active(1)
h.cvode.atol(1e-8)
s = h.PlotShape()
s.exec_menu("Shape Plot")
if __name__ == "__main__":
def set_plotshape_colormap(plotshape, cmap="jet"):
import matplotlib.cm
s = matplotlib.cm.ScalarMappable(cmap=cmap)
cmap = s.get_cmap()
s.set_clim(0, cmap.N)
rs, gs, bs = itertools.islice(zip(*s.to_rgba(list(range(cmap.N)))), 0, 3)
plotshape.colormap(cmap.N)
for i, r, g, b in zip(range(cmap.N), rs, gs, bs):
plotshape.colormap(i, r * 255, g * 255, b * 255)
# call s.scale(lo, hi) to replot the legend
set_plotshape_colormap(s)
# show the diameters
s.show(0)
# cytoplasmic, er volume fractions
fc, fe = 0.83, 0.17
# parameters
caDiff = 0.016
# caDiff =0
ip3Diff = 0.283
# ip3Diff = 0
cac_init = 1.0e-4
ip3_init = 0.1
gip3r = 12040
gserca = 0.3913
gleak = 6.020
kserca = 0.1
kip3 = 0.15
kact = 0.4
ip3rtau = 2000
# define the regions for the rxd
cyt = rxd.Region(
h.allsec(), nrn_region="i", geometry=rxd.FractionalVolume(fc, surface_fraction=1)
)
er = rxd.Region(h.allsec(), geometry=rxd.FractionalVolume(fe))
cyt_er_membrane = rxd.Region(h.allsec(), geometry=rxd.FixedPerimeter(1))
# the species and other states
ca = rxd.Species([cyt, er], d=caDiff, name="ca", charge=2, initial=cac_init)
ip3 = rxd.Species(cyt, d=ip3Diff, initial=ip3_init)
ip3r_gate_state = rxd.State(cyt_er_membrane, initial=0.8)
h_gate = ip3r_gate_state[cyt_er_membrane]
# pumps and channels between ER and Cytosol
serca = rxd.MultiCompartmentReaction(
ca[cyt] > ca[er],
gserca / ((kserca / (1000.0 * ca[cyt])) ** 2 + 1),
membrane=cyt_er_membrane,
custom_dynamics=True,
)
leak = rxd.MultiCompartmentReaction(
ca[er] != ca[cyt], gleak, gleak, membrane=cyt_er_membrane
)
minf = ip3[cyt] * 1000.0 * ca[cyt] / (ip3[cyt] + kip3) / (1000.0 * ca[cyt] + kact)
k = gip3r * (minf * h_gate) ** 3
ip3r = rxd.MultiCompartmentReaction(ca[er] != ca[cyt], k, k, membrane=cyt_er_membrane)
ip3rg = rxd.Rate(h_gate, (1.0 / (1 + 1000.0 * ca[cyt] / (0.3)) - h_gate) / ip3rtau)
v1 = h.Vector()
v1.record(sec(0.5)._ref_v)
ca1 = h.Vector()
ca1.record(sec(0.5)._ref_cai)
v2 = h.Vector()
v2.record(sec(0.25)._ref_v)
ca2 = h.Vector()
ca2.record(sec(0.25)._ref_cai)
times = h.Vector()
times.record(h._ref_t)
h.finitialize()
cae_init = (0.0017 - cac_init * fc) / fe
ca[er].concentration = cae_init
# ip3.nodes.concentration = 2
for node in ip3.nodes:
if node.x < 0.2:
node.concentration = 2
h.CVode().re_init()
s.variable("cai")
# s.scale(-70, -50)
s.scale(0, 2e-3)
tstop = 3000
recdt = 100
datacol = 0
del s
h.continuerun(tstop)
|
the-stack_106_27628 | # Copyright 2014 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import glob
import hashlib
import itertools
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import zipfile
from glob import iglob
from . import ports
from . import shared
from tools.shared import mangle_c_symbol_name, demangle_c_symbol_name
stdout = None
stderr = None
logger = logging.getLogger('system_libs')
LIBC_SOCKETS = ['socket.c', 'socketpair.c', 'shutdown.c', 'bind.c', 'connect.c',
'listen.c', 'accept.c', 'getsockname.c', 'getpeername.c', 'send.c',
'recv.c', 'sendto.c', 'recvfrom.c', 'sendmsg.c', 'recvmsg.c',
'getsockopt.c', 'setsockopt.c', 'freeaddrinfo.c']
def files_in_path(path_components, filenames):
srcdir = shared.path_from_root(*path_components)
return [os.path.join(srcdir, f) for f in filenames]
def glob_in_path(path_components, glob_pattern, excludes=()):
srcdir = shared.path_from_root(*path_components)
return [f for f in iglob(os.path.join(srcdir, glob_pattern)) if os.path.basename(f) not in excludes]
def get_all_files_under(dirname):
for path, subdirs, files in os.walk(dirname):
for name in files:
yield os.path.join(path, name)
def dir_is_newer(dir_a, dir_b):
assert os.path.exists(dir_a)
assert os.path.exists(dir_b)
newest_a = max([os.path.getmtime(x) for x in get_all_files_under(dir_a)])
newest_b = max([os.path.getmtime(x) for x in get_all_files_under(dir_b)])
return newest_a < newest_b
def get_cflags(force_object_files=False):
flags = []
if shared.Settings.WASM_BACKEND:
if shared.Settings.LTO and not force_object_files:
flags += ['-flto=' + shared.Settings.LTO]
if shared.Settings.RELOCATABLE:
flags += ['-s', 'RELOCATABLE']
return flags
def run_one_command(cmd):
# Helper function used by run_build_commands.
if shared.EM_BUILD_VERBOSE:
print(' '.join(cmd))
shared.run_process(cmd, stdout=stdout, stderr=stderr)
def run_build_commands(commands):
cores = min(len(commands), shared.Building.get_num_cores())
if cores <= 1:
for command in commands:
run_one_command(command)
else:
pool = shared.Building.get_multiprocessing_pool()
# https://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
# https://bugs.python.org/issue8296
# 999999 seconds (about 11 days) is reasonably huge to not trigger actual timeout
# and is smaller than the maximum timeout value 4294967.0 for Python 3 on Windows (threading.TIMEOUT_MAX)
pool.map_async(run_one_command, commands, chunksize=1).get(999999)
def static_library_ext():
return '.a' if shared.Settings.WASM_BACKEND else '.bc'
def create_lib(libname, inputs):
"""Create a library from a set of input objects."""
suffix = os.path.splitext(libname)[1]
if suffix in ('.bc', '.o'):
if len(inputs) == 1:
shutil.copyfile(inputs[0], libname)
else:
shared.Building.link_to_object(inputs, libname)
elif suffix == '.a':
shared.Building.emar('cr', libname, inputs)
else:
raise Exception('unknown suffix ' + libname)
def read_symbols(path):
with open(path) as f:
content = f.read()
# Require that Windows newlines should not be present in a symbols file, if running on Linux or macOS
# This kind of mismatch can occur if one copies a zip file of Emscripten cloned on Windows over to
# a Linux or macOS system. It will result in Emscripten linker getting confused on stray \r characters,
# and be unable to link any library symbols properly. We could harden against this by .strip()ping the
# opened files, but it is possible that the mismatching line endings can cause random problems elsewhere
# in the toolchain, hence abort execution if so.
if os.name != 'nt' and '\r\n' in content:
raise Exception('Windows newlines \\r\\n detected in symbols file "' + path + '"! This could happen for example when copying Emscripten checkout from Windows to Linux or macOS. Please use Unix line endings on checkouts of Emscripten on Linux and macOS!')
return shared.Building.parse_symbols(content).defs
def get_wasm_libc_rt_files():
# Static linking is tricky with LLVM, since e.g. memset might not be used
# from libc, but be used as an intrinsic, and codegen will generate a libc
# call from that intrinsic *after* static linking would have thought it is
# all in there. In asm.js this is not an issue as we do JS linking anyhow,
# and have asm.js-optimized versions of all the LLVM intrinsics. But for
# wasm, we need a better solution. For now, make another archive that gets
# included at the same time as compiler-rt.
# Note that this also includes things that may be depended on by those
# functions - fmin uses signbit, for example, so signbit must be here (so if
# fmin is added by codegen, it will have all it needs).
math_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'math'],
filenames=[
'fmin.c', 'fminf.c', 'fminl.c',
'fmax.c', 'fmaxf.c', 'fmaxl.c',
'fmod.c', 'fmodf.c', 'fmodl.c',
'log2.c', 'log2f.c', 'log10.c', 'log10f.c',
'exp2.c', 'exp2f.c', 'exp10.c', 'exp10f.c',
'scalbn.c', '__fpclassifyl.c',
'__signbitl.c', '__signbitf.c', '__signbit.c'
])
other_files = files_in_path(
path_components=['system', 'lib', 'libc'],
filenames=['emscripten_memcpy.c', 'emscripten_memset.c',
'emscripten_memmove.c'])
return math_files + other_files
class Library(object):
"""
`Library` is the base class of all system libraries.
There are two types of libraries: abstract and concrete.
* An abstract library, e.g. MTLibrary, is a subclass of `Library` that
implements certain behaviour common to multiple libraries. The features
of multiple abstract libraries can be used through multiple inheritance.
* A concrete library, e.g. libc, is a subclass of `Library` that describes
how to build a particular library, and its properties, such as name and
dependencies.
This library system is meant to handle having many versions of the same library,
which we call *variations*. For example, some libraries (those that inherit
from MTLibrary), have both single-threaded and multi-threaded versions.
An instance of a `Library` subclass represents a specific variation of the
library. Instance methods perform operations relating to this variation.
For example, `get_cflags()` would return the emcc flags needed to build this
variation, and `build()` would generate the library file for this variation.
The constructor takes keyword arguments that defines the variation.
Class methods perform tasks relating to all variations. For example,
`variations()` returns a list of all variations that exists for this library,
and `get_default_variation()` returns the variation suitable for the current
environment.
Other class methods act upon a group of libraries. For example,
`Library.get_all_variations()` returns a mapping of all variations of
existing libraries.
To add a new type of variation, you must add an parameter to `__init__` that
selects the variant. Then, override one of `vary_on` or `variations`, as well
as `get_default_variation`.
If the parameter is boolean, overriding `vary_on` to add the parameter name
to the returned list is sufficient:
@classmethod
def vary_on(cls):
return super().vary_on() + ['my_parameter']
Otherwise, you must override `variations`:
@classmethod
def variations(cls):
return [{'my_parameter': value, **other} for value, other in
itertools.product([1, 2, 3], super().variations())]
Overriding either `vary_on` or `variations` allows `embuilder.py` to know all
possible variations so it can build all of them.
You then need to modify `get_default_variation` to detect the correct value
for your new parameter based on the settings:
@classmethod
def get_default_variation(cls, **kwargs):
return super().get_default_variation(my_parameter=shared.Settings.MY_PARAMETER, **kwargs)
This allows the correct variation of the library to be selected when building
code with Emscripten.
"""
# The simple name of the library. When linking, this is the name to use to
# automatically get the correct version of the library.
# This should only be overridden in a concrete library class, e.g. libc,
# and left as None in an abstract library class, e.g. MTLibrary.
name = None
# A list of simple names of other libraries that this one depends on.
# For dynamic values, override `get_depends()` instead.
depends = []
# A set of symbols that this library exports. This will be set with a set
# returned by `read_symbols`.
symbols = set()
# A list of symbols that must be exported to keep the JavaScript
# dependencies of this library working.
js_depends = []
# Set to true to prevent EMCC_FORCE_STDLIBS from linking this library.
never_force = False
# The C compile executable to use. You can override this to shared.EMXX for C++.
emcc = shared.EMCC
# A list of flags to pass to emcc.
# The flags for the parent class is automatically inherited.
cflags = ['-Werror']
# A list of directories to put in the include path when building.
# This is a list of tuples of path components.
# For example, to put system/lib/a and system/lib/b under the emscripten
# directory into the include path, you would write:
# includes = [('system', 'lib', 'a'), ('system', 'lib', 'b')]
# The include path of the parent class is automatically inherited.
includes = []
# By default, `get_files` look for source files for this library under `src_dir`.
# It will either use the files listed in `src_files`, or use the glob pattern in
# `src_glob`. You may not specify both `src_files` and `src_glob`.
# When using `src_glob`, you can specify a list of files in `src_glob_exclude`
# to be excluded from the library.
# Alternatively, you can override `get_files` to use your own logic.
src_dir = None
src_files = None
src_glob = None
src_glob_exclude = None
# Whether to always generate WASM object files, even when LTO is set
force_object_files = False
def __init__(self):
"""
Creates a variation of this library.
A variation is a specific combination of settings a library can have.
For example, libc++-mt-noexcept is a variation of libc++.
There might be only one variation of a library.
The constructor keyword arguments will define what variation to use.
Use the `variations` classmethod to get the list of all possible constructor
arguments for this library.
Use the `get_default_variation` classmethod to construct the variation
suitable for the current invocation of emscripten.
"""
if not self.name:
raise NotImplementedError('Cannot instantiate an abstract library')
# Read .symbols file if it exists. This first tries to read a symbols file
# with the same basename with the library file name (e.g.
# libc++-mt.symbols), and if there isn't one, it tries to read the 'default'
# symbol file, which does not have any optional suffices (e.g.
# libc++.symbols).
basename = os.path.splitext(self.get_filename())[0]
if shared.Settings.WASM_BACKEND:
symbols_dir = shared.path_from_root('system', 'lib', 'symbols', 'wasm')
else:
symbols_dir = shared.path_from_root('system', 'lib', 'symbols', 'asmjs')
symbols_file = os.path.join(symbols_dir, basename + '.symbols')
default_symbols_file = os.path.join(symbols_dir, self.name + '.symbols')
if os.path.isfile(symbols_file):
self.symbols = read_symbols(symbols_file)
elif os.path.isfile(default_symbols_file):
self.symbols = read_symbols(default_symbols_file)
def in_temp(cls, *args):
"""Gets the path of a file in our temporary directory."""
return os.path.join(shared.get_emscripten_temp_dir(), *args)
def can_use(self):
"""
Whether this library can be used in the current environment.
For example, libmalloc would override this and return False
if the user requested no malloc.
"""
return True
def can_build(self):
"""
Whether this library can be built in the current environment.
Override this if, for example, the library can only be built on WASM backend.
"""
return True
def erase(self):
shared.Cache.erase_file(self.get_filename())
def get_path(self):
"""
Gets the cached path of this library.
This will trigger a build if this library is not in the cache.
"""
return shared.Cache.get(self.get_filename(), self.build)
def get_files(self):
"""
Gets a list of source files for this library.
Typically, you will use `src_dir`, `src_files`, `src_glob` and `src_glob_exclude`.
If those are insufficient to describe the files needed, you can override this method.
"""
if self.src_dir:
if self.src_files and self.src_glob:
raise Exception('Cannot use src_files and src_glob together')
if self.src_files:
return files_in_path(self.src_dir, self.src_files)
elif self.src_glob:
return glob_in_path(self.src_dir, self.src_glob, self.src_glob_exclude or ())
raise NotImplementedError()
def build_objects(self):
"""
Returns a list of compiled object files for this library.
By default, this builds all the source files returned by `self.get_files()`,
with the `cflags` returned by `self.get_cflags()`.
"""
commands = []
objects = []
cflags = self.get_cflags()
for src in self.get_files():
o = self.in_temp(os.path.basename(src) + '.o')
commands.append([shared.PYTHON, self.emcc, '-c', src, '-o', o] + cflags)
objects.append(o)
run_build_commands(commands)
return objects
def build(self):
"""Builds the library and returns the path to the file."""
out_filename = self.in_temp(self.get_filename())
create_lib(out_filename, self.build_objects())
return out_filename
@classmethod
def _inherit_list(cls, attr):
# Some properties, like cflags and includes, makes more sense to inherit
# via concatenation than replacement.
result = []
for item in cls.__mro__[::-1]:
# Using __dict__ to avoid inheritance
result += item.__dict__.get(attr, [])
return result
def get_cflags(self):
"""
Returns the list of flags to pass to emcc when building this variation
of the library.
Override and add any flags as needed to handle new variations.
"""
cflags = self._inherit_list('cflags')
cflags += get_cflags(force_object_files=self.force_object_files)
if self.includes:
cflags += ['-I' + shared.path_from_root(*path) for path in self._inherit_list('includes')]
return cflags
def get_base_name_prefix(self):
"""
Returns the base name of the library without any suffixes.
"""
return self.name
def get_base_name(self):
"""
Returns the base name of the library file.
This will include suffixes such as -mt, but will not include a file extension.
"""
return self.get_base_name_prefix()
def get_ext(self):
"""
Return the appropriate file extension for this library.
"""
return static_library_ext()
def get_filename(self):
"""
Return the full name of the library file, including the file extension.
"""
return self.get_base_name() + self.get_ext()
def get_depends(self):
"""
Return a list of simple names of libraries that this library depends on.
This is the dynamic version of `depends`.
"""
return self.depends
@classmethod
def vary_on(cls):
"""
Returns a list of strings that are the names of boolean constructor
arguments that defines the variations of this library.
This is used by the default implementation of `cls.variations()` to generate
every possible combination of boolean values to pass to these arguments.
"""
return []
@classmethod
def variations(cls):
"""
Returns a list of keyword arguments to pass to the constructor to create
every possible variation of this library.
By default, this is every possible combination of boolean values to pass
to the list of arguments returned by `vary_on`, but you can override
the behaviour.
"""
vary_on = cls.vary_on()
return [dict(zip(vary_on, toggles)) for toggles in
itertools.product([False, True], repeat=len(vary_on))]
@classmethod
def get_default_variation(cls, **kwargs):
"""
Construct the variation suitable for the current invocation of emscripten.
Subclasses should pass the keyword arguments they introduce to the
superclass version, and propagate **kwargs. The base class collects
all the keyword arguments and creates the instance.
"""
return cls(**kwargs)
@classmethod
def get_inheritance_tree(cls):
"""Returns all the classes in the inheritance tree of the current class."""
yield cls
for subclass in cls.__subclasses__():
for subclass in subclass.get_inheritance_tree():
yield subclass
@classmethod
def get_all_variations(cls):
"""
Gets all the variations of libraries in the inheritance tree of the current
library.
Calling Library.get_all_variations() returns the variations of ALL libraries
that can be built as a dictionary of variation names to Library objects.
"""
result = {}
for library in cls.get_inheritance_tree():
if library.name:
for flags in library.variations():
variation = library(**flags)
if variation.can_build():
result[variation.get_base_name()] = variation
return result
@classmethod
def get_usable_variations(cls):
"""
Gets all libraries suitable for the current invocation of emscripten.
This returns a dictionary of simple names to Library objects.
"""
result = {}
for subclass in cls.get_inheritance_tree():
if subclass.name:
library = subclass.get_default_variation()
if library.can_build() and library.can_use():
result[subclass.name] = library
return result
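  # Hedged usage sketch: callers are expected to pick the variation for the
  # current settings and build it through the cache, e.g.
  #
  #   usable = Library.get_usable_variations()
  #   libc_variation = usable['libc']
  #   libc_path = libc_variation.get_path()  # builds into the cache if missing
  #
  # (variable names here are illustrative, not part of this module).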
class MTLibrary(Library):
def __init__(self, **kwargs):
self.is_mt = kwargs.pop('is_mt')
super(MTLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(MTLibrary, self).get_cflags()
if self.is_mt:
cflags += ['-s', 'USE_PTHREADS=1', '-DUSE_THREADS']
return cflags
def get_base_name(self):
name = super(MTLibrary, self).get_base_name()
if self.is_mt:
name += '-mt'
return name
@classmethod
def vary_on(cls):
return super(MTLibrary, cls).vary_on() + ['is_mt']
@classmethod
def get_default_variation(cls, **kwargs):
return super(MTLibrary, cls).get_default_variation(is_mt=shared.Settings.USE_PTHREADS, **kwargs)
class NoExceptLibrary(Library):
def __init__(self, **kwargs):
self.is_noexcept = kwargs.pop('is_noexcept')
super(NoExceptLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(NoExceptLibrary, self).get_cflags()
if self.is_noexcept:
cflags += ['-fno-exceptions']
else:
cflags += ['-s', 'DISABLE_EXCEPTION_CATCHING=0']
return cflags
def get_base_name(self):
name = super(NoExceptLibrary, self).get_base_name()
if self.is_noexcept:
name += '-noexcept'
return name
@classmethod
def vary_on(cls):
return super(NoExceptLibrary, cls).vary_on() + ['is_noexcept']
@classmethod
def get_default_variation(cls, **kwargs):
return super(NoExceptLibrary, cls).get_default_variation(is_noexcept=shared.Settings.DISABLE_EXCEPTION_CATCHING, **kwargs)
class MuslInternalLibrary(Library):
includes = [
['system', 'lib', 'libc', 'musl', 'src', 'internal'],
]
cflags = [
'-D_XOPEN_SOURCE=700',
'-Wno-unused-result', # system call results are often ignored in musl, and in wasi that warns
]
class AsanInstrumentedLibrary(Library):
def __init__(self, **kwargs):
self.is_asan = kwargs.pop('is_asan', False)
super(AsanInstrumentedLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(AsanInstrumentedLibrary, self).get_cflags()
if self.is_asan:
cflags += ['-fsanitize=address']
return cflags
def get_base_name(self):
name = super(AsanInstrumentedLibrary, self).get_base_name()
if self.is_asan:
name += '-asan'
return name
@classmethod
def vary_on(cls):
vary_on = super(AsanInstrumentedLibrary, cls).vary_on()
if shared.Settings.WASM_BACKEND:
vary_on += ['is_asan']
return vary_on
@classmethod
def get_default_variation(cls, **kwargs):
return super(AsanInstrumentedLibrary, cls).get_default_variation(is_asan=shared.Settings.USE_ASAN, **kwargs)
class CXXLibrary(Library):
emcc = shared.EMXX
class NoBCLibrary(Library):
# Some libraries cannot be compiled as .bc files. This is because .bc files will link in every
# object in the library. While the optimizer will readily optimize out most of the unused
# functions, things like global constructors that are linked in cannot be optimized out, even
# though they are not actually needed. If we use .a files for such libraries, only the object
# files, and by extension, their contained global constructors, that are actually needed will be
# linked in.
def get_ext(self):
return '.a'
class libcompiler_rt(Library):
name = 'libcompiler_rt'
# compiler_rt files can't currently be part of LTO although we are hoping to remove this
# restriction soon: https://reviews.llvm.org/D71738
force_object_files = True
cflags = ['-O2', '-fno-builtin']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'builtins']
if shared.Settings.WASM_BACKEND:
filelist = shared.path_from_root('system', 'lib', 'compiler-rt', 'filelist.txt')
src_files = open(filelist).read().splitlines()
src_files.append(shared.path_from_root('system', 'lib', 'compiler-rt', 'extras.c'))
else:
src_files = ['divdc3.c', 'divsc3.c', 'muldc3.c', 'mulsc3.c']
class libc(AsanInstrumentedLibrary, MuslInternalLibrary, MTLibrary):
name = 'libc'
depends = ['libcompiler_rt']
# Without -fno-builtin, LLVM can optimize away or convert calls to library
# functions to something else based on assumptions that they behave exactly
# like the standard library. This can cause unexpected bugs when we use our
# custom standard library. The same for other libc/libm builds.
cflags = ['-Os', '-fno-builtin']
# Hide several musl warnings that produce a lot of spam to unit test build
# server logs. TODO: When updating musl the next time, feel free to recheck
# which of their warnings might have been fixed, and which ones of these could
# be cleaned up.
cflags += ['-Wno-return-type', '-Wno-parentheses', '-Wno-ignored-attributes',
'-Wno-shift-count-overflow', '-Wno-shift-negative-value',
'-Wno-dangling-else', '-Wno-unknown-pragmas',
'-Wno-shift-op-parentheses', '-Wno-string-plus-int',
'-Wno-logical-op-parentheses', '-Wno-bitwise-op-parentheses',
'-Wno-visibility', '-Wno-pointer-sign', '-Wno-absolute-value',
'-Wno-empty-body']
def get_files(self):
libc_files = []
musl_srcdir = shared.path_from_root('system', 'lib', 'libc', 'musl', 'src')
# musl modules
blacklist = [
'ipc', 'passwd', 'thread', 'signal', 'sched', 'ipc', 'time', 'linux',
'aio', 'exit', 'legacy', 'mq', 'process', 'search', 'setjmp', 'env',
'ldso', 'conf'
]
# individual files
blacklist += [
'memcpy.c', 'memset.c', 'memmove.c', 'getaddrinfo.c', 'getnameinfo.c',
'inet_addr.c', 'res_query.c', 'res_querydomain.c', 'gai_strerror.c',
'proto.c', 'gethostbyaddr.c', 'gethostbyaddr_r.c', 'gethostbyname.c',
'gethostbyname2_r.c', 'gethostbyname_r.c', 'gethostbyname2.c',
'usleep.c', 'alarm.c', 'syscall.c', '_exit.c', 'popen.c',
'getgrouplist.c', 'initgroups.c', 'timer_create.c',
'faccessat.c',
]
blacklist += LIBC_SOCKETS
# individual math files
blacklist += [
'abs.c', 'cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c', 'asin.c',
'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c', 'atan2.c',
'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c', 'log.c', 'logf.c',
'logl.c', 'sqrtl.c', 'round.c', 'roundf.c',
'fabsl.c', 'ceill.c', 'floorl.c', 'pow.c', 'powf.c', 'powl.c',
]
if self.is_asan:
# With ASan, we need to use specialized implementations of certain libc
# functions that do not rely on undefined behavior, for example, reading
# multiple bytes at once as an int and overflowing a buffer.
# Otherwise, ASan will catch these errors and terminate the program.
blacklist += ['strcpy.c', 'memchr.c', 'strchrnul.c', 'strlen.c',
'aligned_alloc.c', 'fcntl.c']
libc_files += [
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strcpy.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_memchr.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strchrnul.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strlen.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_fcntl.c'),
]
if shared.Settings.WASM_BACKEND:
# With the wasm backend these are included in wasm_libc_rt instead
blacklist += [os.path.basename(f) for f in get_wasm_libc_rt_files()]
else:
blacklist += ['rintf.c', 'ceil.c', 'ceilf.c', 'floor.c', 'floorf.c',
'fabs.c', 'fabsf.c', 'sqrt.c', 'sqrtf.c']
blacklist = set(blacklist)
# TODO: consider using more math code from musl, doing so makes box2d faster
for dirpath, dirnames, filenames in os.walk(musl_srcdir):
for f in filenames:
if f.endswith('.c'):
if f in blacklist:
continue
dir_parts = os.path.split(dirpath)
cancel = False
for part in dir_parts:
if part in blacklist:
cancel = True
break
if not cancel:
libc_files.append(os.path.join(musl_srcdir, dirpath, f))
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'legacy'],
filenames=['getpagesize.c'])
if shared.Settings.WASM_BACKEND:
# See libc_extras below
libc_files.append(shared.path_from_root('system', 'lib', 'libc', 'extras.c'))
# Include all the getenv stuff with the wasm backend. With fastcomp we
# still use JS because libc is a .bc file and we don't want to have a
# global constructor there for __environ, which would mean it is always
# included.
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'env'],
filenames=['__environ.c', 'getenv.c', 'putenv.c', 'setenv.c', 'unsetenv.c'])
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'sched'],
filenames=['sched_yield.c'])
libc_files.append(shared.path_from_root('system', 'lib', 'libc', 'wasi-helpers.c'))
return libc_files
def get_depends(self):
depends = super(libc, self).get_depends()
if shared.Settings.WASM:
return depends + ['libc-wasm']
return depends
class libsockets(MuslInternalLibrary, MTLibrary):
name = 'libsockets'
symbols = set()
cflags = ['-Os', '-fno-builtin']
def get_files(self):
network_dir = shared.path_from_root('system', 'lib', 'libc', 'musl', 'src', 'network')
return [os.path.join(network_dir, x) for x in LIBC_SOCKETS]
class libsockets_proxy(MuslInternalLibrary, MTLibrary):
name = 'libsockets_proxy'
symbols = set()
cflags = ['-Os']
def get_files(self):
return [shared.path_from_root('system', 'lib', 'websocket', 'websocket_to_posix_socket.cpp'),
shared.path_from_root('system', 'lib', 'libc', 'musl', 'src', 'network', 'inet_addr.c')]
class libc_wasm(MuslInternalLibrary):
name = 'libc-wasm'
cflags = ['-O2', '-fno-builtin']
src_dir = ['system', 'lib', 'libc', 'musl', 'src', 'math']
src_files = ['cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c',
'asin.c', 'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c',
'atan2.c', 'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c',
'log.c', 'logf.c', 'logl.c', 'pow.c', 'powf.c', 'powl.c']
def can_use(self):
# if building to wasm, we need more math code, since we have fewer builtins
return shared.Settings.WASM
class crt1(MuslInternalLibrary):
name = 'crt1'
cflags = ['-O2']
src_dir = ['system', 'lib', 'libc']
src_files = ['crt1.c']
force_object_files = True
def get_ext(self):
return '.o'
def can_use(self):
return shared.Settings.STANDALONE_WASM
def can_build(self):
return shared.Settings.WASM_BACKEND
class libc_extras(MuslInternalLibrary):
"""This library is separate from libc itself for fastcomp only so that the
constructor it contains can be DCE'd. With the wasm backend libc it is a .a
file so object file granularity applies.
"""
name = 'libc-extras'
src_dir = ['system', 'lib', 'libc']
src_files = ['extras.c']
def can_build(self):
return not shared.Settings.WASM_BACKEND
class libcxxabi(CXXLibrary, NoExceptLibrary, MTLibrary):
name = 'libc++abi'
depends = ['libc']
cflags = [
'-std=c++11',
'-Oz',
'-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS',
# Remove this once we update to include this llvm
# revision: https://reviews.llvm.org/D64961
'-D_LIBCXXABI_GUARD_ABI_ARM',
]
def get_cflags(self):
cflags = super(libcxxabi, self).get_cflags()
cflags.append('-DNDEBUG')
if not self.is_mt:
cflags.append('-D_LIBCXXABI_HAS_NO_THREADS')
if self.is_noexcept:
cflags.append('-D_LIBCXXABI_NO_EXCEPTIONS')
return cflags
def get_files(self):
filenames = [
'abort_message.cpp',
'cxa_aux_runtime.cpp',
'cxa_default_handlers.cpp',
'cxa_demangle.cpp',
'cxa_exception_storage.cpp',
'cxa_guard.cpp',
'cxa_handlers.cpp',
'cxa_virtual.cpp',
'fallback_malloc.cpp',
'stdlib_new_delete.cpp',
'stdlib_exception.cpp',
'stdlib_stdexcept.cpp',
'stdlib_typeinfo.cpp',
'private_typeinfo.cpp'
]
if self.is_noexcept:
filenames += ['cxa_noexception.cpp']
return files_in_path(
path_components=['system', 'lib', 'libcxxabi', 'src'],
filenames=filenames)
class libcxx(NoBCLibrary, CXXLibrary, NoExceptLibrary, MTLibrary):
name = 'libc++'
depends = ['libc++abi']
cflags = ['-std=c++11', '-DLIBCXX_BUILDING_LIBCXXABI=1', '-D_LIBCPP_BUILDING_LIBRARY', '-Oz',
'-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS']
src_dir = ['system', 'lib', 'libcxx']
src_files = [
'algorithm.cpp',
'any.cpp',
'bind.cpp',
'charconv.cpp',
'chrono.cpp',
'condition_variable.cpp',
'condition_variable_destructor.cpp',
'debug.cpp',
'exception.cpp',
'functional.cpp',
'future.cpp',
'hash.cpp',
'ios.cpp',
'iostream.cpp',
'locale.cpp',
'memory.cpp',
'mutex.cpp',
'mutex_destructor.cpp',
'new.cpp',
'optional.cpp',
'random.cpp',
'regex.cpp',
'shared_mutex.cpp',
'stdexcept.cpp',
'string.cpp',
'strstream.cpp',
'system_error.cpp',
'thread.cpp',
'typeinfo.cpp',
'utility.cpp',
'valarray.cpp',
'variant.cpp',
'vector.cpp',
os.path.join('experimental', 'memory_resource.cpp'),
os.path.join('filesystem', 'directory_iterator.cpp'),
os.path.join('filesystem', 'int128_builtins.cpp'),
os.path.join('filesystem', 'operations.cpp')
]
class libmalloc(MTLibrary, NoBCLibrary):
name = 'libmalloc'
cflags = ['-O2', '-fno-builtin']
def __init__(self, **kwargs):
self.malloc = kwargs.pop('malloc')
if self.malloc not in ('dlmalloc', 'emmalloc', 'none'):
raise Exception('malloc must be one of "emmalloc", "dlmalloc" or "none", see settings.js')
self.is_debug = kwargs.pop('is_debug')
self.use_errno = kwargs.pop('use_errno')
self.is_tracing = kwargs.pop('is_tracing')
self.use_64bit_ops = kwargs.pop('use_64bit_ops')
super(libmalloc, self).__init__(**kwargs)
def get_files(self):
malloc = shared.path_from_root('system', 'lib', {
'dlmalloc': 'dlmalloc.c', 'emmalloc': 'emmalloc.cpp'
}[self.malloc])
sbrk = shared.path_from_root('system', 'lib', 'sbrk.c')
return [malloc, sbrk]
def get_cflags(self):
cflags = super(libmalloc, self).get_cflags()
if self.is_debug:
cflags += ['-UNDEBUG', '-DDLMALLOC_DEBUG']
# TODO: consider adding -DEMMALLOC_DEBUG, but that is quite slow
else:
cflags += ['-DNDEBUG']
if not self.use_errno:
cflags += ['-DMALLOC_FAILURE_ACTION=', '-DEMSCRIPTEN_NO_ERRNO']
if self.is_tracing:
cflags += ['--tracing']
if self.use_64bit_ops:
cflags += ['-DEMMALLOC_USE_64BIT_OPS=1']
return cflags
def get_base_name_prefix(self):
return 'lib%s' % self.malloc
def get_base_name(self):
name = super(libmalloc, self).get_base_name()
if self.is_debug:
name += '-debug'
if not self.use_errno:
# emmalloc doesn't actually use errno, but it's easier to build it again
name += '-noerrno'
if self.is_tracing:
name += '-tracing'
if self.use_64bit_ops:
name += '-64bit'
return name
def can_use(self):
return shared.Settings.MALLOC != 'none'
@classmethod
def vary_on(cls):
return super(libmalloc, cls).vary_on() + ['is_debug', 'use_errno', 'is_tracing', 'use_64bit_ops']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libmalloc, cls).get_default_variation(
malloc=shared.Settings.MALLOC,
is_debug=shared.Settings.DEBUG_LEVEL >= 3,
use_errno=shared.Settings.SUPPORT_ERRNO,
is_tracing=shared.Settings.EMSCRIPTEN_TRACING,
use_64bit_ops=shared.Settings.MALLOC == 'emmalloc' and (shared.Settings.WASM == 1 or (shared.Settings.WASM_BACKEND and shared.Settings.WASM2JS == 0)),
**kwargs
)
@classmethod
def variations(cls):
combos = super(libmalloc, cls).variations()
return ([dict(malloc='dlmalloc', **combo) for combo in combos if not combo['use_64bit_ops']] +
[dict(malloc='emmalloc', **combo) for combo in combos])
class libal(Library):
name = 'libal'
depends = ['libc']
cflags = ['-Os']
src_dir = ['system', 'lib']
src_files = ['al.c']
class libgl(MTLibrary):
name = 'libgl'
depends = ['libc']
src_dir = ['system', 'lib', 'gl']
src_glob = '*.c'
cflags = ['-Oz']
def __init__(self, **kwargs):
self.is_legacy = kwargs.pop('is_legacy')
self.is_webgl2 = kwargs.pop('is_webgl2')
self.is_ofb = kwargs.pop('is_ofb')
self.is_full_es3 = kwargs.pop('is_full_es3')
super(libgl, self).__init__(**kwargs)
def get_base_name(self):
name = super(libgl, self).get_base_name()
if self.is_legacy:
name += '-emu'
if self.is_webgl2:
name += '-webgl2'
if self.is_ofb:
name += '-ofb'
if self.is_full_es3:
name += '-full_es3'
return name
def get_cflags(self):
cflags = super(libgl, self).get_cflags()
if self.is_legacy:
cflags += ['-DLEGACY_GL_EMULATION=1']
if self.is_webgl2:
cflags += ['-DMAX_WEBGL_VERSION=2', '-s', 'MAX_WEBGL_VERSION=2']
if self.is_ofb:
cflags += ['-D__EMSCRIPTEN_OFFSCREEN_FRAMEBUFFER__']
if self.is_full_es3:
cflags += ['-D__EMSCRIPTEN_FULL_ES3__']
return cflags
@classmethod
def vary_on(cls):
return super(libgl, cls).vary_on() + ['is_legacy', 'is_webgl2', 'is_ofb', 'is_full_es3']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libgl, cls).get_default_variation(
is_legacy=shared.Settings.LEGACY_GL_EMULATION,
is_webgl2=shared.Settings.MAX_WEBGL_VERSION >= 2,
is_ofb=shared.Settings.OFFSCREEN_FRAMEBUFFER,
is_full_es3=shared.Settings.FULL_ES3,
**kwargs
)
class libembind(CXXLibrary):
name = 'libembind'
cflags = ['-std=c++11']
depends = ['libc++abi']
never_force = True
def __init__(self, **kwargs):
self.with_rtti = kwargs.pop('with_rtti', False)
super(libembind, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(libembind, self).get_cflags()
if not self.with_rtti:
cflags += ['-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
return cflags
@classmethod
def vary_on(cls):
return super(libembind, cls).vary_on() + ['with_rtti']
def get_base_name(self):
name = super(libembind, self).get_base_name()
if self.with_rtti:
name += '-rtti'
return name
def get_files(self):
return [shared.path_from_root('system', 'lib', 'embind', 'bind.cpp')]
@classmethod
def get_default_variation(cls, **kwargs):
return super(libembind, cls).get_default_variation(with_rtti=shared.Settings.USE_RTTI, **kwargs)
class libfetch(CXXLibrary, MTLibrary):
name = 'libfetch'
depends = ['libc++abi']
never_force = True
def get_files(self):
return [shared.path_from_root('system', 'lib', 'fetch', 'emscripten_fetch.cpp')]
class libasmfs(CXXLibrary, MTLibrary):
name = 'libasmfs'
depends = ['libc++abi']
never_force = True
def get_files(self):
return [shared.path_from_root('system', 'lib', 'fetch', 'asmfs.cpp')]
def can_build(self):
# ASMFS is looking for a maintainer
# https://github.com/emscripten-core/emscripten/issues/9534
return False
class libhtml5(Library):
name = 'libhtml5'
cflags = ['-Oz']
src_dir = ['system', 'lib', 'html5']
src_glob = '*.c'
class libpthread(AsanInstrumentedLibrary, MuslInternalLibrary, MTLibrary):
name = 'libpthread'
depends = ['libc']
cflags = ['-O2']
def get_files(self):
if self.is_mt:
files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'thread'],
filenames=[
'pthread_attr_destroy.c', 'pthread_condattr_setpshared.c',
'pthread_mutex_lock.c', 'pthread_spin_destroy.c', 'pthread_attr_get.c',
'pthread_cond_broadcast.c', 'pthread_mutex_setprioceiling.c',
'pthread_spin_init.c', 'pthread_attr_init.c', 'pthread_cond_destroy.c',
'pthread_mutex_timedlock.c', 'pthread_spin_lock.c',
'pthread_attr_setdetachstate.c', 'pthread_cond_init.c',
'pthread_mutex_trylock.c', 'pthread_spin_trylock.c',
'pthread_attr_setguardsize.c', 'pthread_cond_signal.c',
'pthread_mutex_unlock.c', 'pthread_spin_unlock.c',
'pthread_attr_setinheritsched.c', 'pthread_cond_timedwait.c',
'pthread_once.c', 'sem_destroy.c', 'pthread_attr_setschedparam.c',
'pthread_cond_wait.c', 'pthread_rwlockattr_destroy.c', 'sem_getvalue.c',
'pthread_attr_setschedpolicy.c', 'pthread_equal.c', 'pthread_rwlockattr_init.c',
'sem_init.c', 'pthread_attr_setscope.c', 'pthread_getspecific.c',
'pthread_rwlockattr_setpshared.c', 'sem_open.c', 'pthread_attr_setstack.c',
'pthread_key_create.c', 'pthread_rwlock_destroy.c', 'sem_post.c',
'pthread_attr_setstacksize.c', 'pthread_mutexattr_destroy.c',
'pthread_rwlock_init.c', 'sem_timedwait.c', 'pthread_barrierattr_destroy.c',
'pthread_mutexattr_init.c', 'pthread_rwlock_rdlock.c', 'sem_trywait.c',
'pthread_barrierattr_init.c', 'pthread_mutexattr_setprotocol.c',
'pthread_rwlock_timedrdlock.c', 'sem_unlink.c',
'pthread_barrierattr_setpshared.c', 'pthread_mutexattr_setpshared.c',
'pthread_rwlock_timedwrlock.c', 'sem_wait.c', 'pthread_barrier_destroy.c',
'pthread_mutexattr_setrobust.c', 'pthread_rwlock_tryrdlock.c',
'__timedwait.c', 'pthread_barrier_init.c', 'pthread_mutexattr_settype.c',
'pthread_rwlock_trywrlock.c', 'vmlock.c', 'pthread_barrier_wait.c',
'pthread_mutex_consistent.c', 'pthread_rwlock_unlock.c', '__wait.c',
'pthread_condattr_destroy.c', 'pthread_mutex_destroy.c',
'pthread_rwlock_wrlock.c', 'pthread_condattr_init.c',
'pthread_mutex_getprioceiling.c', 'pthread_setcanceltype.c',
'pthread_condattr_setclock.c', 'pthread_mutex_init.c',
'pthread_setspecific.c', 'pthread_setcancelstate.c'
])
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread.c')]
if shared.Settings.WASM_BACKEND:
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_wasm.c')]
else:
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_asmjs.c')]
return files
else:
return [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_stub.c')]
def get_base_name_prefix(self):
return 'libpthread' if self.is_mt else 'libpthread_stub'
class CompilerRTWasmLibrary(Library):
cflags = ['-O2', '-fno-builtin']
# compiler_rt files can't currently be part of LTO although we are hoping to remove this
# restriction soon: https://reviews.llvm.org/D71738
force_object_files = True
def can_build(self):
return shared.Settings.WASM_BACKEND
class libc_rt_wasm(AsanInstrumentedLibrary, CompilerRTWasmLibrary, MuslInternalLibrary):
name = 'libc_rt_wasm'
def get_files(self):
return get_wasm_libc_rt_files()
class libubsan_minimal_rt_wasm(CompilerRTWasmLibrary, MTLibrary):
name = 'libubsan_minimal_rt_wasm'
never_force = True
includes = [['system', 'lib', 'compiler-rt', 'lib']]
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'ubsan_minimal']
src_files = ['ubsan_minimal_handlers.cpp']
class libsanitizer_common_rt_wasm(CompilerRTWasmLibrary, MTLibrary):
name = 'libsanitizer_common_rt_wasm'
depends = ['libc++abi']
includes = [['system', 'lib', 'libc', 'musl', 'src', 'internal']]
js_depends = ['memalign', 'emscripten_builtin_memalign', '__data_end', '__heap_base']
never_force = True
cflags = ['-std=c++11']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'sanitizer_common']
src_glob = '*.cc'
src_glob_exclude = ['sanitizer_common_nolibc.cc']
class SanitizerLibrary(CompilerRTWasmLibrary, MTLibrary):
depends = ['libsanitizer_common_rt_wasm']
never_force = True
includes = [['system', 'lib', 'compiler-rt', 'lib']]
cflags = ['-std=c++11']
src_glob = '*.cc'
class libubsan_rt_wasm(SanitizerLibrary):
name = 'libubsan_rt_wasm'
js_depends = ['emscripten_builtin_malloc', 'emscripten_builtin_free']
cflags = ['-DUBSAN_CAN_USE_CXXABI']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'ubsan']
class liblsan_common_rt_wasm(SanitizerLibrary):
name = 'liblsan_common_rt_wasm'
js_depends = ['__global_base']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'lsan']
src_glob = 'lsan_common*.cc'
class liblsan_rt_wasm(SanitizerLibrary):
name = 'liblsan_rt_wasm'
depends = ['liblsan_common_rt_wasm']
js_depends = ['emscripten_builtin_malloc', 'emscripten_builtin_free']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'lsan']
src_glob_exclude = ['lsan_common.cc', 'lsan_common_mac.cc', 'lsan_common_linux.cc',
'lsan_common_emscripten.cc']
class libasan_rt_wasm(SanitizerLibrary):
name = 'libasan_rt_wasm'
depends = ['liblsan_common_rt_wasm', 'libubsan_rt_wasm']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'asan']
# This library is used when STANDALONE_WASM is set. In that mode, we don't
# want to depend on JS, and so this library contains implementations of
# things that we'd normally do in JS. That includes some general things
# as well as some additional musl components (that normally we reimplement
# in JS as it's more efficient that way).
class libstandalonewasm(MuslInternalLibrary):
name = 'libstandalonewasm'
cflags = ['-Os']
src_dir = ['system', 'lib']
def __init__(self, **kwargs):
self.is_mem_grow = kwargs.pop('is_mem_grow')
super(libstandalonewasm, self).__init__(**kwargs)
def get_base_name(self):
name = super(libstandalonewasm, self).get_base_name()
if self.is_mem_grow:
name += '-memgrow'
return name
def get_cflags(self):
cflags = super(libstandalonewasm, self).get_cflags()
cflags += ['-DNDEBUG']
if self.is_mem_grow:
cflags += ['-D__EMSCRIPTEN_MEMORY_GROWTH__=1']
return cflags
@classmethod
def vary_on(cls):
return super(libstandalonewasm, cls).vary_on() + ['is_mem_grow']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libstandalonewasm, cls).get_default_variation(
is_mem_grow=shared.Settings.ALLOW_MEMORY_GROWTH,
**kwargs
)
def get_files(self):
base_files = files_in_path(
path_components=['system', 'lib'],
filenames=['standalone_wasm.c'])
# It is more efficient to use JS methods for time, normally.
time_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'time'],
filenames=['strftime.c',
'__month_to_secs.c',
'__tm_to_secs.c',
'__tz.c',
'__year_to_secs.c',
'gettimeofday.c',
'localtime.c',
'localtime_r.c',
'gmtime.c',
'gmtime_r.c',
'nanosleep.c',
'mktime.c'])
# It is more efficient to use JS for __assert_fail, as it avoids always
# including fprintf etc.
exit_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'exit'],
filenames=['assert.c'])
return base_files + time_files + exit_files
def can_build(self):
return shared.Settings.WASM_BACKEND
# If main() is not in EXPORTED_FUNCTIONS, it may be dce'd out. This can be
# confusing, so issue a warning.
def warn_on_unexported_main(symbolses):
if '_main' not in shared.Settings.EXPORTED_FUNCTIONS:
for symbols in symbolses:
if 'main' in symbols.defs:
logger.warning('main() is in the input files, but "_main" is not in EXPORTED_FUNCTIONS, which means it may be eliminated as dead code. Export it if you want main() to run.')
return
def calculate(temp_files, in_temp, stdout_, stderr_, forced=[]):
global stdout, stderr
stdout = stdout_
stderr = stderr_
# Set of libraries to include on the link line, as opposed to `force` which
# is the set of libraries to force include (with --whole-archive).
always_include = set()
# Setting this will only use the forced libs in EMCC_FORCE_STDLIBS. This avoids spending time checking
# for unresolved symbols in your project files, which can speed up linking, but if you do not have
# the proper list of actually needed libraries, errors can occur. See below for how we must
# export all the symbols in deps_info when using this option.
only_forced = os.environ.get('EMCC_ONLY_FORCED_STDLIBS')
if only_forced:
temp_files = []
# Add in some hacks for js libraries. If a js lib depends on a symbol provided by a C library, it must be
  # added here, because our deps go only one way: each library here is checked, then we check the next
# in order - libc++, libcxextra, etc. - and then we run the JS compiler and provide extra symbols from
# library*.js files. But we cannot then go back to the C libraries if a new dep was added!
# TODO: Move all __deps from src/library*.js to deps_info.json, and use that single source of info
# both here and in the JS compiler.
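  # deps_info.json maps a symbol name to the extra symbols it pulls in; an illustrative
  # shape only (the entry names here are hypothetical): { "some_symbol": ["dep_a", "dep_b"] }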
deps_info = json.loads(open(shared.path_from_root('src', 'deps_info.json')).read())
added = set()
def add_back_deps(need):
more = False
for ident, deps in deps_info.items():
if ident in need.undefs and ident not in added:
added.add(ident)
more = True
for dep in deps:
need.undefs.add(dep)
if shared.Settings.VERBOSE:
logger.debug('adding dependency on %s due to deps-info on %s' % (dep, ident))
shared.Settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(dep))
if more:
add_back_deps(need) # recurse to get deps of deps
# Scan symbols
symbolses = shared.Building.parallel_llvm_nm([os.path.abspath(t) for t in temp_files])
warn_on_unexported_main(symbolses)
if len(symbolses) == 0:
class Dummy(object):
defs = set()
undefs = set()
symbolses.append(Dummy())
# depend on exported functions
for export in shared.Settings.EXPORTED_FUNCTIONS:
if shared.Settings.VERBOSE:
logger.debug('adding dependency on export %s' % export)
symbolses[0].undefs.add(demangle_c_symbol_name(export))
for symbols in symbolses:
add_back_deps(symbols)
# If we are only doing forced stdlibs, then we don't know the actual symbols we need,
# and must assume all of deps_info must be exported. Note that this might cause
# warnings on exports that do not exist.
if only_forced:
for key, value in deps_info.items():
for dep in value:
shared.Settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(dep))
always_include.add('libpthread')
if shared.Settings.MALLOC != 'none':
always_include.add('libmalloc')
if shared.Settings.WASM_BACKEND:
always_include.add('libcompiler_rt')
libs_to_link = []
already_included = set()
system_libs_map = Library.get_usable_variations()
system_libs = sorted(system_libs_map.values(), key=lambda lib: lib.name)
# Setting this in the environment will avoid checking dependencies and make
  # building big projects a little faster. Set it to 1 to include everything, or
  # to a comma-separated list of the specific libraries you want (libc++, etc.).
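  # Illustrative invocation (library names must match entries in system_libs_map):
  #   EMCC_FORCE_STDLIBS=libc++,libmalloc emcc main.cpp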
force = os.environ.get('EMCC_FORCE_STDLIBS')
if force == '1':
force = ','.join(name for name, lib in system_libs_map.items() if not lib.never_force)
force_include = set((force.split(',') if force else []) + forced)
if force_include:
logger.debug('forcing stdlibs: ' + str(force_include))
for lib in always_include:
assert lib in system_libs_map
for lib in force_include:
if lib not in system_libs_map:
shared.exit_with_error('invalid forced library: %s', lib)
def add_library(lib):
if lib.name in already_included:
return
already_included.add(lib.name)
logger.debug('including %s (%s)' % (lib.name, lib.get_filename()))
need_whole_archive = lib.name in force_include and lib.get_ext() == '.a'
libs_to_link.append((lib.get_path(), need_whole_archive))
# Recursively add dependencies
for d in lib.get_depends():
add_library(system_libs_map[d])
for d in lib.js_depends:
d = '_' + d
if d not in shared.Settings.EXPORTED_FUNCTIONS:
shared.Settings.EXPORTED_FUNCTIONS.append(d)
if shared.Settings.STANDALONE_WASM:
add_library(system_libs_map['crt1'])
# Go over libraries to figure out which we must include
for lib in system_libs:
if lib.name in already_included:
continue
force_this = lib.name in force_include
if not force_this and only_forced:
continue
include_this = force_this or lib.name in always_include
if not include_this:
need_syms = set()
has_syms = set()
for symbols in symbolses:
if shared.Settings.VERBOSE:
logger.debug('undefs: ' + str(symbols.undefs))
for library_symbol in lib.symbols:
if library_symbol in symbols.undefs:
need_syms.add(library_symbol)
if library_symbol in symbols.defs:
has_syms.add(library_symbol)
for haz in has_syms:
if haz in need_syms:
# remove symbols that are supplied by another of the inputs
need_syms.remove(haz)
if shared.Settings.VERBOSE:
logger.debug('considering %s: we need %s and have %s' % (lib.name, str(need_syms), str(has_syms)))
if not len(need_syms):
continue
# We need to build and link the library in
add_library(lib)
if shared.Settings.WASM_BACKEND:
add_library(system_libs_map['libc_rt_wasm'])
if shared.Settings.UBSAN_RUNTIME == 1:
add_library(system_libs_map['libubsan_minimal_rt_wasm'])
elif shared.Settings.UBSAN_RUNTIME == 2:
add_library(system_libs_map['libubsan_rt_wasm'])
if shared.Settings.USE_LSAN:
force_include.add('liblsan_rt_wasm')
add_library(system_libs_map['liblsan_rt_wasm'])
if shared.Settings.USE_ASAN:
force_include.add('libasan_rt_wasm')
add_library(system_libs_map['libasan_rt_wasm'])
if shared.Settings.STANDALONE_WASM:
add_library(system_libs_map['libstandalonewasm'])
if shared.Settings.PROXY_POSIX_SOCKETS:
add_library(system_libs_map['libsockets_proxy'])
else:
add_library(system_libs_map['libsockets'])
libs_to_link.sort(key=lambda x: x[0].endswith('.a')) # make sure to put .a files at the end.
# libc++abi and libc++ *static* linking is tricky. e.g. cxa_demangle.cpp disables c++
# exceptions, but since the string methods in the headers are *weakly* linked, then
# we might have exception-supporting versions of them from elsewhere, and if libc++abi
# is first then it would "win", breaking exception throwing from those string
# header methods. To avoid that, we link libc++abi last.
libs_to_link.sort(key=lambda x: x[0].endswith('libc++abi.bc'))
# Wrap libraries in --whole-archive, as needed. We need to do this last
  # since otherwise the above sorting won't make sense.
ret = []
in_group = False
for name, need_whole_archive in libs_to_link:
if need_whole_archive and not in_group:
ret.append('--whole-archive')
in_group = True
if in_group and not need_whole_archive:
ret.append('--no-whole-archive')
in_group = False
ret.append(name)
if in_group:
ret.append('--no-whole-archive')
return ret
class Ports(object):
"""emscripten-ports library management (https://github.com/emscripten-ports).
"""
@staticmethod
def get_lib_name(name):
return name + static_library_ext()
@staticmethod
def get_include_dir():
dirname = shared.Cache.get_path('include')
shared.safe_ensure_dirs(dirname)
return dirname
@staticmethod
def install_header_dir(src_dir, target=None):
if not target:
target = os.path.basename(src_dir)
dest = os.path.join(Ports.get_include_dir(), target)
shared.try_delete(dest)
logger.debug('installing headers: ' + dest)
shutil.copytree(src_dir, dest)
@staticmethod
def install_headers(src_dir, pattern="*.h", target=None):
logger.debug("install_headers")
dest = Ports.get_include_dir()
if target:
dest = os.path.join(dest, target)
shared.safe_ensure_dirs(dest)
matches = glob.glob(os.path.join(src_dir, pattern))
assert matches, "no headers found to install in %s" % src_dir
for f in matches:
logger.debug('installing: ' + os.path.join(dest, os.path.basename(f)))
shutil.copyfile(f, os.path.join(dest, os.path.basename(f)))
@staticmethod
def build_port(src_path, output_path, includes=[], flags=[], exclude_files=[], exclude_dirs=[]):
srcs = []
for root, dirs, files in os.walk(src_path, topdown=False):
if any((excluded in root) for excluded in exclude_dirs):
continue
for f in files:
ext = os.path.splitext(f)[1]
if ext in ('.c', '.cpp') and not any((excluded in f) for excluded in exclude_files):
srcs.append(os.path.join(root, f))
include_commands = ['-I' + src_path]
for include in includes:
include_commands.append('-I' + include)
commands = []
objects = []
for src in srcs:
obj = src + '.o'
commands.append([shared.PYTHON, shared.EMCC, '-c', src, '-O2', '-o', obj, '-w'] + include_commands + flags)
objects.append(obj)
Ports.run_commands(commands)
create_lib(output_path, objects)
return output_path
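  # Illustrative build_port call (paths and names are hypothetical):
  #   Ports.build_port('/path/to/port/src', 'libexample.a', includes=['/path/to/port/include'])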
@staticmethod
def run_commands(commands):
    # Runs a sequence of compiler commands, adding important cflags as defined by get_cflags() so
# that the ports are built in the correct configuration.
def add_args(cmd):
# this must only be called on a standard build command
assert cmd[0] == shared.PYTHON and cmd[1] in (shared.EMCC, shared.EMXX)
# add standard cflags, but also allow the cmd to override them
return cmd[:2] + get_cflags() + cmd[2:]
run_build_commands([add_args(c) for c in commands])
@staticmethod
def create_lib(libname, inputs): # make easily available for port objects
create_lib(libname, inputs)
@staticmethod
def get_dir():
dirname = os.environ.get('EM_PORTS') or os.path.expanduser(os.path.join('~', '.emscripten_ports'))
shared.safe_ensure_dirs(dirname)
return dirname
@staticmethod
def erase():
dirname = Ports.get_dir()
shared.try_delete(dirname)
if os.path.exists(dirname):
logger.warning('could not delete ports dir %s - try to delete it manually' % dirname)
@staticmethod
def get_build_dir():
return shared.Cache.get_path('ports-builds')
name_cache = set()
@staticmethod
def fetch_project(name, url, subdir, is_tarbz2=False, sha512hash=None):
# To compute the sha512 hash, run `curl URL | sha512sum`.
fullname = os.path.join(Ports.get_dir(), name)
# EMCC_LOCAL_PORTS: A hacky way to use a local directory for a port. This
# is not tested but can be useful for debugging
# changes to a port.
#
# if EMCC_LOCAL_PORTS is set, we use a local directory as our ports. This is useful
# for testing. This env var should be in format
# name=dir,name=dir
# e.g.
# sdl2=/home/username/dev/ports/SDL2
# so you could run
# EMCC_LOCAL_PORTS="sdl2=/home/alon/Dev/ports/SDL2" ./tests/runner.py browser.test_sdl2_mouse
# this will simply copy that directory into the ports directory for sdl2, and use that. It also
# clears the build, so that it is rebuilt from that source.
local_ports = os.environ.get('EMCC_LOCAL_PORTS')
if local_ports:
shared.Cache.acquire_cache_lock()
logger.warning('using local ports: %s' % local_ports)
local_ports = [pair.split('=', 1) for pair in local_ports.split(',')]
try:
for local in local_ports:
if name == local[0]:
path = local[1]
if name not in ports.ports_by_name:
shared.exit_with_error('%s is not a known port' % name)
port = ports.ports_by_name[name]
if not hasattr(port, 'SUBDIR'):
logger.error('port %s lacks .SUBDIR attribute, which we need in order to override it locally, please update it' % name)
sys.exit(1)
subdir = port.SUBDIR
target = os.path.join(fullname, subdir)
if os.path.exists(target) and not dir_is_newer(path, target):
logger.warning('not grabbing local port: ' + name + ' from ' + path + ' to ' + fullname + ' (subdir: ' + subdir + ') as the destination ' + target + ' is newer (run emcc --clear-ports if that is incorrect)')
else:
logger.warning('grabbing local port: ' + name + ' from ' + path + ' to ' + fullname + ' (subdir: ' + subdir + ')')
shared.try_delete(fullname)
shutil.copytree(path, target)
Ports.clear_project_build(name)
return
finally:
shared.Cache.release_cache_lock()
if is_tarbz2:
fullpath = fullname + '.tar.bz2'
elif url.endswith('.tar.gz'):
fullpath = fullname + '.tar.gz'
else:
fullpath = fullname + '.zip'
if name not in Ports.name_cache: # only mention each port once in log
logger.debug('including port: ' + name)
logger.debug(' (at ' + fullname + ')')
Ports.name_cache.add(name)
class State(object):
retrieved = False
unpacked = False
def retrieve():
# retrieve from remote server
logger.info('retrieving port: ' + name + ' from ' + url)
try:
import requests
response = requests.get(url)
data = response.content
except ImportError:
try:
from urllib.request import urlopen
f = urlopen(url)
data = f.read()
except ImportError:
# Python 2 compatibility
from urllib2 import urlopen
f = urlopen(url)
data = f.read()
if sha512hash:
actual_hash = hashlib.sha512(data).hexdigest()
if actual_hash != sha512hash:
raise RuntimeError('Unexpected hash: ' + actual_hash + '\n'
'If you are updating the port, please update the hash in the port module.')
open(fullpath, 'wb').write(data)
State.retrieved = True
def check_tag():
if is_tarbz2:
names = tarfile.open(fullpath, 'r:bz2').getnames()
elif url.endswith('.tar.gz'):
names = tarfile.open(fullpath, 'r:gz').getnames()
else:
names = zipfile.ZipFile(fullpath, 'r').namelist()
      # check whether the first entry of the archive is prefixed with the tag we
      # expect; if so, there is no need to download and recompile it again
return bool(re.match(subdir + r'(\\|/|$)', names[0]))
def unpack():
logger.info('unpacking port: ' + name)
shared.safe_ensure_dirs(fullname)
# TODO: Someday when we are using Python 3, we might want to change the
      # code below to use shutil.unpack_archive
# e.g.: shutil.unpack_archive(filename=fullpath, extract_dir=fullname)
# (https://docs.python.org/3/library/shutil.html#shutil.unpack_archive)
if is_tarbz2:
z = tarfile.open(fullpath, 'r:bz2')
elif url.endswith('.tar.gz'):
z = tarfile.open(fullpath, 'r:gz')
else:
z = zipfile.ZipFile(fullpath, 'r')
try:
cwd = os.getcwd()
os.chdir(fullname)
z.extractall()
finally:
os.chdir(cwd)
State.unpacked = True
# main logic. do this under a cache lock, since we don't want multiple jobs to
# retrieve the same port at once
shared.Cache.acquire_cache_lock()
try:
if not os.path.exists(fullpath):
retrieve()
if not os.path.exists(fullname):
unpack()
if not check_tag():
logger.warning('local copy of port is not correct, retrieving from remote server')
shared.try_delete(fullname)
shared.try_delete(fullpath)
retrieve()
unpack()
if State.unpacked:
# we unpacked a new version, clear the build in the cache
Ports.clear_project_build(name)
finally:
shared.Cache.release_cache_lock()
@staticmethod
def clear_project_build(name):
port = ports.ports_by_name[name]
port.clear(Ports, shared)
shared.try_delete(os.path.join(Ports.get_build_dir(), name))
# get all ports
def get_ports(settings):
ret = []
try:
process_dependencies(settings)
for port in ports.ports:
# ports return their output files, which will be linked, or a txt file
ret += [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
except Exception:
logger.error('a problem occurred when using an emscripten-ports library. try to run `emcc --clear-ports` and then run this command again')
raise
ret.reverse()
return ret
def process_dependencies(settings):
for port in reversed(ports.ports):
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
def process_args(args, settings):
process_dependencies(settings)
for port in ports.ports:
args = port.process_args(Ports, args, settings, shared)
return args
# get a single port
def get_port(name, settings):
port = ports.ports_by_name[name]
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
# ports return their output files, which will be linked, or a txt file
return [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
def show_ports():
print('Available ports:')
for port in ports.ports:
print(' ', port.show())
|
the-stack_106_27630 | # Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils
from rally import consts as rally_consts
from rally.plugins.openstack.cleanup import manager as resource_manager
from rally.plugins.openstack.context.manila import consts
from rally.plugins.openstack.scenarios.manila import utils as manila_utils
from rally.task import context
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
CONTEXT_NAME = consts.SECURITY_SERVICES_CONTEXT_NAME
@context.configure(name=CONTEXT_NAME, order=445)
class SecurityServices(context.Context):
"""This context creates 'security services' for Manila project."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": rally_consts.JSON_SCHEMA,
"properties": {
"security_services": {
"type": "array",
"description":
"It is expected to be list of dicts with data for creation"
" of security services.",
"items": {
"type": "object",
"properties": {"type": {"enum": ["active_directory",
"kerberos", "ldap"]}},
"required": ["type"],
"additionalProperties": True,
"description":
"Data for creation of security services. \n "
"Example:\n\n"
" .. code-block:: json\n\n"
" {'type': 'LDAP', 'dns_ip': 'foo_ip', \n"
" 'server': 'bar_ip', 'domain': 'quuz_domain',\n"
" 'user': 'ololo', 'password': 'fake_password'}\n"
}
},
},
"additionalProperties": False
}
DEFAULT_CONFIG = {
"security_services": [],
}
@logging.log_task_wrapper(
LOG.info, _("Enter context: `%s`") % CONTEXT_NAME)
def setup(self):
for user, tenant_id in (utils.iterate_per_tenants(
self.context.get("users", []))):
self.context["tenants"][tenant_id][CONTEXT_NAME] = {
"security_services": [],
}
if self.config["security_services"]:
manila_scenario = manila_utils.ManilaScenario({
"task": self.task,
"user": user,
"config": {
"api_versions": self.context["config"].get(
"api_versions", [])}
})
for ss in self.config["security_services"]:
inst = manila_scenario._create_security_service(
**ss).to_dict()
self.context["tenants"][tenant_id][CONTEXT_NAME][
"security_services"].append(inst)
@logging.log_task_wrapper(LOG.info, _("Exit context: `%s`") % CONTEXT_NAME)
def cleanup(self):
resource_manager.cleanup(
names=["manila.security_services"],
users=self.context.get("users", []),
)
|
the-stack_106_27631 | # -*- coding: utf-8 -*-
import os
def is_virtualenv(path):
if os.name == "nt":
# Windows!
clues = ("Scripts", "lib", "include")
else:
clues = ("bin", "lib", "include")
try:
dircontents = os.listdir(path)
except (OSError, TypeError):
# listdir failed, probably due to path length issues in windows
return False
if not all([clue in dircontents for clue in clues]):
# we don't have the 3 directories which would imply
        # this is a virtual environment
return False
if not all([os.path.isdir(os.path.join(path, clue)) for clue in clues]):
# some of them are not actually directories
return False
# if we do have all three directories, make sure that it's not
# just a coincidence by doing some heuristics on the rest of
# the directory
if len(dircontents) > 7:
        # if there are more than 7 things it's probably not a virtual environment
return False
return True
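# Illustrative check (path is hypothetical):
#   is_virtualenv(os.path.expanduser('~/.virtualenvs/myenv'))  # True for a typical venv layout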
|
the-stack_106_27632 | # File: fully_connected_nn.py
# Version: 1.0
# Author: SkalskiP https://github.com/SkalskiP
# Date: 31.10.2018
# Description: The file contains a simple implementation of a fully connected neural network.
# The original implementation can be found in the Medium article:
# https://towardsdatascience.com/lets-code-a-neural-network-in-plain-numpy-ae7e74410795
import numpy as np
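# Illustrative usage (not part of the original article's code). Layer dicts use the keys
# read below ("input_dim", "output_dim", "activation"); X is shaped (features, samples)
# and Y is shaped (1, samples):
#   nn_architecture = [
#       {"input_dim": 2, "output_dim": 4, "activation": "relu"},
#       {"input_dim": 4, "output_dim": 1, "activation": "sigmoid"},
#   ]
#   params, cost_hist, acc_hist = train(X, Y, nn_architecture, epochs=1000, learning_rate=0.01)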
def sigmoid(Z):
return 1/(1+np.exp(-Z))
def relu(Z):
return np.maximum(0,Z)
def sigmoid_backward(dA, Z):
sig = sigmoid(Z)
return dA * sig * (1 - sig)
def relu_backward(dA, Z):
dZ = np.array(dA, copy = True)
dZ[Z <= 0] = 0
return dZ
def init_layers(nn_architecture, seed = 99):
np.random.seed(seed)
number_of_layers = len(nn_architecture)
params_values = {}
for idx, layer in enumerate(nn_architecture):
layer_idx = idx + 1
layer_input_size = layer["input_dim"]
layer_output_size = layer["output_dim"]
params_values['W' + str(layer_idx)] = np.random.randn(
layer_output_size, layer_input_size) * 0.1
params_values['b' + str(layer_idx)] = np.random.randn(
layer_output_size, 1) * 0.1
return params_values
def single_layer_forward_propagation(A_prev, W_curr, b_curr, activation="relu"):
Z_curr = np.dot(W_curr, A_prev) + b_curr
if activation is "relu":
activation_func = relu
elif activation is "sigmoid":
activation_func = sigmoid
else:
raise Exception('Non-supported activation function')
return activation_func(Z_curr), Z_curr
def full_forward_propagation(X, params_values, nn_architecture):
memory = {}
A_curr = X
for idx, layer in enumerate(nn_architecture):
layer_idx = idx + 1
A_prev = A_curr
activ_function_curr = layer["activation"]
W_curr = params_values["W" + str(layer_idx)]
b_curr = params_values["b" + str(layer_idx)]
A_curr, Z_curr = single_layer_forward_propagation(A_prev, W_curr, b_curr, activ_function_curr)
memory["A" + str(idx)] = A_prev
memory["Z" + str(layer_idx)] = Z_curr
return A_curr, memory
def get_cost_value(Y_hat, Y, eps = 0.001):
m = Y_hat.shape[1]
cost = -1 / m * (np.dot(Y, np.log(Y_hat + eps).T) + np.dot(1 - Y, np.log(1 - Y_hat + eps).T))
return np.squeeze(cost)
def convert_prob_into_class(probs):
probs_ = np.copy(probs)
probs_[probs_ > 0.5] = 1
probs_[probs_ <= 0.5] = 0
return probs_
def get_accuracy_value(Y_hat, Y):
Y_hat_ = convert_prob_into_class(Y_hat)
return (Y_hat_ == Y).all(axis=0).mean()
def single_layer_backward_propagation(dA_curr, W_curr, b_curr, Z_curr, A_prev, activation="relu"):
m = A_prev.shape[1]
if activation is "relu":
backward_activation_func = relu_backward
elif activation is "sigmoid":
backward_activation_func = sigmoid_backward
else:
raise Exception('Non-supported activation function')
dZ_curr = backward_activation_func(dA_curr, Z_curr)
dW_curr = np.dot(dZ_curr, A_prev.T) / m
db_curr = np.sum(dZ_curr, axis=1, keepdims=True) / m
dA_prev = np.dot(W_curr.T, dZ_curr)
return dA_prev, dW_curr, db_curr
def full_backward_propagation(Y_hat, Y, memory, params_values, nn_architecture, eps = 0.000000000001):
grads_values = {}
m = Y.shape[1]
Y = Y.reshape(Y_hat.shape)
dA_prev = - (np.divide(Y, Y_hat + eps) - np.divide(1 - Y, 1 - Y_hat + eps))
for layer_idx_prev, layer in reversed(list(enumerate(nn_architecture))):
layer_idx_curr = layer_idx_prev + 1
activ_function_curr = layer["activation"]
dA_curr = dA_prev
A_prev = memory["A" + str(layer_idx_prev)]
Z_curr = memory["Z" + str(layer_idx_curr)]
W_curr = params_values["W" + str(layer_idx_curr)]
b_curr = params_values["b" + str(layer_idx_curr)]
dA_prev, dW_curr, db_curr = single_layer_backward_propagation(
dA_curr, W_curr, b_curr, Z_curr, A_prev, activ_function_curr)
grads_values["dW" + str(layer_idx_curr)] = dW_curr
grads_values["db" + str(layer_idx_curr)] = db_curr
return grads_values
def update(params_values, grads_values, nn_architecture, learning_rate):
for layer_idx, layer in enumerate(nn_architecture, 1):
params_values["W" + str(layer_idx)] -= learning_rate * grads_values["dW" + str(layer_idx)]
params_values["b" + str(layer_idx)] -= learning_rate * grads_values["db" + str(layer_idx)]
return params_values
def train(X, Y, nn_architecture, epochs, learning_rate, verbose=False, callback=None):
params_values = init_layers(nn_architecture, 2)
cost_history = []
accuracy_history = []
for i in range(epochs):
        Y_hat, cache = full_forward_propagation(X, params_values, nn_architecture)
cost = get_cost_value(Y_hat, Y)
cost_history.append(cost)
accuracy = get_accuracy_value(Y_hat, Y)
accuracy_history.append(accuracy)
        grads_values = full_backward_propagation(Y_hat, Y, cache, params_values, nn_architecture)
params_values = update(params_values, grads_values, nn_architecture, learning_rate)
if(i % 50 == 0):
if(verbose):
print("Iteration: {:05} - cost: {:.5f} - accuracy: {:.5f}".format(i, cost, accuracy))
if(callback is not None):
callback(i, params_values)
return params_values, cost_history, accuracy_history |
the-stack_106_27633 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xmlf90(AutotoolsPackage):
"""xmlf90 is a suite of libraries to handle XML in Fortran."""
homepage = "https://launchpad.net/xmlf90"
url = "https://launchpad.net/xmlf90/trunk/1.5/+download/xmlf90-1.5.4.tar.gz"
version('1.5.4', sha256='a0b1324ff224d5b5ad1127a6ad4f90979f6b127f1a517f98253eea377237bbe4')
version('1.5.3', sha256='a5378a5d9df4b617f51382092999eb0f20fa1a90ab49afbccfd80aa51650d27c')
version('1.5.2', sha256='666694db793828d1d1e9aea665f75c75ee21772693465a88b43e6370862abfa6')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('m4', type='build')
def url_for_version(self, version):
url = 'https://launchpad.net/xmlf90/trunk/{0}/+download/xmlf90-{1}.tar.gz'
return url.format(version.up_to(2), version)
@when('@1.5.2')
def autoreconf(self, spec, prefix):
sh = which('sh')
sh('autogen.sh')
def configure_args(self):
if self.spec.satisfies('%gcc'):
return ['FCFLAGS=-ffree-line-length-none']
return []
@run_after('install')
def fix_mk(self):
install(join_path(self.prefix, 'share', 'org.siesta-project',
'xmlf90.mk'), prefix)
|
the-stack_106_27636 | #! /bin/python
import os
import sys
import json
import luigi
import nifty.tools as nt
import nifty.distributed as ndist
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Graph Tasks
#
class InitialSubGraphsBase(luigi.Task):
""" InitialSubGraph base class
"""
task_name = 'initial_sub_graphs'
src_file = os.path.abspath(__file__)
# input volumes and graph
input_path = luigi.Parameter()
input_key = luigi.Parameter()
graph_path = luigi.Parameter()
#
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
@staticmethod
def default_task_config():
        # we use this to also get the common default config
config = LocalTask.default_task_config()
config.update({'ignore_label': True})
return config
def clean_up_for_retry(self, block_list):
super().clean_up_for_retry(block_list)
# TODO remove any output of failed blocks because it might be corrupted
def run_impl(self):
# get the global config and init configs
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
block_shape = tuple(block_shape)
self.init(shebang)
# load the task config
config = self.get_task_config()
# update the config with input and graph paths and keys
# as well as block shape
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'graph_path': self.graph_path, 'block_shape': block_shape})
# make graph file and write shape and ignore-label as attribute
shape = vu.get_shape(self.input_path, self.input_key)
with vu.file_reader(self.graph_path) as f:
# make sub-graph dataset for nodes and edges
g = f.require_group('s0/sub_graphs')
g.attrs['shape'] = tuple(shape)
g.attrs['ignore_label'] = config['ignore_label']
g.require_dataset('nodes', shape=shape, chunks=block_shape,
compression='gzip', dtype='uint64')
g.require_dataset('edges', shape=shape, chunks=block_shape,
compression='gzip', dtype='uint64')
if self.n_retries == 0:
block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)
else:
block_list = self.block_list
self.clean_up_for_retry(block_list)
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class InitialSubGraphsLocal(InitialSubGraphsBase, LocalTask):
""" InitialSubGraphs on local machine
"""
pass
class InitialSubGraphsSlurm(InitialSubGraphsBase, SlurmTask):
""" InitialSubGraphs on slurm cluster
"""
pass
class InitialSubGraphsLSF(InitialSubGraphsBase, LSFTask):
""" InitialSubGraphs on lsf cluster
"""
pass
#
# Implementation
#
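# The per-job JSON config read by initial_sub_graphs() below uses these keys; an
# illustrative (hypothetical) example:
#   {"input_path": "/data/seg.n5", "input_key": "labels", "graph_path": "/data/graph.n5",
#    "block_shape": [50, 512, 512], "block_list": [0, 1, 2], "ignore_label": true}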
def _graph_block(block_id, blocking, input_path, input_key, graph_path,
ignore_label):
fu.log("start processing block %i" % block_id)
block = blocking.getBlock(block_id)
# we only need the halo into one direction,
# hence we use the outer-block only for the end coordinate
subgraph_key = 's0/sub_graphs'
ndist.computeMergeableRegionGraph(input_path, input_key,
block.begin, block.end,
graph_path, subgraph_key,
ignore_label,
increaseRoi=True,
serializeToVarlen=True)
# log block success
fu.log_block_success(block_id)
def initial_sub_graphs(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
# get the config
with open(config_path) as f:
config = json.load(f)
input_path = config['input_path']
input_key = config['input_key']
block_shape = config['block_shape']
block_list = config['block_list']
graph_path = config['graph_path']
ignore_label = config.get('ignore_label', True)
shape = vu.get_shape(input_path, input_key)
blocking = nt.blocking(roiBegin=[0, 0, 0],
roiEnd=list(shape),
blockShape=list(block_shape))
for block_id in block_list:
_graph_block(block_id, blocking, input_path, input_key, graph_path,
ignore_label)
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
initial_sub_graphs(job_id, path)
|
the-stack_106_27640 | from django import forms
from django.utils.translation import pgettext_lazy
from ...seo.models import SeoModel
from ..widgets import CharsLeftWidget
SEO_FIELD_HELP_TEXT = pgettext_lazy(
'Form field help text',
'If empty, the preview shows what will be autogenerated.')
MIN_DESCRIPTION_LENGTH = 120
MIN_TITLE_LENGTH = 25
DESCRIPTION_MAX_LENGTH = SeoModel._meta.get_field('seo_description').max_length
TITLE_MAX_LENGTH = SeoModel._meta.get_field('seo_title').max_length
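# Illustrative use inside a ModelForm (the form and field names are assumptions, not
# part of this module):
#   class ProductForm(forms.ModelForm):
#       seo_title = SeoTitleField()
#       seo_description = SeoDescriptionField()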
class SeoTitleField(forms.CharField):
widget = CharsLeftWidget(
attrs={
'data-min-recommended-length': MIN_TITLE_LENGTH,
'maxlength': TITLE_MAX_LENGTH})
def __init__(self, extra_attrs=None, required=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_length = TITLE_MAX_LENGTH
if extra_attrs:
self.widget.attrs.update(extra_attrs)
self.required = required
self.help_text = SEO_FIELD_HELP_TEXT
self.label = pgettext_lazy(
'A SEO friendly title', 'SEO Friendly Title')
class SeoDescriptionField(forms.CharField):
help_text = SEO_FIELD_HELP_TEXT
widget = CharsLeftWidget(
attrs={
'help_text': SEO_FIELD_HELP_TEXT,
'data-min-recommended-length': MIN_DESCRIPTION_LENGTH,
'maxlength': DESCRIPTION_MAX_LENGTH})
def __init__(self, extra_attrs=None, required=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_length = DESCRIPTION_MAX_LENGTH
if extra_attrs:
self.widget.attrs.update(extra_attrs)
self.required = required
self.help_text = SEO_FIELD_HELP_TEXT
self.label = pgettext_lazy(
'A SEO friendly description', 'SEO Friendly Description')
|
the-stack_106_27641 | # Inside custom tag - is_active.py
from django import template
from django.urls import reverse
register = template.Library()
@register.simple_tag
def is_active(request, url):
try:
path = request.path
        # Main idea is to check whether the given url name resolves to the current path
if path == reverse(url):
return "active"
return ""
except AttributeError:
return ""
|
the-stack_106_27642 | """Tradingview view"""
__docformat__ = "numpy"
import logging
import os
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import export_data, print_rich_table
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.technical_analysis import tradingview_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def print_recommendation(
ticker: str, screener: str, exchange: str, interval: str, export: str
):
"""Print tradingview recommendation based on technical indicators
Parameters
----------
ticker : str
Ticker to get tradingview recommendation based on technical indicators
screener : str
Screener based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
exchange: str
Exchange based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
interval: str
        Interval time to check technical indicators and corresponding recommendation
export: str
Format of export file
"""
recom = tradingview_model.get_tradingview_recommendation(
ticker, screener, exchange, interval
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"recom",
recom,
)
print_rich_table(
recom,
headers=list(recom.columns),
title="Ticker Recomendation",
show_index=True,
)
console.print("")
|
the-stack_106_27644 | '''
A set of common units.
@author: Kosh
'''
import cv2
import random
import numpy as np
from streampy.units.base.pooled import Pool, Worker as Base
import copy
class Worker(Base):
'''
    Augment and resize the image to the required size.
    Also transform the bounding boxes, dropping those that do not fit in the frame and those that are too small.
'''
def process(self, inData, inMeta):
config = self.config
image = inData['sample']['image']
bboxes = []
for predict in inData['sample']['predict']:
bboxes.append(copy.copy(predict))
size = (image.shape[0], image.shape[1])
        # resize
ratio = min([float(config['height'] / size[0]), float(config['width'] / size[1])])
        x = int(size[1] * ratio)
        y = int(size[0] * ratio)
image = cv2.resize(image, (x, y), interpolation = cv2.INTER_AREA )
bboxes2 = []
for predict in bboxes:
predict[0] = float(predict[0]) * ratio
predict[1] = float(predict[1]) * ratio
predict[2] = float(predict[2]) * ratio
predict[3] = float(predict[3]) * ratio
if ((predict[2] / 2 + predict[0] > 0)
and (predict[2] /2 + predict[0] < x)
and (predict[3] / 2 + predict[1] > 0)
and (predict[3] / 2 + predict[1] < y)
and (predict[2] > config.get('minWidth', 10))
and (predict[3] > config.get('minHeigth', 10))
):
bboxes2.append(predict)
# print(('ratio', ratio, 'bboxes', bboxes))
        # pad the image up to the target size
w = config['width'] - x
h = config['height'] - y
top = h // 2
bottom = h - top
left = w // 2
right = w - left
image = cv2.copyMakeBorder(image, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=[0, 0, 0])
bboxes = []
for predict in bboxes2:
predict[0] = float(predict[0]) + left
predict[1] = float(predict[1]) + top
bboxes.append(predict)
#
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#
# if ('toFloat' in config) and config['toFloat']:
# image = np.float16(image)
#
# if 'multiply' in config:
# image *= config['multiply']
return [{'sample':{'image':image, 'predict': bboxes}}]
cnt = 0
|
the-stack_106_27645 | # To Find The Total Number Of Digits In A Number
N = int(input("Enter The number"))
count = 0
while(N!=0):
    N = N // 10  # integer floor division; equivalent to (N - N % 10) / 10 but keeps N an int
    count += 1
print(count) |
the-stack_106_27646 | # -*- coding: utf-8 -*-
import os
import re
import socket
import sys
import threading
import time
import warnings
import six
from six.moves import queue
from airtest.core.android.constant import STFLIB
from airtest.utils.logger import get_logger
from airtest.utils.nbsp import NonBlockingStreamReader
from airtest.utils.safesocket import SafeSocket
from airtest.utils.snippet import (get_std_encoding, on_method_ready,
ready_method, reg_cleanup)
LOGGING = get_logger(__name__)
class Minitouch(object):
"""
Super fast operation from minitouch
References:
https://github.com/openstf/minitouch
"""
def __init__(self, adb, backend=False, ori_function=None):
self.adb = adb
self.backend = backend
self.server_proc = None
self.client = None
self.size_info = None
self.ori_function = ori_function if callable(ori_function) else self.adb.getPhysicalDisplayInfo
self.max_x, self.max_y = None, None
reg_cleanup(self.teardown)
@ready_method
def install_and_setup(self):
"""
Install and setup minitouch
Returns:
None
"""
self.install()
self.size_info = self.ori_function()
self.setup_server()
if self.backend:
self.setup_client_backend()
else:
self.setup_client()
def uninstall(self):
"""
Uninstall minitouch
Returns:
None
"""
self.adb.raw_shell("rm /data/local/tmp/minitouch*")
def install(self):
"""
Install minitouch
Returns:
None
"""
abi = self.adb.getprop("ro.product.cpu.abi")
sdk = int(self.adb.getprop("ro.build.version.sdk"))
if sdk >= 16:
binfile = "minitouch"
else:
binfile = "minitouch-nopie"
device_dir = "/data/local/tmp"
path = os.path.join(STFLIB, abi, binfile).replace("\\", r"\\")
if self.adb.exists_file('/data/local/tmp/minitouch'):
local_minitouch_size = int(os.path.getsize(path))
try:
file_size = self.adb.file_size('/data/local/tmp/minitouch')
except Exception:
self.uninstall()
else:
if local_minitouch_size == file_size:
LOGGING.debug("install_minitouch skipped")
return
self.uninstall()
self.adb.push(path, "%s/minitouch" % device_dir)
self.adb.shell("chmod 755 %s/minitouch" % (device_dir))
LOGGING.info("install_minitouch finished")
def __transform_xy(self, x, y):
"""
Transform coordinates (x, y) according to the device display
Args:
x: coordinate x
y: coordinate y
Returns:
transformed coordinates (x, y)
"""
width, height = self.size_info['width'], self.size_info['height']
nx = x * self.max_x / width
ny = y * self.max_y / height
# print(nx, ny, self.max_x, self.max_y, width, height)
return nx, ny
def setup_server(self):
"""
        Set up the minitouch server and adb forward
Returns:
server process
"""
if self.server_proc:
self.server_proc.kill()
self.server_proc = None
self.localport, deviceport = self.adb.setup_forward("localabstract:minitouch_{}".format)
deviceport = deviceport[len("localabstract:"):]
p = self.adb.start_shell("/data/local/tmp/minitouch -n '%s' 2>&1" % deviceport)
nbsp = NonBlockingStreamReader(p.stdout, name="minitouch_server")
while True:
line = nbsp.readline(timeout=5.0)
if line is None:
raise RuntimeError("minitouch setup timeout")
line = line.decode(get_std_encoding(sys.stdout))
            # parse the setup-success log line and extract max_x / max_y from it
m = re.match("Type \w touch device .+ \((\d+)x(\d+) with \d+ contacts\) detected on .+ \(.+\)", line)
if m:
self.max_x, self.max_y = int(m.group(1)), int(m.group(2))
break
else:
self.max_x = 32768
self.max_y = 32768
        # nbsp.kill()  # kept alive on purpose: the reader keeps consuming and printing server output later
if p.poll() is not None:
# server setup error, may be already setup by others
# subprocess exit immediately
raise RuntimeError("minitouch server quit immediately")
self.server_proc = p
# reg_cleanup(self.server_proc.kill)
return p
@on_method_ready('install_and_setup')
def touch(self, tuple_xy, duration=0.01):
"""
Perform touch event
minitouch protocol example::
d 0 10 10 50
c
<wait in your own code>
u 0
c
Args:
tuple_xy: coordinates (x, y)
duration: time interval for touch event, default is 0.01
Returns:
None
"""
x, y = tuple_xy
x, y = self.__transform_xy(x, y)
self.handle("d 0 {:.0f} {:.0f} 50\nc\n".format(x, y))
time.sleep(duration)
self.handle("u 0\nc\n")
def __swipe_move(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5):
"""
Return a sequence of swipe motion events (only MoveEvent)
minitouch protocol example::
d 0 0 0 50
c
m 0 20 0 50
c
m 0 40 0 50
c
m 0 60 0 50
c
m 0 80 0 50
c
m 0 100 0 50
c
u 0
c
Args:
tuple_from_xy: start point
tuple_to_xy: end point
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
Returns:
[MoveEvent(from_x, from_y), ..., MoveEvent(to_x, to_y)]
"""
from_x, from_y = tuple_from_xy
to_x, to_y = tuple_to_xy
ret = []
interval = float(duration) / (steps + 1)
for i in range(1, steps):
ret.append(MoveEvent((from_x + (to_x - from_x) * i / steps,
from_y + (to_y - from_y) * i / steps)))
ret.append(SleepEvent(interval))
ret += [MoveEvent((to_x, to_y)), SleepEvent(interval)]
return ret
@on_method_ready('install_and_setup')
def swipe_along(self, coordinates_list, duration=0.8, steps=5):
"""
Perform swipe event across multiple points in sequence.
Args:
coordinates_list: list of coordinates: [(x1, y1), (x2, y2), (x3, y3)]
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
Returns:
None
"""
tuple_from_xy = coordinates_list[0]
swipe_events = [DownEvent(tuple_from_xy), SleepEvent(0.1)]
for tuple_to_xy in coordinates_list[1:]:
swipe_events += self.__swipe_move(tuple_from_xy, tuple_to_xy, duration=duration, steps=steps)
tuple_from_xy = tuple_to_xy
swipe_events.append(UpEvent())
self.perform(swipe_events)
@on_method_ready('install_and_setup')
def swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5):
"""
Perform swipe event.
Args:
tuple_from_xy: start point
tuple_to_xy: end point
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
Returns:
None
"""
swipe_events = [DownEvent(tuple_from_xy), SleepEvent(0.1)]
swipe_events += self.__swipe_move(tuple_from_xy, tuple_to_xy, duration=duration, steps=steps)
swipe_events.append(UpEvent())
self.perform(swipe_events)
@on_method_ready('install_and_setup')
def two_finger_swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5):
"""
Perform two finger swipe action
minitouch protocol example::
d 0 0 0 50
d 1 1 0 50
c
m 0 20 0 50
m 1 21 0 50
c
m 0 40 0 50
m 1 41 0 50
c
m 0 60 0 50
m 1 61 0 50
c
m 0 80 0 50
m 1 81 0 50
c
m 0 100 0 50
m 1 101 0 50
c
u 0
u 1
c
Args:
tuple_from_xy: start point
tuple_to_xy: end point
duration: time interval for swipe duration, default is 0.8
steps: size of swipe step, default is 5
Returns:
None
"""
from_x, from_y = tuple_from_xy
to_x, to_y = tuple_to_xy
from_x, from_y = self.__transform_xy(from_x, from_y)
to_x, to_y = self.__transform_xy(to_x, to_y)
w = self.size_info['width']
shift_x = 1 if from_x + 1 >= w else -1
interval = float(duration) / (steps + 1)
self.handle("d 0 {:.0f} {:.0f} 50\nd 1 {:.0f} {:.0f} 50\nc\n".format(from_x, from_y, from_x + shift_x, from_y))
time.sleep(interval)
for i in range(1, steps):
self.handle("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(
from_x + (to_x - from_x) * i / steps,
from_y + (to_y - from_y) * i / steps,
from_x + (to_x - from_x) * i / steps + shift_x,
from_y + (to_y - from_y) * i / steps,
))
time.sleep(interval)
for i in range(10):
self.handle("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(to_x, to_y, to_x + shift_x, to_y))
time.sleep(interval)
self.handle("u 0\nu 1\nc\n")
@on_method_ready('install_and_setup')
def pinch(self, center=None, percent=0.5, duration=0.5, steps=5, in_or_out='in'):
"""
Perform pinch action
minitouch protocol example::
d 0 0 100 50
d 1 100 0 50
c
m 0 10 90 50
m 1 90 10 50
c
m 0 20 80 50
m 1 80 20 50
c
m 0 20 80 50
m 1 80 20 50
c
m 0 30 70 50
m 1 70 30 50
c
m 0 40 60 50
m 1 60 40 50
c
m 0 50 50 50
m 1 50 50 50
c
u 0
u 1
c
"""
w, h = self.size_info['width'], self.size_info['height']
if isinstance(center, (list, tuple)):
x0, y0 = center
elif center is None:
x0, y0 = w / 2, h / 2
else:
raise RuntimeError("center should be None or list/tuple, not %s" % repr(center))
x1, y1 = x0 - w * percent / 2, y0 - h * percent / 2
x2, y2 = x0 + w * percent / 2, y0 + h * percent / 2
        # convert the computed raw coordinates into the device's actual touch coordinates
x0, y0 = self.__transform_xy(x0, y0)
x1, y1 = self.__transform_xy(x1, y1)
x2, y2 = self.__transform_xy(x2, y2)
cmds = []
if in_or_out == 'in':
cmds.append("d 0 {:.0f} {:.0f} 50\nd 1 {:.0f} {:.0f} 50\nc\n".format(x1, y1, x2, y2))
for i in range(1, steps):
cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(
x1+(x0-x1)*i/steps, y1+(y0-y1)*i/steps,
x2+(x0-x2)*i/steps, y2+(y0-y2)*i/steps
))
cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(x0, y0, x0, y0))
cmds.append("u 0\nu 1\nc\n")
elif in_or_out == 'out':
cmds.append("d 0 {:.0f} {:.0f} 50\nd 1 {:.0f} {:.0f} 50\nc\n".format(x0, y0, x0, y0))
for i in range(1, steps):
cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(
x0+(x1-x0)*i/steps, y0+(y1-y0)*i/steps,
x0+(x2-x0)*i/steps, y0+(y2-y0)*i/steps
))
cmds.append("m 0 {:.0f} {:.0f} 50\nm 1 {:.0f} {:.0f} 50\nc\n".format(x1, y1, x2, y2))
cmds.append("u 0\nu 1\nc\n")
else:
raise RuntimeError("center should be 'in' or 'out', not {}".format(repr(in_or_out)))
interval = float(duration) / (steps + 1)
for i, c in enumerate(cmds):
self.handle(c)
time.sleep(interval)
@on_method_ready('install_and_setup')
def operate(self, args):
"""
Perform down, up and move actions
Args:
args: action arguments, dictionary containing type and x, y coordinates, e.g.::
{
"type" : "down",
"x" : 10,
"y" : 10
}
Raises:
RuntimeError: is invalid arguments are provided
Returns:
None
"""
if args["type"] == "down":
x, y = self.__transform_xy(args["x"], args["y"])
# support py 3
cmd = "d 0 {:.0f} {:.0f} 50\nc\n".format(x, y)
elif args["type"] == "move":
x, y = self.__transform_xy(args["x"], args["y"])
# support py 3
cmd = "m 0 {:.0f} {:.0f} 50\nc\n".format(x, y)
elif args["type"] == "up":
# support py 3
cmd = "u 0\nc\n"
else:
raise RuntimeError("invalid operate args: {}".format(args))
self.handle(cmd)
@on_method_ready('install_and_setup')
def perform(self, motion_events, interval=0.01):
"""
Perform a sequence of motion events including: UpEvent, DownEvent, MoveEvent, SleepEvent
:param motion_events: a list of MotionEvent instances
:param interval: minimum interval between events
:return: None
"""
for event in motion_events:
if isinstance(event, SleepEvent):
time.sleep(event.seconds)
else:
cmd = event.getcmd(transform=self.__transform_xy)
self.handle(cmd)
time.sleep(interval)
def safe_send(self, data):
"""
Send data to client
Args:
data: data to send
Raises:
Exception: when data cannot be sent
Returns:
None
"""
if isinstance(data, six.text_type):
data = data.encode('utf-8')
try:
self.client.send(data)
except Exception as err:
# raise MinitouchError(err)
raise err
def _backend_worker(self):
"""
Backend worker queue thread
Returns:
None
"""
while not self.backend_stop_event.isSet():
cmd = self.backend_queue.get()
if cmd is None:
break
self.safe_send(cmd)
def setup_client_backend(self):
"""
Setup backend client thread as daemon
Returns:
None
"""
self.backend_queue = queue.Queue()
self.backend_stop_event = threading.Event()
self.setup_client()
t = threading.Thread(target=self._backend_worker, name="minitouch")
# t.daemon = True
t.start()
self.backend_thread = t
self.handle = self.backend_queue.put
def setup_client(self):
"""
Setup client in following steps::
1. connect to server
2. receive the header
v <version>
^ <max-contacts> <max-x> <max-y> <max-pressure>
$ <pid>
3. prepare to send
Returns:
None
"""
s = SafeSocket()
s.connect((self.adb.host, self.localport))
s.sock.settimeout(2)
header = b""
while True:
try:
header += s.sock.recv(4096) # size is not strict, so use raw socket.recv
except socket.timeout:
# raise RuntimeError("minitouch setup client error")
warnings.warn("minitouch header not recved")
break
if header.count(b'\n') >= 3:
break
LOGGING.debug("minitouch header:%s", repr(header))
self.client = s
self.handle = self.safe_send
def teardown(self):
"""
Stop the server and client
Returns:
None
"""
if hasattr(self, "backend_stop_event"):
self.backend_stop_event.set()
self.backend_queue.put(None)
if self.client:
self.client.close()
if self.server_proc:
self.server_proc.kill()
class MotionEvent(object):
"""
Motion Event to be performed by Minitouch
"""
def getcmd(self, transform=None):
raise NotImplementedError
class DownEvent(MotionEvent):
def __init__(self, coordinates, contact=0, pressure=50):
"""
Finger Down Event
:param coordinates: finger down coordinates in (x, y)
:param contact: multi-touch action, starts from 0
:param pressure: touch pressure
"""
super(DownEvent, self).__init__()
self.coordinates = coordinates
self.contact = contact
self.pressure = pressure
def getcmd(self, transform=None):
if transform:
x, y = transform(*self.coordinates)
else:
x, y = self.coordinates
cmd = "d {:.0f} {:.0f} {:.0f} {:.0f}\nc\n".format(self.contact, x, y, self.pressure)
return cmd
class UpEvent(MotionEvent):
def __init__(self, contact=0):
"""
Finger Up Event
:param contact: multi-touch action, starts from 0
"""
super(UpEvent, self).__init__()
self.contact = contact
def getcmd(self, transform=None):
cmd = "u {:.0f}\nc\n".format(self.contact)
return cmd
class MoveEvent(MotionEvent):
def __init__(self, coordinates, contact=0, pressure=50):
"""
Finger Move Event
:param coordinates: finger move to coordinates in (x, y)
:param contact: multi-touch action, starts from 0
:param pressure: touch pressure
"""
super(MoveEvent, self).__init__()
self.coordinates = coordinates
self.contact = contact
self.pressure = pressure
def getcmd(self, transform=None):
if transform:
x, y = transform(*self.coordinates)
else:
x, y = self.coordinates
cmd = "m {:.0f} {:.0f} {:.0f} {:.0f}\nc\n".format(self.contact, x, y, self.pressure)
return cmd
class SleepEvent(MotionEvent):
def __init__(self, seconds):
self.seconds = seconds
def getcmd(self, transform=None):
return None
|
the-stack_106_27650 | # -*- coding: utf-8 -*-
"""Implementation of basic instance factory which creates just instances based on standard KG triples."""
from dataclasses import dataclass
from typing import Mapping
import numpy as np
import scipy.sparse
from torch.utils import data
from ..typing import MappedTriples
from ..utils import fix_dataclass_init_docs
__all__ = [
'Instances',
'SLCWAInstances',
'LCWAInstances',
'MultimodalInstances',
'MultimodalSLCWAInstances',
'MultimodalLCWAInstances',
]
@fix_dataclass_init_docs
@dataclass
class Instances(data.Dataset):
"""Triples and mappings to their indices."""
def __len__(self): # noqa:D401
"""The number of instances."""
raise NotImplementedError
@fix_dataclass_init_docs
@dataclass
class SLCWAInstances(Instances):
"""Triples and mappings to their indices for sLCWA."""
#: The mapped triples, shape: (num_triples, 3)
mapped_triples: MappedTriples
def __len__(self): # noqa: D105
return self.mapped_triples.shape[0]
def __getitem__(self, item): # noqa: D105
return self.mapped_triples[item]
@fix_dataclass_init_docs
@dataclass
class LCWAInstances(Instances):
"""Triples and mappings to their indices for LCWA."""
#: The unique pairs
pairs: np.ndarray
#: The compressed triples in CSR format
compressed: scipy.sparse.csr_matrix
@classmethod
def from_triples(cls, mapped_triples: MappedTriples, num_entities: int) -> Instances:
"""
Create LCWA instances from triples.
:param mapped_triples: shape: (num_triples, 3)
The ID-based triples.
:param num_entities:
The number of entities.
:return:
The instances.
"""
mapped_triples = mapped_triples.numpy()
unique_hr, pair_idx_to_triple_idx = np.unique(mapped_triples[:, :2], return_inverse=True, axis=0)
num_pairs = unique_hr.shape[0]
tails = mapped_triples[:, 2]
compressed = scipy.sparse.coo_matrix(
(np.ones(mapped_triples.shape[0], dtype=np.float32), (pair_idx_to_triple_idx, tails)),
shape=(num_pairs, num_entities),
)
# convert to csr for fast row slicing
compressed = compressed.tocsr()
return cls(pairs=unique_hr, compressed=compressed)
def __len__(self) -> int: # noqa: D105
return self.pairs.shape[0]
def __getitem__(self, item): # noqa: D105
return self.pairs[item], np.asarray(self.compressed[item, :].todense())[0, :]
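# Illustrative sketch (not from the original module): building LCWA instances
# from a tiny ID-based triple set. torch is assumed available, since
# from_triples() calls .numpy() on the input tensor.
def _example_lcwa_instances():
    import torch
    triples = torch.as_tensor([[0, 0, 1], [0, 0, 2], [1, 0, 2]])  # (h, r, t) IDs
    instances = LCWAInstances.from_triples(mapped_triples=triples, num_entities=3)
    hr_pair, tail_targets = instances[0]  # unique (h, r) pair and its multi-hot tail row
    return hr_pair, tail_targets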
@fix_dataclass_init_docs
@dataclass
class MultimodalInstances(Instances):
"""Triples and mappings to their indices as well as multimodal data."""
#: TODO: do we need these?
numeric_literals: Mapping[str, np.ndarray]
literals_to_id: Mapping[str, int]
@fix_dataclass_init_docs
@dataclass
class MultimodalSLCWAInstances(SLCWAInstances, MultimodalInstances):
"""Triples and mappings to their indices as well as multimodal data for sLCWA."""
@fix_dataclass_init_docs
@dataclass
class MultimodalLCWAInstances(LCWAInstances, MultimodalInstances):
"""Triples and mappings to their indices as well as multimodal data for LCWA."""
|
the-stack_106_27651 | '''generic limbs. Offsets, etc.'''
import maya.cmds as cmds
import mpyr.lib.rigmath as mpMath
import mpyr.lib.rig as mpRig
import mpyr.lib.joint as mpJoint
import mpyr.lib.nurbs as mpNurbs
import mpyr.lib.attr as mpAttr
import mpyr.rig.limb.limbBase as limbBase
class WorldOffset(limbBase.Limb):
'''A character's root offset.
This is a special limb in that it's always built by the
rig base class, on every character, and has no startJoint. It serves as a layout
control and also as the 'world parent' of anything else in the rig.
'''
def begin(self):
'''since this is a special limb, hardcode some names'''
self.name.loc = 'M'
self.name.part = 'World'
limbBase.Limb.begin(self)
def build(self):
#make pyramids for world offsets
ctrlXform = mpMath.Transform()
ctrlXform.scale(2)
zero, c1 = self.addCtrl('01',type='FK',shape='pyramid',parent=self.limbNode,shapeXform=ctrlXform)
ctrlXform.scale(0.7)
zero, c2 = self.addCtrl('02',type='FK',shape='pyramid',parent=c1,shapeXform=ctrlXform)
ctrlXform.scale(0.7)
zero, c3 = self.addCtrl('03',type='FK',shape='pyramid',parent=c2,shapeXform=ctrlXform)
mpRig.addPickParent(c3,c2)
mpRig.addPickParent(c2,c1)
class FKOffset(limbBase.Limb):
'''simple offset control. One control driving one joint.
Attrs:
- translate: Offset will drive the joint's translate as well as rotate. Default True
- useConstraint: use constraints instead of a direct rotate connection. Slower, but sometimes
required for correct behavior when the parent rotates. Default True.
'''
def __init__(self):
limbBase.Limb.__init__(self)
self.translate = True
self.useConstraint = True
self.pin=None
def begin(self):
limbBase.Limb.begin(self)
#sanity checks on start and endJoint
if not self.startJoint or not cmds.objExists(self.startJoint):
raise RuntimeError('invalid startJoint: %s' % self.startJoint)
def addPin(self):
self.pin = self.addPinParent()
def build(self):
self.addPin()
zero,c1 = self.addCtrl('01',type='FK',shape='sphere',parent=self.pin,xform=self.startJoint)
if self.useConstraint:
cmds.orientConstraint(c1,self.startJoint,mo=True)
else:
for attr in ['rx','ry','rz']:
cmds.connectAttr(c1+'.'+attr, self.startJoint+'.'+attr,f=True)
if self.translate:
cmds.pointConstraint(c1,self.startJoint,mo=True)
cmds.parentConstraint(self.pin,zero,mo=True)
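# Illustrative sketch (not from the original module): how an FKOffset limb might
# be configured before a rig builds it. The joint name and name fields are
# hypothetical and assume the limbBase.Limb interface used above.
def _example_fk_offset_setup():
    limb = FKOffset()
    limb.name.loc = 'L'            # side, as in WorldOffset.begin()
    limb.name.part = 'Eye'
    limb.startJoint = 'L_Eye_01'   # hypothetical joint in the Maya scene
    limb.translate = False         # rotation-only offset
    limb.useConstraint = True      # constraint-based hookup (default)
    return limb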
class FKOffsetBlend(FKOffset):
'''version of FKOffset that uses a blended pin setup instead of a simple pin setup.
useConstraint is forced True in this case.
'''
def addPin(self):
self.pin = self.addPinBlend()
def build(self):
self.useConstraint = True #must be true for blend to work
FKOffset.build(self)
class FKChain(limbBase.Limb):
'''simple FK chain: given a start and end joint, create FK ctrls between them'''
def begin(self):
limbBase.Limb.begin(self)
#sanity checks on start and endJoint
if not self.startJoint or not cmds.objExists(self.startJoint):
raise RuntimeError('invalid startJoint: %s' % self.startJoint)
if not self.endJoint or not cmds.objExists(self.endJoint):
raise RuntimeError('invalid endJoint: %s' % self.endJoint)
def build(self):
self.addPinBlend()
self.addFKChain(self.startJoint,self.endJoint,self.pinBlend)
class FKCurlChain(FKChain):
'''simple FK chain with addition of a 'curl' ctrl at the base that lets you rotate all ctrls
at once.
'''
def build(self):
FKChain.build(self)
#add offset between each ctrl
curls=list()
for idx,ctrl in enumerate(self.ctrls):
#get zero null:
zero=cmds.listRelatives(ctrl,p=1)[0]
self.name.desc='Curl%02d'%idx
curlNull=cmds.group(em=True,n=self.name.get(),p=zero)
cmds.xform(curlNull,ws=True,m=cmds.xform(zero,q=True,ws=True,m=True))
cmds.parent(ctrl,curlNull)
curls.append(curlNull)
#make curl ctrl
curlZero,curlCtrl = self.addCtrl('curl',type='FK',shape='spoon',parent=self.pinParent,xform=self.ctrls[0],size=3.5)
mpAttr.lockAndHide(curlCtrl,'t')
#connect curl ctrl
for curl in curls:
for axis in 'xyz':
cmds.connectAttr(curlCtrl+'.r%s'%axis,curl+'.r%s'%axis)
class FKIKChain(limbBase.Limb):
'''Simple FK and IK chain, with FKIK blend, meant for at least three joints (not single chain IK)
Requires startJoint and endJoint
'''
def begin(self):
limbBase.Limb.begin(self)
#sanity checks on start and endJoint
if not self.startJoint or not cmds.objExists(self.startJoint):
raise RuntimeError('invalid startJoint: %s' % self.startJoint)
if not self.endJoint or not cmds.objExists(self.endJoint):
raise RuntimeError('invalid endJoint: %s' % self.endJoint)
def build(self):
self.addPinBlend()
self.addFKIKChain(self.startJoint,self.endJoint,self.pinBlend,self.pinWorld)
class FKTree(limbBase.Limb):
'''Recursively rigs a joint chain with FK offsets. Requires a parent joint, rigs parent and all children.'''
def begin(self):
limbBase.Limb.begin(self)
if not self.startJoint or not cmds.objExists(self.startJoint):
raise RuntimeError('invalid startJoint: %s' % self.startJoint)
def build(self):
self.addPinBlend()
self.makeCtrl(self.startJoint,self.pinBlend)
def makeCtrl(self,startJoint,parent):
'''recursive function to build ctrls'''
ctrlXform = mpMath.Transform()
ctrlXform.scale(0.3,0.3,0.3)
zero,c1 = self.addCtrl('%02d'%len(self.ctrls),type='FK',shape='box',parent=parent,xform=startJoint,shapeXform=ctrlXform)
cmds.parentConstraint(c1,startJoint,mo=True)
children = cmds.listRelatives(startJoint,type='joint') or []
for child in children:
childZero,childCtrl = self.makeCtrl(child,c1)
mpRig.addPickParent(childCtrl,c1)
return (zero,c1)
class NurbsStrip(limbBase.Limb):
'''Limb that uses a NURBS strip to drive the joints.
Attributes:
numCtrls: The number of ctrls to make on the strip, default 5
numSpans: the number of spans of the strip, default 5. Long strips may need more
uMin: Where on the strip to begin placing ctrls, defaults 0. Range 0-1
uMax: Where on the strip to end placing ctrls, defaults 1. Range 0-1
'''
def __init__(self):
limbBase.Limb.__init__(self)
self.stripWidth=1.0
self.numCtrls=5
self.numSpans=8
self.uMin=0
self.uMax=1
def begin(self):
limbBase.Limb.begin(self)
#sanity checks
if not self.startJoint or not cmds.objExists(self.startJoint):
raise RuntimeError('invalid startJoint: %s' % self.startJoint)
if not self.endJoint or not cmds.objExists(self.endJoint):
raise RuntimeError('invalid endJoint: %s' % self.endJoint)
def build(self):
self.addPinParent()
self.addAttrLimb(ln='noStretch', at='float',min=0,max=1,dv=0,k=True,s=1)
self.addAttrLimb(ln='slideAlong', at='float',min=-1,max=1,dv=0,k=True,s=1)
jointList = mpJoint.getJointList(self.startJoint,self.endJoint)
if len(jointList) < 2:
raise RuntimeError('NurbsStrip requires at least 2 joints in chain. Got %s'%len(jointList))
#Create NURBS strip by making curves along joints, and a cross section crv, then extruding
crv=mpNurbs.curveFromNodes(jointList)
crossCurve = cmds.curve(d=1,p=[(0,0,-0.5 * self.stripWidth),(0,0,0.5 * self.stripWidth)],k=(0,1))
cmds.select([crossCurve,crv],r=1)
surf = cmds.extrude(ch=False,po=0,et=2,ucp=1,fpt=1,upn=1,rotation=0,scale=1,rsp=1)[0]
cmds.delete([crv,crossCurve])
self.name.desc='driverSurf'
surf = cmds.rename(surf, self.name.get())
cmds.parent(surf,self.noXform)
#Rebuild strip to proper number of spans
cmds.rebuildSurface(surf,ch=0,rpo=1,rt=0,end=1,kr=0,kcp=0,kc=1,sv=self.numSpans,su=0,du=1,tol=0.01,fr=0,dir=2)
#make live curve on surface down the middle
#this is used later for noStretch
curvMaker = cmds.createNode('curveFromSurfaceIso', n = surf+"CurveIso")
cmds.setAttr(curvMaker + ".isoparmValue", 0.5)
cmds.setAttr(curvMaker + ".isoparmDirection", 1)
cmds.connectAttr(surf + ".worldSpace[0]", curvMaker + ".inputSurface")
offsetCrvShp = cmds.createNode("nurbsCurve", n=crv + "_driverSurfCrvShape")
offsetCrv = cmds.listRelatives(p=1)[0]
offsetCrv = cmds.rename(offsetCrv,crv + "_driverSurfCrv")
cmds.connectAttr(curvMaker + ".outputCurve", offsetCrvShp + ".create")
cmds.parent(offsetCrv, self.noXform)
#Measure curve length and divide by start length
#to get a normalized value that is useful later to control stretch
crvInfo = cmds.createNode('curveInfo', n=offsetCrv + "Info")
cmds.connectAttr(offsetCrv + ".worldSpace[0]", crvInfo + ".ic")
arcLength = cmds.getAttr(crvInfo + ".al")
stretchAmountNode = cmds.createNode('multiplyDivide', n=offsetCrv + "Stretch")
cmds.setAttr(stretchAmountNode + ".op" , 2) #divide
cmds.setAttr(stretchAmountNode + ".input1X", arcLength)
cmds.connectAttr( crvInfo + ".al",stretchAmountNode + ".input2X")
#Stretch Blender blends start length with current length
#and pipes it back into stretchAmountNode's startLength, so the user can turn
#stretch behavior on or off
stretchBlender = cmds.createNode('blendColors', n =offsetCrv + "StretchBlender")
cmds.setAttr(stretchBlender + ".c1r", arcLength)
cmds.connectAttr(crvInfo + ".al", stretchBlender + ".c2r")
cmds.connectAttr(stretchBlender + ".opr", stretchAmountNode + ".input1X")
cmds.connectAttr(self.limbNode + ".noStretch",stretchBlender + ".blender")
#attach joints to surface
closestPoint = cmds.createNode('closestPointOnSurface',n='tempClose')
cmds.connectAttr(surf + ".worldSpace[0]", closestPoint + ".inputSurface")
for idx,jnt in enumerate(jointList):
self.name.desc = 'jnt%02dOffset'%idx
locator = cmds.spaceLocator(n=self.name.get())[0]
cmds.setAttr(locator+'.localScale',self.stripWidth,self.stripWidth,self.stripWidth)
cmds.parent(locator,self.noXform)
#Use closest point to find jnt's percent along the curve
cmds.setAttr(closestPoint+'.ip',*cmds.xform(jnt,q=True, t=True, ws=True))
percentage = cmds.getAttr(closestPoint+'.r.v')
#attach to surface
posNode,aimCnss,moPath,slider = self.attachObjToSurf(locator,surf,offsetCrv,stretchAmountNode,percentage)
cmds.connectAttr(self.limbNode + ".slideAlong", slider + ".i2")
cmds.parentConstraint(locator,jnt,mo=True)
cmds.delete(closestPoint)
#add controls. These drive new joints which skinCluster the NURBS strip
stripJoints = []
stripJointParent = cmds.createNode('transform',n=crv + "_stripJoints",p=self.noXform)
ctrlParent = cmds.createNode('transform',n=crv+"_Ctrls",p=self.pinParent)
cmds.xform(ctrlParent,ws=True,m=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1])
prevCtrl=None
for i in range(self.numCtrls):
ctrlXform=mpMath.Transform(jointList[0])
zero,ctrl = self.addCtrl('Ctrl%02d'%i,type='FK',shape='box',parent=ctrlParent,xform=ctrlXform)
ctrlXform.scale(0.8,0.8,0.8)
tZero,tCtrl=self.addCtrl('TweakCtrl%02d'%i,type='FK',shape='cross',parent=ctrl,xform=ctrlXform)
#Make the new joint for the control to drive
cmds.select(clear=True)
self.name.desc='StripJnt%02d'%i
jnt = cmds.joint(p=(0,0,0),n=self.name.get())
cmds.parentConstraint(tCtrl,jnt,mo=False)
#briefly attach ctrls to strip to align them
percentage = float(i)/(self.numCtrls-1.0)
if i > 0 and i < self.numCtrls-1:
percentage = self.uMin + (percentage * (self.uMax-self.uMin))
cmds.delete(self.attachObjToSurf(zero,surf,offsetCrv,stretchAmountNode,percentage))
cmds.parent(jnt,stripJointParent)
stripJoints.append(jnt)
if prevCtrl:
cmds.parent(zero,prevCtrl)
mpRig.addPickParent(ctrl,prevCtrl)
prevCtrl=ctrl
#skin strip to controls
#Can get some different behavior by changing the strip's weights
#or perhaps using dual quat. mode on the skinCluster
skinObjs = stripJoints + [surf]
cmds.skinCluster(skinObjs,
bindMethod=0, #closest Point
sm=0, #standard bind method
ih=True, #ignore hierarchy
)
def attachObjToSurf(self,obj,surf,path,stretchAmountNode,percentage):
'''Given an object and a surface, attach object.
Returns the created nodes as (pointOnSurface, aimConstraint, motionPath, slider)
'''
#Make nodes
aimCns = cmds.createNode('aimConstraint',n=obj + "Cns")
moPath = cmds.createNode('motionPath', n=obj + "MoPath")
slider = cmds.createNode('addDoubleLinear',n=obj + "Slider")
cmds.setAttr(moPath + ".uValue", percentage)
closePnt = cmds.createNode('closestPointOnSurface', n=obj + "ClsPnt")
posNode1 = cmds.pointOnSurface(surf,
constructionHistory=True,
normal=True,
normalizedNormal=True,
normalizedTangentU=True,
normalizedTangentV=True,
parameterV=0.5,
parameterU=0.5,
turnOnPercentage=True
)
#Connect motion Path to closest point, then closest point to surface info node
cmds.setAttr(moPath + ".fractionMode", 1) #distance instead of param
cmds.connectAttr(path + ".worldSpace[0]", moPath + ".geometryPath")
cmds.connectAttr(surf + ".worldSpace[0]", closePnt + ".inputSurface")
cmds.connectAttr(moPath + ".xCoordinate", closePnt + ".ipx")
cmds.connectAttr(moPath + ".yCoordinate", closePnt + ".ipy")
cmds.connectAttr(moPath + ".zCoordinate", closePnt + ".ipz")
cmds.connectAttr(closePnt + ".result.u", posNode1 + ".u")
cmds.connectAttr(closePnt + ".result.v", posNode1 + ".v")
#Create Stretch Setup using stretchAmountNode node
stretchCtrl = cmds.createNode("multDoubleLinear", n=obj + "StretchCtrl")
cmds.setAttr(stretchCtrl + ".i1", percentage)
cmds.connectAttr(stretchAmountNode + ".outputX",stretchCtrl + ".i2")
cmds.connectAttr(stretchCtrl + ".o", slider + ".i1")
cmds.connectAttr(slider + ".o", moPath + ".uValue")
#Hook up surface info attrs to aimCns to calculate rotation values
#Then hook pointOnSurface and aimCns to locator
posNode1 = cmds.rename(posNode1,obj + 'SurfInfo')
cmds.setAttr(aimCns + ".worldUpType", 3)
cmds.connectAttr(posNode1 + ".position", obj + ".translate")
cmds.connectAttr(posNode1 + '.tv',aimCns + '.target[0].targetTranslate')
cmds.connectAttr(posNode1 + '.tu',aimCns + '.worldUpVector')
for axis in ('X','Y','Z'):
cmds.connectAttr(aimCns + ".constraintRotate" + axis, obj + ".rotate" + axis)
cmds.parent(aimCns,obj) #just for tidiness, doesn't matter
return (posNode1,aimCns,moPath,slider)
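# Illustrative sketch (not from the original module): configuring a NurbsStrip
# limb per the attributes documented in its class docstring. Joint names are
# hypothetical.
def _example_nurbs_strip_setup():
    strip = NurbsStrip()
    strip.startJoint = 'Spine_01'        # hypothetical start joint
    strip.endJoint = 'Spine_08'          # hypothetical end joint
    strip.numCtrls = 5                   # ctrls distributed along the strip
    strip.numSpans = 8                   # raise for longer strips
    strip.uMin, strip.uMax = 0.05, 0.95  # keep end ctrls slightly inside the strip
    return strip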
|
the-stack_106_27655 | """Creates a custom kinematics body with two links and one joint
"""
from openravepy import *
import numpy, time
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
with env:
# geometries
infobox0 = KinBody.Link.GeometryInfo()
infobox0._type = GeometryType.Box
infobox0._t[0,3] = 0
infobox0._vGeomData = [0.1,0.2,0.3]
infobox0._vDiffuseColor = [1,0,0]
infobox1 = KinBody.Link.GeometryInfo()
infobox1._type = GeometryType.Box
infobox1._t[0,3] = 0.1
infobox1._vGeomData = [0.3,0.05,0.05]
infobox1._vDiffuseColor = [0,1,0]
infobox2 = KinBody.Link.GeometryInfo()
infobox2._type = GeometryType.Box
infobox2._t[0,3] = 0
infobox2._vGeomData = [0.1,0.2,0.3]
infobox2._vDiffuseColor = [0,0,1]
# links
link0 = KinBody.LinkInfo()
link0._vgeometryinfos = [infobox0, infobox1]
link0._name = 'link0'
link0._mapFloatParameters = {'param0':[1,2.3]}
link0._mapIntParameters = {'param0':[4,5.6]}
link1 = KinBody.LinkInfo()
link1._vgeometryinfos = [infobox2]
link1._name = 'link1'
link1._mapFloatParameters = {'param0':[1,2.3]}
link1._mapIntParameters = {'param0':[4,5.6]}
link1._t[0,3] = 0.5
# joints
joint0 = KinBody.JointInfo()
joint0._name = 'j0'
joint0._linkname0 = 'link0'
joint0._linkname1 = 'link1'
joint0._type = KinBody.JointType.Hinge
joint0._vlowerlimit = [-0.5]
joint0._vupperlimit = [1.0]
joint0._vaxes = [[0,0,1]]
# instantiate
body = RaveCreateKinBody(env,'')
success=body.Init([link0,link1],[joint0])
body.SetName('temp')
env.Add(body)
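# Illustrative follow-up (not in the original script): once added to the
# environment, the body's single hinge joint can be posed through the standard
# KinBody API, e.g.:
#     body.SetDOFValues([0.5])               # rotate joint 'j0' to 0.5 rad
#     print(body.GetJoints()[0].GetValues())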
|
the-stack_106_27656 | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
from __future__ import absolute_import
import sys
from six import unichr, PY3
if sys.version_info >= (2, 7) or sys.version_info >= (3, 2):
import unittest as ut
else:
try:
import unittest2 as ut
except ImportError:
raise ImportError(
'unittest2 is required to run the test suite with python-%d.%d'
% (sys.version_info[:2])
)
import shutil
import tempfile
import numpy as np
import os
class TestCase(ut.TestCase):
@classmethod
def setUpClass(cls):
cls.tempdir = tempfile.mkdtemp(prefix='h5py-test_')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tempdir)
def mktemp(self, suffix='.hdf5', prefix='', dir=None):
if dir is None:
dir = self.tempdir
return tempfile.mktemp(suffix, prefix, dir=self.tempdir)
if not hasattr(ut.TestCase, 'assertSameElements'):
# shim until this is ported into unittest2
def assertSameElements(self, a, b):
for x in a:
match = False
for y in b:
if x == y:
match = True
if not match:
raise AssertionError("Item '%s' appears in a but not b" % x)
for x in b:
match = False
for y in a:
if x == y:
match = True
if not match:
raise AssertionError("Item '%s' appears in b but not a" % x)
def assertArrayEqual(self, dset, arr, message=None, precision=None):
""" Make sure dset and arr have the same shape, dtype and contents, to
within the given precision.
Note that dset may be a NumPy array or an HDF5 dataset.
"""
if precision is None:
precision = 1e-5
if message is None:
message = ''
else:
message = ' (%s)' % message
if np.isscalar(dset) or np.isscalar(arr):
self.assert_(
np.isscalar(dset) and np.isscalar(arr),
'Scalar/array mismatch ("%r" vs "%r")%s' % (dset, arr, message)
)
self.assert_(
dset - arr < precision,
"Scalars differ by more than %.3f%s" % (precision, message)
)
return
self.assert_(
dset.shape == arr.shape,
"Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message)
)
self.assert_(
dset.dtype == arr.dtype,
"Dtype mismatch (%s vs %s)%s" % (dset.dtype, arr.dtype, message)
)
self.assert_(
np.all(np.abs(dset[...] - arr[...]) < precision),
"Arrays differ by more than %.3f%s" % (precision, message)
)
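# Illustrative sketch (not from the original module): a test module would
# typically subclass TestCase above and compare an HDF5 dataset against a
# NumPy array. The dataset name and dtype are arbitrary.
def _example_testcase_usage():
    import h5py
    class TestRoundTrip(TestCase):
        def test_roundtrip(self):
            data = np.arange(10, dtype='f4')
            with h5py.File(self.mktemp(), 'w') as f:
                dset = f.create_dataset('x', data=data)
                self.assertArrayEqual(dset, data)
    return TestRoundTrip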
# Check if non-ascii filenames are supported
# Evidently this is the most reliable way to check
# See also h5py issue #263 and ipython #466
# To test for this, run the testsuite with LC_ALL=C
try:
testfile, fname = tempfile.mkstemp(unichr(0x03b7))
except UnicodeError:
unicode_filenames = False
else:
unicode_filenames = True
os.close(testfile)
os.unlink(fname)
del fname
del testfile
|
the-stack_106_27657 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from xml.etree.ElementTree import Element, SubElement, tostring
from lxml import etree
import codecs
XML_EXT = '.xml'
ENCODE_METHOD = 'utf-8'
class PascalVocWriter:
def __init__(self, foldername, filename, imgSize, localImgPath=None, databaseSrc='Unknown'):
self.foldername = foldername
self.filename = filename
self.databaseSrc = databaseSrc
self.imgSize = imgSize
self.boxlist = []
self.localImgPath = localImgPath
self.verified = False
def prettify(self, elem):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = tostring(elem, 'utf8')
root = etree.fromstring(rough_string)
return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(" ".encode(), "\t".encode())
def genXML(self):
"""
Return XML root
"""
# Check conditions
if self.filename is None or self.foldername is None or self.imgSize is None:
return None
top = Element('annotation')
if self.verified:
top.set('verified', 'yes')
folder = SubElement(top, 'folder')
folder.text = self.foldername
filename = SubElement(top, 'filename')
filename.text = self.filename
if self.localImgPath is not None:
localImgPath = SubElement(top, 'path')
localImgPath.text = self.localImgPath
source = SubElement(top, 'source')
database = SubElement(source, 'database')
database.text = self.databaseSrc
size_part = SubElement(top, 'size')
width = SubElement(size_part, 'width')
height = SubElement(size_part, 'height')
depth = SubElement(size_part, 'depth')
width.text = str(self.imgSize[1])
height.text = str(self.imgSize[0])
if len(self.imgSize) == 3:
depth.text = str(self.imgSize[2])
else:
depth.text = '1'
segmented = SubElement(top, 'segmented')
segmented.text = '0'
return top
def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult=0):
bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
bndbox['name'] = name
bndbox['difficult'] = difficult
self.boxlist.append(bndbox)
def appendObjects(self, top):
for each_object in self.boxlist:
object_item = SubElement(top, 'object')
name = SubElement(object_item, 'name')
name.text = each_object['name']
pose = SubElement(object_item, 'pose')
pose.text = "Unspecified"
truncated = SubElement(object_item, 'truncated')
if int(float(each_object['ymax'])) == int(float(self.imgSize[0])) or (int(float(each_object['ymin']))== 1):
truncated.text = "1" # max == height or min
elif (int(float(each_object['xmax']))==int(float(self.imgSize[1]))) or (int(float(each_object['xmin']))== 1):
truncated.text = "1" # max == width or min
else:
truncated.text = "0"
difficult = SubElement(object_item, 'difficult')
difficult.text = str(bool(each_object['difficult']) & 1)
bndbox = SubElement(object_item, 'bndbox')
xmin = SubElement(bndbox, 'xmin')
xmin.text = str(each_object['xmin'])
ymin = SubElement(bndbox, 'ymin')
ymin.text = str(each_object['ymin'])
xmax = SubElement(bndbox, 'xmax')
xmax.text = str(each_object['xmax'])
ymax = SubElement(bndbox, 'ymax')
ymax.text = str(each_object['ymax'])
def save(self, targetFile=None):
root = self.genXML()
self.appendObjects(root)
if targetFile is None:
out_file = codecs.open(
self.filename + XML_EXT, 'w', encoding=ENCODE_METHOD)
else:
out_file = codecs.open(targetFile, 'w', encoding=ENCODE_METHOD)
prettifyResult = self.prettify(root)
out_file.write(prettifyResult.decode('utf8'))
out_file.close() |
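# Illustrative usage sketch for the PascalVocWriter class above (not part of
# the original module). Paths, image size and label are hypothetical.
def _example_write_annotation():
    writer = PascalVocWriter('images', 'dog_001.jpg', (480, 640, 3),
                             localImgPath='images/dog_001.jpg')
    writer.addBndBox(48, 240, 195, 371, 'dog', difficult=0)
    writer.save('dog_001.xml')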
the-stack_106_27661 | import pytest
@pytest.mark.parametrize("string", ["a", "abc", "abcde", "potato"])
def test_string_inside_tuple(get_contract, string):
code = f"""
struct Person:
name: String[6]
age: uint256
@external
def test_return() -> Person:
return Person({{ name:"{string}", age:42 }})
"""
c1 = get_contract(code)
code = """
struct Person:
name: String[6]
age: uint256
interface jsonabi:
def test_return() -> Person: view
@external
def test_values(a: address) -> Person:
return jsonabi(a).test_return()
"""
c2 = get_contract(code)
assert c2.test_values(c1.address) == [string, 42]
|
the-stack_106_27663 | # coding:utf8
# code needed to get customized constants for different OS
import sys
OS_WINDOWS = "win"
OS_LINUX = "linux"
OS_MACOS = "darwin"
OS_BSD = "freebsd"
OS_DRAGONFLY = "dragonfly"
OS_DEFAULT = "default"
def getValueForOS(constantDict):
if sys.platform.startswith(OS_WINDOWS):
return constantDict[OS_WINDOWS] if OS_WINDOWS in constantDict else constantDict[OS_DEFAULT]
if sys.platform.startswith(OS_LINUX):
return constantDict[OS_LINUX] if OS_LINUX in constantDict else constantDict[OS_DEFAULT]
if sys.platform.startswith(OS_MACOS):
return constantDict[OS_MACOS] if OS_MACOS in constantDict else constantDict[OS_DEFAULT]
if OS_BSD in sys.platform or sys.platform.startswith(OS_DRAGONFLY):
return constantDict[OS_BSD] if OS_BSD in constantDict else constantDict[OS_DEFAULT]
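# Illustrative example (not from the original constants): getValueForOS() picks
# the entry matching sys.platform and falls back to OS_DEFAULT, e.g.
#     getValueForOS({OS_DEFAULT: "/usr/bin/mpv", OS_WINDOWS: r"c:\mpv\mpv.exe"})
# returns the Windows path on win32 and the default path elsewhere.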
# You might want to change these
DEFAULT_PORT = 8999
OSD_DURATION = 3.0
OSD_WARNING_MESSAGE_DURATION = 5.0
NO_ALERT_OSD_WARNING_DURATION = 13.0
MPC_OSD_POSITION = 1 # Right corner, 1 for left
MPLAYER_OSD_LEVEL = 1
UI_TIME_FORMAT = "[%X] "
CONFIG_NAMES = [".syncplay", "syncplay.ini"] # Syncplay searches first to last
DEFAULT_CONFIG_NAME = "syncplay.ini"
RECENT_CLIENT_THRESHOLD = "1.6.8" # This and higher considered 'recent' clients (no warnings)
MUSIC_FORMATS = [".mp3", ".m4a", ".m4p", ".wav", ".aiff", ".r", ".ogg", ".flac"] # ALL LOWER CASE!
WARN_OLD_CLIENTS = True # Use MOTD to inform old clients to upgrade
LIST_RELATIVE_CONFIGS = True # Print list of relative configs loaded
SHOW_CONTACT_INFO = True # Displays dev contact details below list in GUI
SHOW_TOOLTIPS = True
WARN_ABOUT_MISSING_STRINGS = False # (If debug mode is enabled)
FALLBACK_INITIAL_LANGUAGE = "en"
FALLBACK_PUBLIC_SYNCPLAY_SERVERS = [
['syncplay.pl:8995 (France)', 'syncplay.pl:8995'],
['syncplay.pl:8996 (France)', 'syncplay.pl:8996'],
['syncplay.pl:8997 (France)', 'syncplay.pl:8997'],
['syncplay.pl:8998 (France)', 'syncplay.pl:8998'],
['syncplay.pl:8999 (France)', 'syncplay.pl:8999']]
PLAYLIST_LOAD_NEXT_FILE_MINIMUM_LENGTH = 10 # Seconds
PLAYLIST_LOAD_NEXT_FILE_TIME_FROM_END_THRESHOLD = 5 # Seconds (only triggered if file is paused, e.g. due to EOF)
EXECUTABLE_COMBOBOX_MINIMUM_LENGTH = 30 # Minimum number of characters that the combobox will make visible
# Overriden by config
SHOW_OSD = True # Sends Syncplay messages to media player OSD
SHOW_OSD_WARNINGS = True # Show warnings if playing different file, alone in room
SHOW_SLOWDOWN_OSD = True # Show notifications of slowing down / reverting on time difference
SHOW_SAME_ROOM_OSD = True # Show OSD notifications for events relating to room user is in
SHOW_NONCONTROLLER_OSD = False # Show OSD notifications for non-controllers in controlled rooms
SHOW_DIFFERENT_ROOM_OSD = False # Show OSD notifications for events relating to room user is not in
SHOW_DURATION_NOTIFICATION = True
DEBUG_MODE = False
# Changing these might be ok
DELAYED_LOAD_WAIT_TIME = 2.5
AUTOMATIC_UPDATE_CHECK_FREQUENCY = 7 * 86400 # Days converted into seconds
DEFAULT_REWIND_THRESHOLD = 4
MINIMUM_REWIND_THRESHOLD = 3
DEFAULT_FASTFORWARD_THRESHOLD = 5
MINIMUM_FASTFORWARD_THRESHOLD = 4
FASTFORWARD_EXTRA_TIME = 0.25
FASTFORWARD_RESET_THRESHOLD = 3.0
FASTFORWARD_BEHIND_THRESHOLD = 1.75
SEEK_THRESHOLD = 1
SLOWDOWN_RATE = 0.95
DEFAULT_SLOWDOWN_KICKIN_THRESHOLD = 1.5
MINIMUM_SLOWDOWN_THRESHOLD = 1.3
SLOWDOWN_RESET_THRESHOLD = 0.1
DIFFERENT_DURATION_THRESHOLD = 2.5
PROTOCOL_TIMEOUT = 12.5
RECONNECT_RETRIES = 999
SERVER_STATE_INTERVAL = 1
SERVER_STATS_SNAPSHOT_INTERVAL = 3600
WARNING_OSD_MESSAGES_LOOP_INTERVAL = 1
AUTOPLAY_DELAY = 3.0
DO_NOT_RESET_POSITION_THRESHOLD = 1.0
SYNC_ON_PAUSE = True # Client seek to global position - subtitles may disappear on some media players
PLAYLIST_MAX_CHARACTERS = 10000
PLAYLIST_MAX_ITEMS = 250
MAXIMUM_TAB_WIDTH = 350
TAB_PADDING = 30
MONOSPACE_FONT = getValueForOS({
OS_DEFAULT: "Monospace",
OS_MACOS: "Menlo",
OS_WINDOWS: "Consolas"})
DEFAULT_CHAT_FONT_SIZE = 24
DEFAULT_CHAT_INPUT_FONT_COLOR = "#FFFF00"
DEFAULT_CHAT_OUTPUT_FONT_COLOR = "#FFFF00"
DEFAULT_CHAT_FONT_WEIGHT = 1
# Max numbers are used by server (and client pre-connection). Once connected client gets values from server featureList (or uses 'fallback' versions for old servers)
MAX_CHAT_MESSAGE_LENGTH = 150 # Number of displayed characters
MAX_USERNAME_LENGTH = 150 # Number of displayed characters
MAX_ROOM_NAME_LENGTH = 35 # Number of displayed characters
MAX_FILENAME_LENGTH = 250 # Number of displayed characters
FALLBACK_MAX_CHAT_MESSAGE_LENGTH = 50 # Number of displayed characters
FALLBACK_MAX_USERNAME_LENGTH = 16 # Number of displayed characters
FALLBACK_MAX_ROOM_NAME_LENGTH = 35 # Number of displayed characters
FALLBACK_MAX_FILENAME_LENGTH = 250 # Number of displayed characters
# Options for the File Switch feature:
FOLDER_SEARCH_FIRST_FILE_TIMEOUT = 25.0 # Secs - How long to wait to find the first file in folder search (to take account of HDD spin up)
FOLDER_SEARCH_TIMEOUT = 20.0 # Secs - How long to wait until searches in folder to update cache are aborted (after first file is found)
FOLDER_SEARCH_DOUBLE_CHECK_INTERVAL = 30.0 # Secs - Frequency of updating cache
# Usually there's no need to adjust these
DOUBLE_CHECK_REWIND = False
LAST_PAUSED_DIFF_THRESHOLD = 2
FILENAME_STRIP_REGEX = "[-~_\.\[\](): ]"
CONTROL_PASSWORD_STRIP_REGEX = "[^a-zA-Z0-9\-]"
ROOM_NAME_STRIP_REGEX = "^(\+)(?P<roomnamebase>.*)(:)(\w{12})$"
COMMANDS_UNDO = ["u", "undo", "revert"]
COMMANDS_CHAT = ["ch", "chat"]
COMMANDS_LIST = ["l", "list", "users"]
COMMANDS_PAUSE = ["p", "play", "pause"]
COMMANDS_ROOM = ["r", "room"]
COMMANDS_HELP = ['help', 'h', '?', '/?', r'\?']
COMMANDS_CREATE = ['c', 'create']
COMMANDS_AUTH = ['a', 'auth']
COMMANDS_TOGGLE = ['t', 'toggle']
COMMANDS_QUEUE = ['queue', 'qa', 'add']
COMMANDS_QUEUEANDSELECT = ['queueandselect','qas']
COMMANDS_PLAYLIST = ['playlist', 'ql', 'pl']
COMMANDS_SELECT = ['select', 'qs']
COMMANDS_DELETE = ['delete', 'd', 'qd']
MPC_MIN_VER = "1.6.4"
MPC_BE_MIN_VER = "1.5.2.3123"
VLC_MIN_VERSION = "2.2.1"
VLC_INTERFACE_VERSION = "0.3.7"
VLC_LATENCY_ERROR_THRESHOLD = 2.0
MPV_UNRESPONSIVE_THRESHOLD = 60.0
CONTROLLED_ROOMS_MIN_VERSION = "1.3.0"
USER_READY_MIN_VERSION = "1.3.0"
SHARED_PLAYLIST_MIN_VERSION = "1.4.0"
CHAT_MIN_VERSION = "1.5.0"
FEATURE_LIST_MIN_VERSION = "1.5.0"
IINA_PATHS = ['/Applications/IINA.app/Contents/MacOS/IINA']
MPC_PATHS = [
r"c:\program files (x86)\mpc-hc\mpc-hc.exe",
r"c:\program files\mpc-hc\mpc-hc.exe",
r"c:\program files\mpc-hc\mpc-hc64.exe",
r"c:\program files\media player classic - home cinema\mpc-hc.exe",
r"c:\program files\media player classic - home cinema\mpc-hc64.exe",
r"c:\program files (x86)\media player classic - home cinema\mpc-hc.exe",
r"c:\program files (x86)\k-lite codec pack\media player classic\mpc-hc.exe",
r"c:\program files\k-lite codec pack\media Player classic\mpc-hc.exe",
r"c:\program files\k-lite codec pack\mpc-hc64\mpc-hc64.exe",
r"c:\program files (x86)\k-lite codec pack\mpc-hc64\mpc-hc64.exe",
r"c:\program files (x86)\combined community codec pack\mpc\mpc-hc.exe",
r"c:\program files\combined community codec pack\mpc\mpc-hc.exe",
r"c:\program files\mpc homecinema (x64)\mpc-hc64.exe",
r"c:\program files (x86)\lav filters\x86\mpc-hc\shoukaku.exe",
r"c:\program files (x86)\lav filters\x64\mpc-hc\shoukaku.exe"
]
MPC_EXECUTABLES = ["mpc-hc.exe", "mpc-hc64.exe", "mpc-hcportable.exe", "mpc-hc_nvo.exe", "mpc-hc64_nvo.exe", "shoukaku.exe"]
MPC64_EXECUTABLES = ["mpc-hc64.exe", "mpc-hc64_nvo.exe", "x64\mpc-hc\shoukaku.exe"]
MPC_BE_PATHS = [
r"c:\program files\mpc-be x64\mpc-be64.exe",
r"c:\program files\mpc-be x64\mpc-be.exe",
r"c:\program files\mpc-be\mpc-be64.exe",
r"c:\program files\mpc-be\mpc-be.exe"
]
MPLAYER_PATHS = ["mplayer2", "mplayer"]
MPV_PATHS = ["mpv", "/opt/mpv/mpv", r"c:\program files\mpv\mpv.exe", r"c:\program files\mpv-player\mpv.exe",
r"c:\program Files (x86)\mpv\mpv.exe", r"c:\program Files (x86)\mpv-player\mpv.exe",
"/Applications/mpv.app/Contents/MacOS/mpv"]
MPVNET_PATHS = [r"c:\program files\mpv.net\mpvnet.exe", r"c:\program Files (x86)\mpv.net\mpvnet.exe"]
try:
import os
MPVNET_PATHS.append(os.path.expandvars(r'%LOCALAPPDATA%\Microsoft\WindowsApps\mpvnet.exe'))
except:
pass
VLC_PATHS = [
r"c:\program files (x86)\videolan\vlc\vlc.exe",
r"c:\program files\videolan\vlc\vlc.exe",
"/usr/bin/vlc",
"/usr/bin/vlc-wrapper",
"/Applications/VLC.app/Contents/MacOS/VLC",
"/usr/local/bin/vlc",
"/usr/local/bin/vlc-wrapper",
"/snap/bin/vlc"
]
VLC_ICONPATH = "vlc.png"
IINA_ICONPATH = "iina.png"
MPLAYER_ICONPATH = "mplayer.png"
MPV_ICONPATH = "mpv.png"
MPVNET_ICONPATH = "mpvnet.png"
MPC_ICONPATH = "mpc-hc.png"
MPC64_ICONPATH = "mpc-hc64.png"
MPC_BE_ICONPATH = "mpc-be.png"
MPV_ERROR_MESSAGES_TO_REPEAT = ['[ytdl_hook] Your version of youtube-dl is too old', '[ytdl_hook] youtube-dl failed', 'Failed to recognize file format.', '[syncplayintf] Lua error']
# Changing these is usually not something you're looking for
PLAYER_ASK_DELAY = 0.1
PING_MOVING_AVERAGE_WEIGHT = 0.85
MPC_OPEN_MAX_WAIT_TIME = 10
MPC_LOCK_WAIT_TIME = 0.2
MPC_RETRY_WAIT_TIME = 0.01
MPC_MAX_RETRIES = 30
MPC_PAUSE_TOGGLE_DELAY = 0.05
MPV_NEWFILE_IGNORE_TIME = 1
MPV_SENDMESSAGE_COOLDOWN_TIME = 0.05
MPV_MAX_NEWFILE_COOLDOWN_TIME = 3
STREAM_ADDITIONAL_IGNORE_TIME = 10
MPV_LOCK_WAIT_TIME = 0.05
VLC_OPEN_MAX_WAIT_TIME = 20
VLC_MIN_PORT = 10000
VLC_MAX_PORT = 55000
# These are not changes you're looking for
STYLE_TABLIST = "QListWidget::item { border-style: solid; border-width: 1px; border-radius: 2px; } QListWidget::item:selected { color: black; background: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 rgba(242, 248, 255, 255), stop:1 rgba(208, 229, 255, 255)); border-color: #84ACDD; } QListWidget::item:!selected { border-color: transparent; } QListWidget::item:!selected:hover { color: black; background: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 rgba(248, 248, 248, 255), stop:1 rgba(229, 229, 229, 255)); border-color: silver; }"
STYLE_SUBCHECKBOX = "QCheckBox, QLabel, QRadioButton {{ margin-left: 6px; padding-left: 21px; background:url('{}') left no-repeat }}" # Graphic path
STYLE_SUBLABEL = "QCheckBox, QLabel {{ margin-left: 6px; padding-left: 16px; background:url('{}') left no-repeat }}" # Graphic path
STYLE_ERRORLABEL = "QLabel { color : black; border-style: outset; border-width: 2px; border-radius: 7px; border-color: red; padding: 2px; background: #FFAAAA; }"
STYLE_SUCCESSLABEL = "QLabel { color : black; border-style: outset; border-width: 2px; border-radius: 7px; border-color: green; padding: 2px; background: #AAFFAA; }"
STYLE_READY_PUSHBUTTON = getValueForOS({
OS_DEFAULT: "QPushButton { text-align: left; padding: 10px 5px 10px 5px;}",
OS_MACOS: "QPushButton { text-align: left; padding: 10px 5px 10px 15px; margin: 0px 3px 0px 2px}"})
STYLE_AUTO_PLAY_PUSHBUTTON = getValueForOS({
OS_DEFAULT: "QPushButton { text-align: left; padding: 5px 5px 5px 5px; }",
OS_MACOS: "QPushButton { text-align: left; padding: 10px 5px 10px 15px; margin: 0px 0px 0px -4px}"})
STYLE_NOTIFICATIONBOX = "Username { color: #367AA9; font-weight:bold; }"
STYLE_CONTACT_INFO = "<span style=\"color: grey\"><strong><small>{}</span><br /><br />" # Contact info message
STYLE_USER_MESSAGE = "<span style=\"{}\"><{}></span> {}"
STYLE_USERNAME = "color: #367AA9; font-weight:bold;"
STYLE_ERRORNOTIFICATION = "color: red;"
STYLE_DIFFERENTITEM_COLOR = 'red'
STYLE_NOFILEITEM_COLOR = 'blue'
STYLE_NOTCONTROLLER_COLOR = 'grey'
STYLE_UNTRUSTEDITEM_COLOR = 'purple'
STYLE_DARK_LINKS_COLOR = "a {color: #1A78D5; }"
STYLE_DARK_ABOUT_LINK_COLOR = "color: #1A78D5;"
STYLE_DARK_ERRORNOTIFICATION = "color: #E94F64;"
STYLE_DARK_DIFFERENTITEM_COLOR = '#E94F64'
STYLE_DARK_NOFILEITEM_COLOR = '#1A78D5'
STYLE_DARK_NOTCONTROLLER_COLOR = 'grey'
STYLE_DARK_UNTRUSTEDITEM_COLOR = '#882fbc'
TLS_CERT_ROTATION_MAX_RETRIES = 10
USERLIST_GUI_USERNAME_OFFSET = getValueForOS({
OS_DEFAULT: 21,
OS_MACOS: 26}) # Pixels
USERLIST_GUI_USERNAME_COLUMN = 0
USERLIST_GUI_FILENAME_COLUMN = 3
MPLAYER_SLAVE_ARGS = ['-slave', '--hr-seek=always', '-nomsgcolor', '-msglevel', 'all=1:global=4:cplayer=4', '-af-add', 'scaletempo']
MPV_ARGS = {'force-window': 'yes',
'idle': 'yes',
'hr-seek': 'always',
'keep-open': 'always',
'input-terminal': 'no',
'term-playing-msg': '<SyncplayUpdateFile>\nANS_filename=${filename}\nANS_length=${=duration:${=length:0}}\nANS_path=${path}\n</SyncplayUpdateFile>',
'keep-open-pause': 'yes'
}
IINA_PROPERTIES = {'geometry': '25%+100+100',
'idle': 'yes',
'hr-seek': 'always',
'input-terminal': 'no',
'term-playing-msg': '<SyncplayUpdateFile>\nANS_filename=${filename}\nANS_length=${=duration:${=length:0}}\nANS_path=${path}\n</SyncplayUpdateFile>',
'keep-open-pause': 'yes',
}
MPV_NEW_VERSION = False
MPV_OSC_VISIBILITY_CHANGE_VERSION = False
MPV_INPUT_PROMPT_START_CHARACTER = "【"
MPV_INPUT_PROMPT_END_CHARACTER = " 】"
MPV_INPUT_BACKSLASH_SUBSTITUTE_CHARACTER = "＼"
MPV_SYNCPLAYINTF_OPTIONS_TO_SEND = ["chatInputEnabled", "chatInputFontFamily", "chatInputRelativeFontSize", "chatInputFontWeight", "chatInputFontUnderline",
"chatInputFontColor", "chatInputPosition", "chatOutputFontFamily", "chatOutputRelativeFontSize",
"chatOutputFontWeight", "chatOutputFontUnderline", "chatOutputMode", "chatMaxLines",
"chatTopMargin", "chatLeftMargin", "chatBottomMargin", "chatDirectInput",
"notificationTimeout", "alertTimeout", "chatTimeout", "chatOutputEnabled"]
MPV_SYNCPLAYINTF_CONSTANTS_TO_SEND = [
"MaxChatMessageLength={}".format(MAX_CHAT_MESSAGE_LENGTH),
"inputPromptStartCharacter={}".format(MPV_INPUT_PROMPT_START_CHARACTER),
"inputPromptEndCharacter={}".format(MPV_INPUT_PROMPT_END_CHARACTER),
"backslashSubstituteCharacter={}".format(MPV_INPUT_BACKSLASH_SUBSTITUTE_CHARACTER)]
# Note: Constants updated in client.py->checkForFeatureSupport
MPV_SYNCPLAYINTF_LANGUAGE_TO_SEND = ["mpv-key-tab-hint", "mpv-key-hint", "alphakey-mode-warning-first-line", "alphakey-mode-warning-second-line"]
VLC_SLAVE_ARGS = ['--extraintf=luaintf', '--lua-intf=syncplay', '--no-quiet', '--no-input-fast-seek',
'--play-and-pause', '--start-time=0']
VLC_SLAVE_EXTRA_ARGS = getValueForOS({
OS_DEFAULT: ['--no-one-instance', '--no-one-instance-when-started-from-file'],
OS_MACOS: ['--verbose=2', '--no-file-logging']})
MPV_SUPERSEDE_IF_DUPLICATE_COMMANDS = ["set_property time-pos ", "loadfile "]
MPV_REMOVE_BOTH_IF_DUPLICATE_COMMANDS = ["cycle pause"]
MPLAYER_ANSWER_REGEX = "^ANS_([a-zA-Z_-]+)=(.+)$|^(Exiting)\.\.\. \((.+)\)$"
VLC_ANSWER_REGEX = r"(?:^(?P<command>[a-zA-Z_-]+)(?:\: )?(?P<argument>.*))"
UI_COMMAND_REGEX = r"^(?P<command>[^\ ]+)(?:\ (?P<parameter>.+))?"
UI_OFFSET_REGEX = r"^(?:o|offset)\ ?(?P<sign>[/+-])?(?P<time>\d{1,9}(?:[^\d\.](?:\d{1,9})){0,2}(?:\.(?:\d{1,3}))?)$"
UI_SEEK_REGEX = r"^(?:s|seek)?\ ?(?P<sign>[+-])?(?P<time>\d{1,4}(?:[^\d\.](?:\d{1,6})){0,2}(?:\.(?:\d{1,3}))?)$"
PARSE_TIME_REGEX = r'(:?(?:(?P<hours>\d+?)[^\d\.])?(?:(?P<minutes>\d+?))?[^\d\.])?(?P<seconds>\d+?)(?:\.(?P<miliseconds>\d+?))?$'
MESSAGE_WITH_USERNAME_REGEX = "^(<(?P<username>[^<>]+)>)(?P<message>.*)"
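# For illustration (not from the original constants): re.match() with
# MESSAGE_WITH_USERNAME_REGEX on "<alice> hello" yields
# groupdict() == {'username': 'alice', 'message': ' hello'}.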
SERVER_MAX_TEMPLATE_LENGTH = 10000
PRIVACY_SENDRAW_MODE = "SendRaw"
PRIVACY_SENDHASHED_MODE = "SendHashed"
PRIVACY_DONTSEND_MODE = "DoNotSend"
UNPAUSE_IFALREADYREADY_MODE = "IfAlreadyReady"
UNPAUSE_IFOTHERSREADY_MODE = "IfOthersReady"
UNPAUSE_IFMINUSERSREADY_MODE = "IfMinUsersReady"
UNPAUSE_ALWAYS_MODE = "Always"
INPUT_POSITION_TOP = "Top"
INPUT_POSITION_MIDDLE = "Middle"
INPUT_POSITION_BOTTOM = "Bottom"
VLC_EOF_DURATION_THRESHOLD = 2.0
PRIVACY_HIDDENFILENAME = "**Hidden filename**"
INVERTED_STATE_MARKER = "*"
ERROR_MESSAGE_MARKER = "*"
LOAD_SAVE_MANUALLY_MARKER = "!"
CONFIG_NAME_MARKER = ":"
CONFIG_VALUE_MARKER = "="
USERITEM_CONTROLLER_ROLE = 0
USERITEM_READY_ROLE = 1
FILEITEM_SWITCH_ROLE = 1
FILEITEM_SWITCH_NO_SWITCH = 0
FILEITEM_SWITCH_FILE_SWITCH = 1
FILEITEM_SWITCH_STREAM_SWITCH = 2
PLAYLISTITEM_CURRENTLYPLAYING_ROLE = 3
MESSAGE_NEUTRAL = "neutral"
MESSAGE_BADNEWS = "bad"
MESSAGE_GOODNEWS = "good"
OSD_NOTIFICATION = "notification" # Also known as PrimaryOSD
OSD_ALERT = "alert" # Also known as SecondaryOSD
OSD_CHAT = "chat"
CHATROOM_MODE = "Chatroom"
SCROLLING_MODE = "Scrolling"
SYNCPLAY_UPDATE_URL = "https://syncplay.pl/checkforupdate?{}" # Params
SYNCPLAY_DOWNLOAD_URL = "https://syncplay.pl/download/"
SYNCPLAY_PUBLIC_SERVER_LIST_URL = "https://syncplay.pl/listpublicservers?{}" # Params
DEFAULT_TRUSTED_DOMAINS = ["youtube.com", "youtu.be"]
TRUSTABLE_WEB_PROTOCOLS = ["http", "https"]
PRIVATE_FILE_FIELDS = ["path"]
CONSOLE_UI_MODE = "CLI"
GRAPHICAL_UI_MODE = "GUI"
UNKNOWN_UI_MODE = "Unknown"
FALLBACK_ASSUMED_UI_MODE = GRAPHICAL_UI_MODE
|
the-stack_106_27666 | import copy
import io
import os
from contextlib import contextmanager
from importlib import import_module
from unittest import SkipTest
from unittest.mock import patch
from django.conf import settings
from django.core.management import call_command, CommandError
from django.template import Context, Origin, Template
from django.test import override_settings, TestCase
from django.urls import get_script_prefix, set_script_prefix
from compressor.cache import flush_offline_manifest, get_offline_manifest
from compressor.exceptions import OfflineGenerationError
from compressor.management.commands.compress import Command as CompressCommand
from compressor.storage import default_storage
from compressor.utils import get_mod_func
def offline_context_generator():
for i in range(1, 4):
yield {'content': 'OK %d!' % i}
def static_url_context_generator():
yield {'STATIC_URL': settings.STATIC_URL}
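# For reference (not from the original tests): list(offline_context_generator())
# evaluates to [{'content': 'OK 1!'}, {'content': 'OK 2!'}, {'content': 'OK 3!'}],
# which is why the generator-based test cases below expect three hashes.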
class LazyScriptNamePrefixedUrl(str):
"""
Lazy URL with ``SCRIPT_NAME`` WSGI param as path prefix.
.. code-block :: python
settings.STATIC_URL = LazyScriptNamePrefixedUrl('/static/')
# HTTP request to '/some/page/' without SCRIPT_NAME
str(settings.STATIC_URL) == '/static/'
# HTTP request to '/app/prefix/some/page/` with SCRIPT_NAME = '/app/prefix/'
str(settings.STATIC_URL) == '/app/prefix/static/'
# HTTP request to '/another/prefix/some/page/` with SCRIPT_NAME = '/another/prefix/'
str(settings.STATIC_URL) == '/another/prefix/static/'
The implementation is incomplete, all ``str`` methods must be overridden
in order to work correctly with the rest of Django core.
"""
def __str__(self):
return get_script_prefix() + self[1:] if self.startswith('/') else self
def __unicode__(self):
return str(self)
def split(self, *args, **kwargs):
"""
Override ``.split()`` method to make it work with ``{% static %}``.
"""
return str(self).split(*args, **kwargs)
def replace(self, *args, **kwargs):
""" Override ``.replace()`` to make it work with ``{% static %}``.
In ``django.core.files.storage``, ``FileSystemStorage.url()`` passes
this object to ``urllib.parse.urljoin``.
In ``urllib.parse``, the function that calls ``replace()`` is
``_remove_unsafe_bytes_from_url()``.
"""
return str(self).replace(*args, **kwargs)
@contextmanager
def script_prefix(new_prefix):
"""
Override ``SCRIPT_NAME`` WSGI param, yield, then restore its original value.
:param new_prefix: New ``SCRIPT_NAME`` value.
"""
old_prefix = get_script_prefix()
set_script_prefix(new_prefix)
yield
set_script_prefix(old_prefix)
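# Illustrative sketch (not from the original tests): rendering with a forced
# SCRIPT_NAME prefix so LazyScriptNamePrefixedUrl('/static/') resolves to
# '/app/prefix/static/' inside the block. The prefix value is arbitrary.
def _example_script_prefix_usage():
    url = LazyScriptNamePrefixedUrl('/static/')
    with script_prefix('/app/prefix/'):
        prefixed = str(url)  # '/app/prefix/static/'
    return prefixed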
class OfflineTestCaseMixin:
CHARSET = 'utf-8'
template_name = 'test_compressor_offline.html'
# Change this for each test class
templates_dir = ''
expected_basename = 'output'
expected_hash = ''
# Engines to test
engines = ('django', 'jinja2')
additional_test_settings = None
def setUp(self):
# Reset template dirs so we can force compress to consider only a
# specific directory (this keeps the unit tests truly independent).
# Specify both Jinja2 and Django template locations. When the wrong
# engine is used to parse a template, the TemplateSyntaxError will
# cause the template to be skipped over.
# We've hardcoded TEMPLATES[0] to be Django templates backend and
# TEMPLATES[1] to be Jinja2 templates backend in test_settings.
TEMPLATES = copy.deepcopy(settings.TEMPLATES)
django_template_dir = os.path.join(
TEMPLATES[0]['DIRS'][0], self.templates_dir)
jinja2_template_dir = os.path.join(
TEMPLATES[1]['DIRS'][0], self.templates_dir)
TEMPLATES[0]['DIRS'] = [django_template_dir]
TEMPLATES[1]['DIRS'] = [jinja2_template_dir]
override_settings = {
'TEMPLATES': TEMPLATES,
'COMPRESS_ENABLED': True,
'COMPRESS_OFFLINE': True
}
if 'jinja2' in self.engines:
override_settings['COMPRESS_JINJA2_GET_ENVIRONMENT'] = (
lambda: self._get_jinja2_env())
if self.additional_test_settings is not None:
override_settings.update(self.additional_test_settings)
self.override_settings = self.settings(**override_settings)
self.override_settings.__enter__()
if 'django' in self.engines:
self.template_path = os.path.join(
django_template_dir, self.template_name)
origin = Origin(name=self.template_path, # Absolute path
template_name=self.template_name) # Loader-relative path
with io.open(self.template_path,
encoding=self.CHARSET) as file_:
self.template = Template(file_.read(), origin=origin)
if 'jinja2' in self.engines:
self.template_path_jinja2 = os.path.join(
jinja2_template_dir, self.template_name)
jinja2_env = override_settings['COMPRESS_JINJA2_GET_ENVIRONMENT']()
with io.open(self.template_path_jinja2,
encoding=self.CHARSET) as file_:
self.template_jinja2 = jinja2_env.from_string(file_.read())
def tearDown(self):
self.override_settings.__exit__(None, None, None)
manifest_path = os.path.join('CACHE', 'manifest.json')
if default_storage.exists(manifest_path):
default_storage.delete(manifest_path)
def _prepare_contexts(self, engine):
contexts = settings.COMPRESS_OFFLINE_CONTEXT
if not isinstance(contexts, (list, tuple)):
contexts = [contexts]
if engine == 'django':
return [Context(c) for c in contexts]
if engine == 'jinja2':
return contexts
return None
def _render_template(self, engine):
contexts = self._prepare_contexts(engine)
if engine == 'django':
return ''.join(self.template.render(c) for c in contexts)
if engine == 'jinja2':
return '\n'.join(
self.template_jinja2.render(c) for c in contexts) + '\n'
return None
def _render_script(self, hash):
return (
'<script src="{}CACHE/js/{}.{}.js">'
'</script>'.format(
settings.COMPRESS_URL_PLACEHOLDER, self.expected_basename, hash
)
)
def _render_link(self, hash):
return (
'<link rel="stylesheet" href="{}CACHE/css/{}.{}.css" '
'type="text/css">'.format(
settings.COMPRESS_URL_PLACEHOLDER, self.expected_basename, hash
)
)
def _render_result(self, result, separator='\n'):
return (separator.join(result) + '\n').replace(
settings.COMPRESS_URL_PLACEHOLDER, str(settings.COMPRESS_URL)
)
def _test_offline(self, engine):
hashes = self.expected_hash
if not isinstance(hashes, (list, tuple)):
hashes = [hashes]
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
self.assertEqual(len(hashes), count)
self.assertEqual([self._render_script(h) for h in hashes], result)
rendered_template = self._render_template(engine)
self.assertEqual(rendered_template, self._render_result(result))
def test_offline_django(self):
if 'django' not in self.engines:
raise SkipTest('This test class does not support django engine.')
self._test_offline(engine='django')
def test_offline_jinja2(self):
if 'jinja2' not in self.engines:
raise SkipTest('This test class does not support jinja2 engine.')
self._test_offline(engine='jinja2')
def _get_jinja2_env(self):
import jinja2.ext
from compressor.offline.jinja2 import url_for, SpacelessExtension
from compressor.contrib.jinja2ext import CompressorExtension
# Extensions needed for the test cases only.
extensions = [
CompressorExtension,
SpacelessExtension,
jinja2.ext.with_,
jinja2.ext.do,
]
loader = self._get_jinja2_loader()
env = jinja2.Environment(extensions=extensions, loader=loader)
env.globals['url_for'] = url_for
return env
def _get_jinja2_loader(self):
import jinja2
loader = jinja2.FileSystemLoader(
settings.TEMPLATES[1]['DIRS'], encoding=self.CHARSET)
return loader
class OfflineCompressBasicTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'basic'
expected_hash = '822ac7501287'
@patch.object(CompressCommand, 'compress')
def test_handle_no_args(self, compress_mock):
compress_mock.return_value = {}, 1, []
CompressCommand().handle()
self.assertEqual(compress_mock.call_count, 1)
@patch.object(CompressCommand, 'compress')
def test_handle_compress_disabled(self, compress_mock):
with self.settings(COMPRESS_ENABLED=False):
with self.assertRaises(CommandError):
CompressCommand().handle()
self.assertEqual(compress_mock.call_count, 0)
@patch.object(CompressCommand, 'compress')
def test_handle_compress_offline_disabled(self, compress_mock):
with self.settings(COMPRESS_OFFLINE=False):
with self.assertRaises(CommandError):
CompressCommand().handle()
self.assertEqual(compress_mock.call_count, 0)
@patch.object(CompressCommand, 'compress')
def test_handle_compress_offline_disabled_force(self, compress_mock):
compress_mock.return_value = {}, 1, []
with self.settings(COMPRESS_OFFLINE=False):
CompressCommand().handle(force=True)
self.assertEqual(compress_mock.call_count, 1)
def test_rendering_without_manifest_raises_exception(self):
# flush cached manifest
flush_offline_manifest()
self.assertRaises(OfflineGenerationError,
self.template.render, Context({}))
def test_rendering_without_manifest_raises_exception_jinja2(self):
# flush cached manifest
flush_offline_manifest()
self.assertRaises(OfflineGenerationError,
self.template_jinja2.render, {})
def _test_deleting_manifest_does_not_affect_rendering(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
get_offline_manifest()
manifest_path = os.path.join('CACHE', 'manifest.json')
if default_storage.exists(manifest_path):
default_storage.delete(manifest_path)
self.assertEqual(1, count)
self.assertEqual([self._render_script(self.expected_hash)], result)
rendered_template = self._render_template(engine)
self.assertEqual(rendered_template, self._render_result(result))
def test_deleting_manifest_does_not_affect_rendering(self):
for engine in self.engines:
self._test_deleting_manifest_does_not_affect_rendering(engine)
def test_get_loaders(self):
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
with self.settings(TEMPLATE_LOADERS=TEMPLATE_LOADERS):
from django.template.loaders.filesystem import (
Loader as FileSystemLoader)
from django.template.loaders.app_directories import (
Loader as AppDirectoriesLoader)
loaders = CompressCommand().get_loaders()
self.assertTrue(isinstance(loaders[0], FileSystemLoader))
self.assertTrue(isinstance(loaders[1], AppDirectoriesLoader))
@patch("compressor.offline.django.DjangoParser.render_node",
side_effect=Exception(b"non-ascii character here:\xc3\xa4"))
def test_non_ascii_exception_messages(self, mock):
with self.assertRaises(CommandError):
CompressCommand().handle(verbosity=0)
class OfflineCompressSkipDuplicatesTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_duplicate'
def _test_offline(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
# Only one block compressed, the second identical one was skipped.
self.assertEqual(1, count)
# Only 1 <script> block in returned result as well.
self.assertEqual([self._render_script('822ac7501287')], result)
rendered_template = self._render_template(engine)
# But rendering the template returns both (identical) scripts.
self.assertEqual(
rendered_template, self._render_result(result * 2, ''))
class SuperMixin:
# Block.super not supported for Jinja2 yet.
engines = ('django',)
class OfflineCompressBlockSuperTestCase(
SuperMixin, OfflineTestCaseMixin, TestCase):
templates_dir = 'test_block_super'
expected_hash = '817b5defb197'
class OfflineCompressBlockSuperMultipleTestCase(
SuperMixin, OfflineTestCaseMixin, TestCase):
templates_dir = 'test_block_super_multiple'
expected_hash = 'd3f749e83c81'
class OfflineCompressBlockSuperMultipleCachedLoaderTestCase(
SuperMixin, OfflineTestCaseMixin, TestCase):
templates_dir = 'test_block_super_multiple_cached'
expected_hash = '055f88f4751f'
additional_test_settings = {
'TEMPLATE_LOADERS': (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
}
class OfflineCompressBlockSuperTestCaseWithExtraContent(
SuperMixin, OfflineTestCaseMixin, TestCase):
templates_dir = 'test_block_super_extra'
def _test_offline(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
self.assertEqual(2, count)
self.assertEqual([
self._render_script('bfcec76e0f28'),
self._render_script('817b5defb197')
], result)
rendered_template = self._render_template(engine)
self.assertEqual(rendered_template, self._render_result(result, ''))
class OfflineCompressConditionTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_condition'
expected_hash = 'a3275743dc69'
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': {
'condition': 'red',
}
}
class OfflineCompressTemplateTagTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_templatetag'
expected_hash = '2bb88185b4f5'
class OfflineCompressStaticTemplateTagTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_static_templatetag'
expected_hash = 'be0b1eade28b'
class OfflineCompressTemplateTagNamedTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_templatetag_named'
expected_basename = 'output_name'
expected_hash = '822ac7501287'
class OfflineCompressTestCaseWithContext(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_with_context'
expected_hash = 'c6bf81bca7ad'
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': {
'content': 'OK!',
}
}
class OfflineCompressTestCaseWithContextSuper(
SuperMixin, OfflineTestCaseMixin, TestCase):
templates_dir = 'test_with_context_super'
expected_hash = 'dd79e1bd1527'
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': {
'content': 'OK!',
}
}
class OfflineCompressTestCaseWithContextList(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_with_context'
expected_hash = ['8b4a7452e1c5', '55b3123e884c', 'bfc63829cc58']
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': list(offline_context_generator())
}
def _prepare_contexts(self, engine):
if engine == 'django':
return [Context(c) for c in settings.COMPRESS_OFFLINE_CONTEXT]
if engine == 'jinja2':
return settings.COMPRESS_OFFLINE_CONTEXT
return None
class OfflineCompressTestCaseWithContextListSuper(
SuperMixin, OfflineCompressTestCaseWithContextList):
templates_dir = 'test_with_context_super'
expected_hash = ['b39975a8f6ea', 'ed565a1d262f', '6ac9e4b29feb']
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': list(offline_context_generator())
}
class OfflineCompressTestCaseWithContextGenerator(
OfflineTestCaseMixin, TestCase):
templates_dir = 'test_with_context'
expected_hash = ['8b4a7452e1c5', '55b3123e884c', 'bfc63829cc58']
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': 'compressor.tests.test_offline.'
'offline_context_generator'
}
def _prepare_contexts(self, engine):
module, function = get_mod_func(settings.COMPRESS_OFFLINE_CONTEXT)
contexts = getattr(import_module(module), function)()
if engine == 'django':
return (Context(c) for c in contexts)
if engine == 'jinja2':
return contexts
return None
class OfflineCompressTestCaseWithContextGeneratorSuper(
SuperMixin, OfflineCompressTestCaseWithContextGenerator):
templates_dir = 'test_with_context_super'
expected_hash = ['b39975a8f6ea', 'ed565a1d262f', '6ac9e4b29feb']
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': 'compressor.tests.test_offline.'
'offline_context_generator'
}
class OfflineCompressStaticUrlIndependenceTestCase(
OfflineCompressTestCaseWithContextGenerator):
"""
Test that the offline manifest is independent of STATIC_URL.
I.e. users can use the manifest with any other STATIC_URL in the future.
"""
templates_dir = 'test_static_url_independence'
expected_hash = 'b0bfc3754fd4'
additional_test_settings = {
'STATIC_URL': '/custom/static/url/',
# We use ``COMPRESS_OFFLINE_CONTEXT`` generator to make sure that
# ``STATIC_URL`` is not cached when rendering the template.
'COMPRESS_OFFLINE_CONTEXT': (
'compressor.tests.test_offline.static_url_context_generator'
)
}
def _test_offline(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
self.assertEqual(1, count)
self.assertEqual([self._render_script(self.expected_hash)], result)
self.assertEqual(
self._render_template(engine), self._render_result(result))
# Changing STATIC_URL setting doesn't break things despite that
# offline compression was made with different STATIC_URL.
with self.settings(STATIC_URL='/another/static/url/'):
self.assertEqual(
self._render_template(engine), self._render_result(result))
class OfflineCompressTestCaseWithContextVariableInheritance(
OfflineTestCaseMixin, TestCase):
templates_dir = 'test_with_context_variable_inheritance'
expected_hash = 'b8376aad1357'
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': {
'parent_template': 'base.html',
}
}
def _render_result(self, result, separator='\n'):
return '\n' + super()._render_result(result, separator)
class OfflineCompressTestCaseWithContextVariableInheritanceSuper(
SuperMixin, OfflineTestCaseMixin, TestCase):
templates_dir = 'test_with_context_variable_inheritance_super'
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': [{
'parent_template': 'base1.html',
}, {
'parent_template': 'base2.html',
}]
}
expected_hash = ['cee48db7cedc', 'c877c436363a']
class OfflineCompressTestCaseWithContextGeneratorImportError(
OfflineTestCaseMixin, TestCase):
templates_dir = 'test_with_context'
def _test_offline(self, engine):
# Test that we are properly generating ImportError when
# COMPRESS_OFFLINE_CONTEXT looks like a function but can't be imported
# for whatever reason.
with self.settings(
COMPRESS_OFFLINE_CONTEXT='invalid_mod.invalid_func'):
# Path with invalid module name -- ImportError:
self.assertRaises(
ImportError, CompressCommand().handle_inner, engines=[engine])
with self.settings(COMPRESS_OFFLINE_CONTEXT='compressor'):
# Valid module name only without function -- AttributeError:
self.assertRaises(
ImportError, CompressCommand().handle_inner, engines=[engine])
with self.settings(
COMPRESS_OFFLINE_CONTEXT='compressor.tests.invalid_function'):
# Path with invalid function name -- AttributeError:
self.assertRaises(
ImportError, CompressCommand().handle_inner, engines=[engine])
with self.settings(
COMPRESS_OFFLINE_CONTEXT='compressor.tests.test_offline'):
# Path without function attempts call on module -- TypeError:
self.assertRaises(
ImportError, CompressCommand().handle_inner, engines=[engine])
valid_path = 'compressor.tests.test_offline.offline_context_generator'
with self.settings(COMPRESS_OFFLINE_CONTEXT=valid_path):
# Valid path to generator function -- no ImportError:
try:
CompressCommand().handle_inner(engines=[engine], verbosity=0)
except ImportError:
self.fail('Valid path to offline context generator must'
' not raise ImportError.')
class OfflineCompressTestCaseErrors(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_error_handling'
def _test_offline(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
if engine == 'django':
self.assertEqual(2, count)
else:
# Because we use env.parse in Jinja2Parser, the engine does not
# actually load the 'extends' and 'includes' templates, and so
# it is unable to detect that they are missing. So all the
# 'compress' nodes are processed correctly.
self.assertEqual(4, count)
self.assertEqual(engine, 'jinja2')
self.assertIn(self._render_link('187e2ce75808'), result)
self.assertIn(self._render_link('fffafcdf428e'), result)
self.assertIn(self._render_script('eeabdac29232'), result)
self.assertIn(self._render_script('9a7f06880ce3'), result)
class OfflineCompressTestCaseWithError(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_error_handling'
additional_test_settings = {
'COMPRESS_PRECOMPILERS': (('text/coffeescript', 'nonexisting-binary'),)
}
def _test_offline(self, engine):
"""
Test that a CommandError is raised with DEBUG being False as well as
True, as otherwise errors in configuration will never show in
production.
"""
with self.settings(DEBUG=True):
self.assertRaises(
CommandError, CompressCommand().handle_inner, engines=[engine], verbosity=0)
with self.settings(DEBUG=False):
self.assertRaises(
CommandError, CompressCommand().handle_inner, engines=[engine], verbosity=0)
class OfflineCompressEmptyTag(OfflineTestCaseMixin, TestCase):
"""
In case of a compress template tag with no content, an entry
will be added to the manifest with an empty string as value.
This test makes sure there is no recompression happening when
    compressor encounters such an empty string in the manifest.
"""
templates_dir = 'basic'
expected_hash = '822ac7501287'
def _test_offline(self, engine):
CompressCommand().handle_inner(engines=[engine], verbosity=0)
manifest = get_offline_manifest()
manifest[list(manifest)[0]] = ''
self.assertEqual(self._render_template(engine), '\n')
class OfflineCompressBlockSuperBaseCompressed(OfflineTestCaseMixin, TestCase):
template_names = ['base.html', 'base2.html',
'test_compressor_offline.html']
templates_dir = 'test_block_super_base_compressed'
expected_hash_offline = ['e4e9263fa4c0', '9cecd41a505f', 'd3f749e83c81']
expected_hash = ['028c3fc42232', '2e9d3f5545a6', 'd3f749e83c81']
# Block.super not supported for Jinja2 yet.
engines = ('django',)
def setUp(self):
super().setUp()
self.template_paths = []
self.templates = []
for template_name in self.template_names:
template_path = os.path.join(
settings.TEMPLATES[0]['DIRS'][0], template_name)
self.template_paths.append(template_path)
with io.open(template_path,
encoding=self.CHARSET) as file_:
template = Template(file_.read())
self.templates.append(template)
def _render_template(self, template, engine):
if engine == 'django':
return template.render(Context(settings.COMPRESS_OFFLINE_CONTEXT))
elif engine == 'jinja2':
return template.render(settings.COMPRESS_OFFLINE_CONTEXT) + '\n'
else:
return None
def _test_offline(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
self.assertEqual(len(self.expected_hash), count)
for expected_hash, template in zip(self.expected_hash_offline, self.templates):
expected = self._render_script(expected_hash)
self.assertIn(expected, result)
rendered_template = self._render_template(template, engine)
self.assertEqual(
rendered_template, self._render_result([expected]))
class OfflineCompressInlineNonAsciiTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_inline_non_ascii'
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': {
'test_non_ascii_value': '\u2014',
}
}
def _test_offline(self, engine):
_, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
rendered_template = self._render_template(engine)
self.assertEqual(rendered_template, ''.join(result) + '\n')
class OfflineCompressComplexTestCase(OfflineTestCaseMixin, TestCase):
templates_dir = 'test_complex'
additional_test_settings = {
'COMPRESS_OFFLINE_CONTEXT': {
'condition': 'OK!',
# Django templating does not allow definition of tuples in the
# templates.
# Make sure this is same as test_templates_jinja2/test_complex.
'my_names': ('js/one.js', 'js/nonasc.js'),
}
}
def _test_offline(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
self.assertEqual(3, count)
self.assertEqual([
self._render_script('76a82cfab9ab'),
self._render_script('7219642b8ab4'),
self._render_script('567bb77b13db')
], result)
rendered_template = self._render_template(engine)
self.assertEqual(
rendered_template, self._render_result([result[0], result[2]], ''))
class OfflineCompressExtendsRecursionTestCase(OfflineTestCaseMixin, TestCase):
"""
Test that templates extending templates with the same name
    (e.g. admin/index.html) don't cause infinite recursion.
"""
templates_dir = 'test_extends_recursion'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'compressor',
]
@override_settings(INSTALLED_APPS=INSTALLED_APPS)
def _test_offline(self, engine):
count, _ = CompressCommand().handle_inner(engines=[engine], verbosity=0)
self.assertEqual(count, 1)
class OfflineCompressExtendsRelativeTestCase(SuperMixin, OfflineTestCaseMixin, TestCase):
"""
Test that templates extending templates using relative paths
(e.g. ./base.html) are evaluated correctly
"""
templates_dir = 'test_extends_relative'
expected_hash = '817b5defb197'
class TestCompressCommand(OfflineTestCaseMixin, TestCase):
templates_dir = "test_compress_command"
def _test_offline(self, engine):
raise SkipTest("Not utilized for this test case")
def _build_expected_manifest(self, expected):
return {
k: self._render_script(v) for k, v in expected.items()
}
def test_multiple_engines(self):
opts = {
"force": True,
"verbosity": 0,
}
call_command('compress', engines=["django"], **opts)
manifest_django = get_offline_manifest()
manifest_django_expected = self._build_expected_manifest(
{'0fed9c02607acba22316a328075a81a74e0983ae79470daa9d3707a337623dc3': '0241107e9a9a'})
self.assertEqual(manifest_django, manifest_django_expected)
call_command('compress', engines=["jinja2"], **opts)
manifest_jinja2 = get_offline_manifest()
manifest_jinja2_expected = self._build_expected_manifest(
{'077408d23d4a829b8f88db2eadcf902b29d71b14f94018d900f38a3f8ed24c94': '5694ca83dd14'})
self.assertEqual(manifest_jinja2, manifest_jinja2_expected)
call_command('compress', engines=["django", "jinja2"], **opts)
manifest_both = get_offline_manifest()
manifest_both_expected = self._build_expected_manifest(
{'0fed9c02607acba22316a328075a81a74e0983ae79470daa9d3707a337623dc3': '0241107e9a9a',
'077408d23d4a829b8f88db2eadcf902b29d71b14f94018d900f38a3f8ed24c94': '5694ca83dd14'})
self.assertEqual(manifest_both, manifest_both_expected)
class OfflineCompressTestCaseWithLazyStringAlikeUrls(OfflineCompressTestCaseWithContextGenerator):
"""
Test offline compressing with ``STATIC_URL`` and ``COMPRESS_URL`` as instances of
*lazy string-alike objects* instead of strings.
In particular, lazy string-alike objects that add ``SCRIPT_NAME`` WSGI param
as URL path prefix.
For example:
- We've generated offline assets and deployed them with our Django project.
- We've configured HTTP server (e.g. Nginx) to serve our app at two different URLs:
``http://example.com/my/app/`` and ``http://app.example.com/``.
- Both URLs are leading to the same app, but in the first case we pass
``SCRIPT_NAME = /my/app/`` to WSGI app server (e.g. to uWSGI, which is *behind* Nginx).
- Django (1.11.7, as of today) *ignores* ``SCRIPT_NAME`` when generating
static URLs, while it uses ``SCRIPT_NAME`` when generating Django views URLs -
see https://code.djangoproject.com/ticket/25598.
- As a solution - we can use a lazy string-alike object instead of ``str`` for ``STATIC_URL``
so it will know about ``SCRIPT_NAME`` and add it as a prefix every time we do any
string operation with ``STATIC_URL``.
- However, there are some cases when we cannot force CPython to render our lazy string
correctly - e.g. ``some_string.replace(STATIC_URL, '...')``. So we need to do explicit
``str`` type cast: ``some_string.replace(str(STATIC_URL), '...')``.
"""
templates_dir = 'test_static_templatetag'
additional_test_settings = {
'STATIC_URL': LazyScriptNamePrefixedUrl('/static/'),
'COMPRESS_URL': LazyScriptNamePrefixedUrl('/static/'),
# We use ``COMPRESS_OFFLINE_CONTEXT`` generator to make sure that
# ``STATIC_URL`` is not cached when rendering the template.
'COMPRESS_OFFLINE_CONTEXT': (
'compressor.tests.test_offline.static_url_context_generator'
)
}
expected_hash = 'be0b1eade28b'
def _test_offline(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
self.assertEqual(1, count)
# Change ``SCRIPT_NAME`` WSGI param - it can be changed on every HTTP request,
# e.g. passed via HTTP header.
for script_name in ['', '/app/prefix/', '/another/prefix/']:
with script_prefix(script_name):
self.assertEqual(
str(settings.STATIC_URL),
script_name.rstrip('/') + '/static/'
)
self.assertEqual(
str(settings.COMPRESS_URL),
script_name.rstrip('/') + '/static/'
)
expected_result = self._render_result(result)
actual_result = self._render_template(engine)
self.assertEqual(actual_result, expected_result)
self.assertIn(str(settings.COMPRESS_URL), actual_result)
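# Illustrative sketch (not part of the original test module): the real
# ``LazyScriptNamePrefixedUrl`` used above is imported from the test helpers and
# is not defined in this file.  The hypothetical class below only shows the idea
# described in the class docstring -- a string-alike that prepends the current
# WSGI ``SCRIPT_NAME`` (Django's script prefix) whenever it is rendered as text.
class _LazyScriptNamePrefixedUrlSketch(str):
    """Hypothetical string subclass that resolves the script prefix lazily."""

    def __str__(self):
        from django.urls import get_script_prefix
        # With SCRIPT_NAME='/my/app/', '/static/' renders as '/my/app/static/'.
        return get_script_prefix().rstrip('/') + str.__str__(self)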
|
the-stack_106_27667 | """
Author: Fritz Alder
Copyright:
Secure Systems Group, Aalto University
https://ssg.aalto.fi/
This code is released under Apache 2.0 license
http://www.apache.org/licenses/LICENSE-2.0
"""
import onnx
from onnx import numpy_helper
import numpy as np
print("These tests work with a fractional of 1 and a downscale of 10,000. Set this in config.py accordingly before running MiniONN.")
model = onnx.load("manual_model_only_gemm.onnx")
tensor_dict = {}
for t in model.graph.initializer:
tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t)
input_tensor = onnx.TensorProto()
with open('manual_model_only_gemm.onnx.tensor', 'rb') as fid:
content = fid.read()
input_tensor.ParseFromString(content)
tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor)
#tensor_dict["1"] = np.reshape(tensor_dict["1temp"], (1,3))
# do fractionals
fractional = 1
downscale = 10000
single = ["1", "2"]
double = ["3"]
for s in single:
tensor_dict[s] = np.multiply(tensor_dict[s], fractional)
for s in double:
tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional)
for s in tensor_dict:
tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape)
tensor_dict["4temp"] = np.matmul(tensor_dict["2"], tensor_dict["1"])
tensor_dict["4added"] = np.add(tensor_dict["4temp"], tensor_dict["3"])
tensor_dict["4"] = np.divide(tensor_dict["4added"],fractional*downscale).astype(int)
"""
print("Input")
print(tensor_dict["1"])
print("W1")
print(tensor_dict["2"])
print("b1")
print(tensor_dict["3"])
"""
print("Expected result")
print(tensor_dict["4"])
# now see if the result is close
given = np.loadtxt("out.txt", delimiter=",").astype(int)
diff = np.subtract(given, tensor_dict["4"])
print("Given result")
print(given)
print("Diff")
print(diff)
np.testing.assert_array_equal(tensor_dict["4"], given, err_msg="Result is not the same as expected result!", verbose=True)
print("All numbers equal. Test passed") |
the-stack_106_27668 | # pylint: disable=no-member, no-name-in-module, import-error
from __future__ import absolute_import
import glob
import os
import distutils.command.sdist
import distutils.log
import subprocess
from setuptools import Command, setup
import setuptools.command.sdist
# Patch setuptools' sdist behaviour with distutils' sdist behaviour
setuptools.command.sdist.sdist.run = distutils.command.sdist.sdist.run
VERSION_INFO = {}
CWD = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(CWD, "dxldomaintoolsservice", "_version.py")) as f:
exec(f.read(), VERSION_INFO) # pylint: disable=exec-used
class LintCommand(Command):
"""
Custom setuptools command for running lint
"""
description = 'run lint against project source files'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.announce("Running pylint for library source files and tests",
level=distutils.log.INFO)
subprocess.check_call(["pylint", "dxldomaintoolsservice"] +
glob.glob("*.py"))
self.announce("Running pylint for samples", level=distutils.log.INFO)
subprocess.check_call(["pylint"] + glob.glob("sample/*.py") +
glob.glob("sample/**/*.py") +
["--rcfile", ".pylintrc.samples"])
class CiCommand(Command):
"""
Custom setuptools command for running steps that are performed during
Continuous Integration testing.
"""
description = 'run CI steps (lint, test, etc.)'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.run_command("lint")
TEST_REQUIREMENTS = ["astroid<2.3.0", "pylint<=2.3.1"]
DEV_REQUIREMENTS = TEST_REQUIREMENTS + ["sphinx"]
setup(
# Package name:
name="dxldomaintoolsservice",
# Version number:
version=VERSION_INFO["__version__"],
# Requirements
install_requires=[
"domaintools_api==0.3.3",
"dxlbootstrap>=0.2.0",
"dxlclient>=4.1.0.184"
],
tests_require=TEST_REQUIREMENTS,
extras_require={
"dev": DEV_REQUIREMENTS,
"test": TEST_REQUIREMENTS
},
# Package author details:
author="McAfee LLC",
# License
license="Apache License 2.0",
# Keywords
keywords=['opendxl', 'dxl', 'mcafee', 'service', 'domaintools'],
# Packages
packages=[
"dxldomaintoolsservice",
"dxldomaintoolsservice._config",
"dxldomaintoolsservice._config.sample",
"dxldomaintoolsservice._config.app"],
package_data={
"dxldomaintoolsservice._config.sample" : ['*'],
"dxldomaintoolsservice._config.app" : ['*']},
# Details
url="http://www.mcafee.com",
description="DomainTools API DXL Python service library",
long_description=open('README').read(),
python_requires='>=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
],
cmdclass={
"ci": CiCommand,
"lint": LintCommand
}
)
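# Example invocations of the custom commands registered in ``cmdclass`` above
# (run from the project root; output depends on the locally installed tools):
#
#   python setup.py lint   # pylint over the library, tests and samples
#   python setup.py ci     # currently equivalent to running the lint step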
|
the-stack_106_27669 | """
User enters the cost of a product and the amount of money given. If the amount is not enough, keep asking for more; then calculate the change in quarters, dimes, nickels, and pennies.
"""
cost = float(input("What is the cost of the product? "))
tender = float(input("How much money are you giving to buy the product? "))
# Keep asking for more money until the amount given covers the cost.
while tender < cost:
    print("You still owe $" + "%.2f" % (cost - tender))
    tender += float(input("How much more money are you giving? "))
# Work in whole cents and round so floating-point error cannot lose a penny.
change = round((tender - cost) * 100)
quart = 0
dime = 0
nickel = 0
penn = 0
if change >= 25:
    quart = int(change / 25)
    change = change % 25
if change >= 10:
    dime = int(change / 10)
    change = change % 10
if change >= 5:
    nickel = int(change / 5)
    change = change % 5
if change >= 1:
    penn = int(change)
print("Change: " + str(quart) + " quarters, " + str(dime) + " dimes, "
      + str(nickel) + " nickels, " + str(penn) + " pennies")
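# Example session with the corrected logic above (hypothetical input values):
#
#   What is the cost of the product? 7.63
#   How much money are you giving to buy the product? 10.00
#   Change: 9 quarters, 1 dimes, 0 nickels, 2 pennies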
|
the-stack_106_27670 | # coding: utf-8
import pprint
import re
import six
class BatchDeleteFunctionTriggersRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'function_urn': 'str'
}
attribute_map = {
'function_urn': 'function_urn'
}
def __init__(self, function_urn=None):
"""BatchDeleteFunctionTriggersRequest - a model defined in huaweicloud sdk"""
self._function_urn = None
self.discriminator = None
self.function_urn = function_urn
@property
def function_urn(self):
"""Gets the function_urn of this BatchDeleteFunctionTriggersRequest.
        The URN of the function. See the description of the FunctionGraph function model for details.
:return: The function_urn of this BatchDeleteFunctionTriggersRequest.
:rtype: str
"""
return self._function_urn
@function_urn.setter
def function_urn(self, function_urn):
"""Sets the function_urn of this BatchDeleteFunctionTriggersRequest.
        The URN of the function. See the description of the FunctionGraph function model for details.
:param function_urn: The function_urn of this BatchDeleteFunctionTriggersRequest.
:type: str
"""
self._function_urn = function_urn
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchDeleteFunctionTriggersRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
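# Illustrative usage (not part of the generated SDK file); the URN below is a
# made-up placeholder and the client class that actually sends this request is
# defined elsewhere in the SDK:
#
#   request = BatchDeleteFunctionTriggersRequest(function_urn="urn:fss:...:my-func")
#   payload = request.to_dict()   # {'function_urn': 'urn:fss:...:my-func'}
#   print(request)                # pretty-printed via __repr__ -> to_str()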
|
the-stack_106_27672 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_comment_attachment_info
except ImportError:
bt_comment_attachment_info = sys.modules[
"onshape_client.oas.models.bt_comment_attachment_info"
]
try:
from onshape_client.oas.models import bt_user_summary_info
except ImportError:
bt_user_summary_info = sys.modules["onshape_client.oas.models.bt_user_summary_info"]
try:
from onshape_client.oas.models import bt_view_data_info
except ImportError:
bt_view_data_info = sys.modules["onshape_client.oas.models.bt_view_data_info"]
class BTCommentInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"assembly_features": ([str],), # noqa: E501
"assigned_at": (datetime,), # noqa: E501
"assignee": (bt_user_summary_info.BTUserSummaryInfo,), # noqa: E501
"attachment": (
bt_comment_attachment_info.BTCommentAttachmentInfo,
), # noqa: E501
"can_delete": (bool,), # noqa: E501
"can_resolve_or_reopen": (bool,), # noqa: E501
"created_at": (datetime,), # noqa: E501
"document_id": (str,), # noqa: E501
"element_feature": (str,), # noqa: E501
"element_id": (str,), # noqa: E501
"element_occurrences": ([str],), # noqa: E501
"element_query": (str,), # noqa: E501
"href": (str,), # noqa: E501
"id": (str,), # noqa: E501
"message": (str,), # noqa: E501
"name": (str,), # noqa: E501
"parent_id": (str,), # noqa: E501
"release_package_id": (str,), # noqa: E501
"reopened_at": (datetime,), # noqa: E501
"reopened_by": (bt_user_summary_info.BTUserSummaryInfo,), # noqa: E501
"reply_count": (int,), # noqa: E501
"resolved_at": (datetime,), # noqa: E501
"resolved_by": (bt_user_summary_info.BTUserSummaryInfo,), # noqa: E501
"state": (int,), # noqa: E501
"thumbnail": (
bt_comment_attachment_info.BTCommentAttachmentInfo,
), # noqa: E501
"top_level": (bool,), # noqa: E501
"user": (bt_user_summary_info.BTUserSummaryInfo,), # noqa: E501
"version_id": (str,), # noqa: E501
"view_data": (bt_view_data_info.BTViewDataInfo,), # noqa: E501
"view_ref": (str,), # noqa: E501
"workspace_id": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"assembly_features": "assemblyFeatures", # noqa: E501
"assigned_at": "assignedAt", # noqa: E501
"assignee": "assignee", # noqa: E501
"attachment": "attachment", # noqa: E501
"can_delete": "canDelete", # noqa: E501
"can_resolve_or_reopen": "canResolveOrReopen", # noqa: E501
"created_at": "createdAt", # noqa: E501
"document_id": "documentId", # noqa: E501
"element_feature": "elementFeature", # noqa: E501
"element_id": "elementId", # noqa: E501
"element_occurrences": "elementOccurrences", # noqa: E501
"element_query": "elementQuery", # noqa: E501
"href": "href", # noqa: E501
"id": "id", # noqa: E501
"message": "message", # noqa: E501
"name": "name", # noqa: E501
"parent_id": "parentId", # noqa: E501
"release_package_id": "releasePackageId", # noqa: E501
"reopened_at": "reopenedAt", # noqa: E501
"reopened_by": "reopenedBy", # noqa: E501
"reply_count": "replyCount", # noqa: E501
"resolved_at": "resolvedAt", # noqa: E501
"resolved_by": "resolvedBy", # noqa: E501
"state": "state", # noqa: E501
"thumbnail": "thumbnail", # noqa: E501
"top_level": "topLevel", # noqa: E501
"user": "user", # noqa: E501
"version_id": "versionId", # noqa: E501
"view_data": "viewData", # noqa: E501
"view_ref": "viewRef", # noqa: E501
"workspace_id": "workspaceId", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_comment_info.BTCommentInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
assembly_features ([str]): [optional] # noqa: E501
assigned_at (datetime): [optional] # noqa: E501
assignee (bt_user_summary_info.BTUserSummaryInfo): [optional] # noqa: E501
attachment (bt_comment_attachment_info.BTCommentAttachmentInfo): [optional] # noqa: E501
can_delete (bool): [optional] # noqa: E501
can_resolve_or_reopen (bool): [optional] # noqa: E501
created_at (datetime): [optional] # noqa: E501
document_id (str): [optional] # noqa: E501
element_feature (str): [optional] # noqa: E501
element_id (str): [optional] # noqa: E501
element_occurrences ([str]): [optional] # noqa: E501
element_query (str): [optional] # noqa: E501
href (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
message (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
parent_id (str): [optional] # noqa: E501
release_package_id (str): [optional] # noqa: E501
reopened_at (datetime): [optional] # noqa: E501
reopened_by (bt_user_summary_info.BTUserSummaryInfo): [optional] # noqa: E501
reply_count (int): [optional] # noqa: E501
resolved_at (datetime): [optional] # noqa: E501
resolved_by (bt_user_summary_info.BTUserSummaryInfo): [optional] # noqa: E501
state (int): [optional] # noqa: E501
thumbnail (bt_comment_attachment_info.BTCommentAttachmentInfo): [optional] # noqa: E501
top_level (bool): [optional] # noqa: E501
user (bt_user_summary_info.BTUserSummaryInfo): [optional] # noqa: E501
version_id (str): [optional] # noqa: E501
view_data (bt_view_data_info.BTViewDataInfo): [optional] # noqa: E501
view_ref (str): [optional] # noqa: E501
workspace_id (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
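# Illustrative usage (not part of the generated client file): instances are
# normally produced by the OpenAPI deserializer from API responses, but one can
# also be built by hand with keyword arguments matching ``openapi_types``
# (the values below are made up; accessor behaviour comes from the model base
# classes, not from this file):
#
#   comment = BTCommentInfo(message="Looks good", reply_count=0, top_level=True)
#   comment.message   # -> "Looks good", stored through the model base class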
|
the-stack_106_27676 | from ...types.groups.group_call_participant import GroupCallParticipant
from ...types.update import Update
class JoinedGroupCallParticipant(Update):
"""A participant joined to the Group Call
Attributes:
chat_id (``int``):
Unique identifier of chat.
participant (:obj:`~pytgcalls.types.GroupCallParticipant()`):
Info about a group call participant
Parameters:
chat_id (``int``):
Unique identifier of chat.
participant (:obj:`~pytgcalls.types.GroupCallParticipant()`):
Info about a group call participant
"""
def __init__(
self,
chat_id: int,
participant: GroupCallParticipant,
):
super().__init__(chat_id)
self.participant = participant
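# Illustrative handling sketch (the handler registration API is not shown in
# this file, so only the isinstance check below is meaningful here):
#
#   def on_update(client, update):
#       if isinstance(update, JoinedGroupCallParticipant):
#           print(update.chat_id, update.participant)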
|
the-stack_106_27678 | import gzip
import colorsys
import numpy as np
import numexpr as ne
from scipy.spatial import cKDTree
from skimage import measure
from vapory import (Camera, Scene, LightSource, Background,
Sphere, Isosurface, Box, Texture,
Pigment, Finish, ContainedBy, Function)
def save_mb_obj(out_file, metaballs, vertices, vcolors=[], vnormals=[]):
"""Save OBJ (+ Metaballs) file
Save OBJ (+ Metaballs) file
Args:
out_file:
metaballs:
vertices:
vcolors:
vnormals:
Returns:
bool:
"""
if out_file.endswith(".gz"):
f_out = gzip.open(out_file, 'wt')
else:
f_out = open(out_file, 'w')
f_out.write("####\n")
f_out.write("#\n")
f_out.write("# Metaballs: {}\n".format(len(metaballs.mbs)))
f_out.write("#\n")
f_out.write("# mth {}\n".format(str(metaballs.mth)))
for mb in metaballs.mbs:
mbstr = "# " + str(mb) + "\n"
f_out.write(mbstr)
f_out.write("#\n")
f_out.write("####\n")
f_out.write("#\n")
f_out.write("# Vertices: {}\n".format(len(vertices)))
f_out.write("#\n")
f_out.write("####\n")
for vi, v in enumerate(vertices):
vstr = "v {} {} {}".format(v[0], v[1], v[2])
if len(vcolors) > 0:
vc = vcolors[vi]
vstr += " {} {} {}".format(vc[0], vc[1], vc[2])
vstr += "\n"
f_out.write(vstr)
f_out.write("# {} vertices\n\n".format(len(vertices)))
if len(vnormals) > 0:
for vn in vnormals:
vnstr = "vn {} {} {}\n".format(vn[0], vn[1], vn[2])
f_out.write(vnstr)
f_out.write("# {} normals\n\n".format(len(vnormals)))
f_out.write("# End of File")
f_out.close()
return True
def load_mb_obj(in_file):
"""Load OBJ (+ Metaballs) file
Load OBJ (+ Metaballs) file
Args:
in_file:
Returns:
metaballs (Metaballs):
vertices:
vcolors:
vnormals:
faces: (0-based)
"""
mth = 0
metaballs = []
vertices = []
vcolors = []
vnormals = []
faces = []
if in_file.endswith(".gz"):
f_in = gzip.open(in_file, 'rt')
else:
f_in = open(in_file, 'r')
for line in f_in:
vals = line.split()
if len(vals) == 0:
continue
if vals[0] == "#":
if (len(vals) > 2):
if vals[1] == "mth":
mth = float(vals[2])
if vals[1] == "mb":
mb = Metaball(float(vals[2]),
float(vals[3]),
float(vals[4]),
float(vals[5]),
float(vals[6]))
metaballs.append(mb)
if vals[0] == "v":
v = [float(x) for x in vals[1:4]]
vertices.append(v)
if len(vals) == 7:
vc = [float(x) for x in vals[4:7]]
vcolors.append(vc)
if vals[0] == "vn":
vn = [float(x) for x in vals[1:4]]
vnormals.append(vn)
if vals[0] == "f":
fvi = []
for f in vals[1:]:
w = f.split("/")
fvi.append(int(w[0]) - 1)
faces.append(fvi)
f_in.close()
print("load "+in_file+": {:d} cells".format(len(vertices)))
return (Metaballs(metaballs, mth),
np.array(vertices),
np.array(vcolors),
np.array(vnormals),
np.array(faces))
def save_rd_prms(out_file,
vs,
mbs,
A, B, C, D, E, F, G,
synUmax, synVmax, ucmax,
dt,
Du, Dv,
RR):
"""Save rd_prms file
Save RDprms file
Args:
out_file:
vs:
mbs:
A, B, C, D, E, F, G:
synUmax, synVmax, ucmax:
dt:
Du, Dv:
RR:
Returns:
bool:
"""
np.savez_compressed(out_file,
vs=vs,
mbs=mbs,
A=A, B=B, C=C, D=D, E=E, F=F, G=G,
synUmax=synUmax, synVmax=synVmax, ucmax=ucmax,
dt=dt, Du=Du, Dv=Dv, RR=RR)
return True
def save_rd_uv(out_file, ucs, vcs):
"""Save rd_uv file
Save rd_uv file
Args:
out_file:
ucs, vcs:
Returns:
bool:
"""
np.savez_compressed(out_file, ucs=ucs, vcs=vcs)
return True
def load_rd_prms(in_file):
"""Load rd_prms file
Load rd_prms file
Args:
in_file:
Returns:
vs:
mbs:
A, B, C, D, E, F, G:
synUmax, synVmax, ucmax:
dt:
Du:
Dv:
RR:
"""
prms = np.load(in_file)
return (prms['vs'],
prms['mbs'].item(),
prms['A'],
prms['B'],
prms['C'],
prms['D'],
prms['E'],
prms['F'],
prms['G'],
prms['synUmax'],
prms['synVmax'],
prms['ucmax'],
prms['dt'],
prms['Du'],
prms['Dv'],
prms['RR'])
def load_rd_uv(in_file):
"""Load rd_uv file
Load rd_uv file
Args:
in_file:
Returns:
ucs, vcs:
"""
uv_data = np.load(in_file)
return uv_data['ucs'], uv_data['vcs']
def load_rd_mb(fnbase, time_point=2000):
"""Load rd_mb file
Load rd_mb file
Args:
fnbase:
Returns:
vs, ucs, A, C:
"""
rd_prms = load_rd_prms(fnbase+"_prms.npz")
vs, mbs = rd_prms[0:2]
    A, B, C, D, E, F, G = rd_prms[2:9]
synUmax, synVmax, ucmax, dt, Du, Dv, RR = rd_prms[9:]
ucs, vcs = load_rd_uv(fnbase+"_{:05}.npz".format(time_point))
ucs = ucs / ucmax
ucs[ucs > 1.0] = 1.0
return vs, ucs, A, C
class Metaball():
"""Metaball object class
Metaball object class
Attributes:
x, y, z:
s:
a:
"""
def __init__(self, x=0, y=0, z=0, s=1.0, a=1.0):
self.x = x
self.y = y
self.z = z
self.s = s
self.a = a
def __str__(self):
return "mb {} {} {} {} {}".format(self.x,
self.y,
self.z,
self.s,
self.a)
class Metaballs():
"""Metaballs class (a set of Metaball object)
Metaballs class (a set of Metaball object)
Attributes:
mbs (Metaball list):
mth:
"""
def __init__(self, mbs=[], mth=0.65):
self.mbs = []
self.mth = mth
if len(mbs) > 0:
self.mbs = mbs
self.update()
def append(self, metaball):
"""Append metaball
Append a metaball
Args:
metaball:
"""
self.mbs.append(metaball)
def update(self):
"""Update metaballs
Update metaballs
"""
self._pre_calc_mb()
def _pre_calc_mb(self):
self.mx = np.array([mb.x for mb in self.mbs])
self.my = np.array([mb.y for mb in self.mbs])
self.mz = np.array([mb.z for mb in self.mbs])
self.ms = np.array([mb.s for mb in self.mbs])
self.ma = np.array([mb.a for mb in self.mbs])
self.mxT = np.array([mb.x for mb in self.mbs])[:, np.newaxis]
self.myT = np.array([mb.y for mb in self.mbs])[:, np.newaxis]
self.mzT = np.array([mb.z for mb in self.mbs])[:, np.newaxis]
self.msT = np.array([mb.s for mb in self.mbs])[:, np.newaxis]
self.maT = np.array([mb.a for mb in self.mbs])[:, np.newaxis]
xmin, xmax, ymin, ymax, zmin, zmax = self.get_min_max()
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.zmin = zmin
self.zmax = zmax
self.marching_cubes()
def set_threshold(self, mth):
self.mth = mth
def to_povray_func(self):
"""Function string for Pov-Ray Isosurface
Function string for Pov-Ray Isosurface
func_str:
mth - sum(mmi * exp(-((x-mxi)**2 + (y-ymi)**2 + (z-mzi)**2)/(2*msi**2))
Returns:
func_str:
"""
func_str = "{}".format(self.mth)
for mb in self.mbs:
func_str += (" - {} "
"* exp(-(pow(x-{},2) + pow(y-{},2) + pow(z-{},2))"
"/ (2 * pow({},2)))"
.format(mb.a, mb.x, mb.y, mb.z, mb.s))
return func_str
def f(self, x, y, z):
"""Metaball value at (x, y, z)
Metaball value at (x, y, z)
metaball_value:
mth - sum(mmi * exp(-((x-mxi)**2 + (y-ymi)**2 + (z-mzi)**2)/(2*msi**2))
Args:
x, y, z:
Returns:
metaball_value:
"""
me = ne.evaluate("maT "
"* exp(-((x-mxT)**2 + (y-myT)**2 + (z-mzT)**2)"
"/ (2*msT**2))",
local_dict={'x': x,
'y': y,
'z': z,
'mxT': self.mxT,
'myT': self.myT,
'mzT': self.mzT,
'msT': self.msT,
'maT': self.maT})
mesum = np.sum(me, axis=0)
return self.mth - mesum
def cr(self, x, y, z, k):
"""Contribution ratios at (x, y, z)
An array of contribution ratio of each metaball
to the metaball_value at (x, y, z)
Args:
x, y, z:
k:
Returns:
cr:
"""
        if k == 0:
            cr_eq = "maT*exp(-((x-mxT)**2+(y-myT)**2+(z-mzT)**2)/(2*msT**2))"
        elif k == 1:
            cr_eq = "1/sqrt((x-mxT)**2+(y-myT)**2+(z-mzT)**2)"
        elif k == 1.5:
            cr_eq = "1/sqrt((x-mxT)**2+(y-myT)**2+(z-mzT)**2)**1.5"
        elif k == 2:
            cr_eq = "1/((x-mxT)**2+(y-myT)**2+(z-mzT)**2)"
        else:
            # guard against silently evaluating an undefined cr_eq below
            raise ValueError("k must be one of 0, 1, 1.5 or 2")
me = ne.evaluate(cr_eq,
local_dict={'x': x,
'y': y,
'z': z,
'mxT': self.mxT,
'myT': self.myT,
'mzT': self.mzT,
'msT': self.msT,
'maT': self.maT})
mesum = np.sum(me, axis=0)
cr = me/mesum
return cr
def grad_f(self, x, y, z):
"""Gradient of metaball_value at (x, y, z)
Gradient of metaball_value at (x, y, z)
Args:
x, y, z:
Returns:
(dfdx, dfdy, dfdz):
"""
str_dfdx = ("maT * (x-mxT)"
"* exp(-((x-mxT)**2+(y-myT)**2+(z-mzT)**2)/(2*msT**2))"
"/ (msT**2)")
dfdx = ne.evaluate(str_dfdx,
local_dict={'x': x,
'y': y,
'z': z,
'mxT': self.mxT,
'myT': self.myT,
'mzT': self.mzT,
'msT': self.msT,
'maT': self.maT})
str_dfdy = ("maT * (y-myT)"
"* exp(-((x-mxT)**2+(y-myT)**2+(z-mzT)**2)/(2*msT**2))"
"/ (msT**2)")
dfdy = ne.evaluate(str_dfdy,
local_dict={'x': x,
'y': y,
'z': z,
'mxT': self.mxT,
'myT': self.myT,
'mzT': self.mzT,
'msT': self.msT,
'maT': self.maT})
str_dfdz = ("maT * (z-mzT)"
"* exp(-((x-mxT)**2+(y-myT)**2+(z-mzT)**2)/(2*msT**2))"
"/ (msT**2)")
dfdz = ne.evaluate(str_dfdz,
local_dict={'x': x,
'y': y,
'z': z,
'mxT': self.mxT,
'myT': self.myT,
'mzT': self.mzT,
'msT': self.msT,
'maT': self.maT})
dfdx_sum = np.sum(dfdx, axis=0)
dfdy_sum = np.sum(dfdy, axis=0)
dfdz_sum = np.sum(dfdz, axis=0)
return dfdx_sum, dfdy_sum, dfdz_sum
def f_v(self, v):
"""Metaball value at v
Metaball value at v
metaball_value:
mth - sum(mmi*exp(-((x-mxi)**2+(y-ymi)**2+(z-mzi)**2)/(2*msi**2))
Args:
v:
Returns:
metaball_value:
"""
return self.f(v[:, 0], v[:, 1], v[:, 2])
def cr_v(self, v, k):
"""Contribution ratios at v
An array of contribution ratio of each metaball
to the metaball_value at v
Args:
v:
k:
Returns:
cr:
"""
return self.cr(v[:, 0], v[:, 1], v[:, 2], k).T
def grad_f_v(self, v):
"""Gradient of metaball_value at v
Gradient of metaball_value at v
Args:
v:
Returns:
grad:
"""
dfdx, dfdy, dfdz = self.grad_f(v[:, 0], v[:, 1], v[:, 2])
return np.c_[dfdx, dfdy, dfdz]
def normal_f_v(self, v):
"""Normal vector at v
Normal vector at v
Args:
v:
Returns:
normal_vec:
"""
gv = self.grad_f_v(v)
gvn = np.linalg.norm(gv, axis=1)
return (gv / gvn[:, np.newaxis])
def get_min_max(self):
"""Loose bounding box for metaballs
Loose bounding box for metaballs
Returns:
xmin, xmax, ymin, ymax, zmin, zmax:
"""
mr = np.sqrt(2 * np.log(1/self.mth)) * self.ms
mr[:] = np.max(mr)
mxmin = self.mx - mr
mxmax = self.mx + mr
mymin = self.my - mr
mymax = self.my + mr
mzmin = self.mz - mr
mzmax = self.mz + mr
mb_xmin_idx = np.argmin(mxmin[self.ma > 0])
mb_xmax_idx = np.argmax(mxmax[self.ma > 0])
mb_ymin_idx = np.argmin(mymin[self.ma > 0])
mb_ymax_idx = np.argmax(mymax[self.ma > 0])
mb_zmin_idx = np.argmin(mzmin[self.ma > 0])
mb_zmax_idx = np.argmax(mzmax[self.ma > 0])
xmin0 = self.mx[mb_xmin_idx] - mr[mb_xmin_idx]
xmax0 = self.mx[mb_xmax_idx] + mr[mb_xmax_idx]
ymin0 = self.my[mb_ymin_idx] - mr[mb_ymin_idx]
ymax0 = self.my[mb_ymax_idx] + mr[mb_ymax_idx]
zmin0 = self.mz[mb_zmin_idx] - mr[mb_zmin_idx]
zmax0 = self.mz[mb_zmax_idx] + mr[mb_zmax_idx]
xmin = xmin0 - (xmax0 - xmin0) * 0.25
xmax = xmax0 + (xmax0 - xmin0) * 0.25
ymin = ymin0 - (ymax0 - ymin0) * 0.25
ymax = ymax0 + (ymax0 - ymin0) * 0.25
zmin = zmin0 - (zmax0 - zmin0) * 0.25
zmax = zmax0 + (zmax0 - zmin0) * 0.25
return xmin, xmax, ymin, ymax, zmin, zmax
def set_min_max(self, xmin, xmax, ymin, ymax, zmin, zmax):
"""Set loose bounding box for metaballs
Set loose bounding box for metaballs
Args:
xmin, xmax, ymin, ymax, zmin, zmax:
"""
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.zmin = zmin
self.zmax = zmax
def get_mb_meshgrid(self, spc=0.02):
"""Meshgrid of loose bounding box for metaballs
Meshgrid of loose bounding box for metaballs
Args:
spc:
Returns:
mb_meshgrid:
xyz_spc:
"""
        kx = int(np.round((self.xmax - self.xmin) / spc))
        ky = int(np.round((self.ymax - self.ymin) / spc))
        kz = int(np.round((self.zmax - self.zmin) / spc))
X, Xspc = np.linspace(self.xmin, self.xmax, kx+1, retstep=True)
Y, Yspc = np.linspace(self.ymin, self.ymax, ky+1, retstep=True)
Z, Zspc = np.linspace(self.zmin, self.zmax, kz+1, retstep=True)
xyz_spc = (Xspc, Yspc, Zspc)
XX, YY, ZZ = np.meshgrid(X, Y, Z, indexing='ij')
XYZs = np.c_[(XX.ravel(), YY.ravel(), ZZ.ravel())]
mb_meshgrid = self.f_v(XYZs).reshape(kx+1, ky+1, kz+1)
return mb_meshgrid, xyz_spc
def marching_cubes(self, spc=0.02):
"""Set initial vertices on metaballs using marching cubes
Set initial vertices on metaballs using marching cubes
Args:
spc:
"""
mb_meshgrid, xyz_spc = self.get_mb_meshgrid(spc)
verts, faces, normals, values = measure.marching_cubes(
mb_meshgrid,
level=0.0,
spacing=xyz_spc,
gradient_direction='ascent',
step_size=1)
verts += np.c_[self.xmin, self.ymin, self.zmin]
self.verts = verts
self.faces = faces
self.normals = normals
self.values = values
self.sa = measure.mesh_surface_area(verts, faces)
def scatter_vertices(self, n=20000):
"""Scatter n vertices on metaballs
Scatter n vertices on metaballs
Args:
n:
Returns:
vs:
"""
vs = self.verts[np.random.choice(len(self.verts), n, replace=False)]
return vs
def effective_radius(self, n):
"""Effective radius
Effective radius for repulsive force
Args:
n:
Returns:
er:
"""
er2 = 5.0 * self.sa / n
er = np.sqrt(er2)
return er
def stick_to_mb(self, vs0, max_ite=10, eps=1.0e-10):
"""Move a point onto the surface of metaball
Move a point onto the surface of metaball
Args:
vs0:
max_ite:
eps:
Returns:
vs:
"""
n = len(vs0)
normal_vs0 = self.normal_f_v(vs0)
ts = np.zeros(n)
delta_ts = np.ones(n)
vs = vs0
for i in range(max_ite):
delta_ts = 1.0 * self.f_v(vs) / np.einsum('ij,ij->i',
self.grad_f_v(vs),
normal_vs0)
ts -= delta_ts
vs = vs0 + ts[:, np.newaxis] * normal_vs0
if (np.abs(delta_ts).max() < eps):
break
return vs
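# Illustrative usage of the classes above (values are made up):
#
#   mbs = Metaballs([Metaball(0.0, 0.0, 0.0, s=1.0, a=1.0),
#                    Metaball(1.2, 0.0, 0.0, s=0.8, a=1.0)], mth=0.65)
#   mbs.f(0.0, 0.0, 0.0)               # negative inside the surface, where the
#                                      # summed Gaussian field exceeds mth
#   seeds = mbs.scatter_vertices(5000) # random subset of marching-cubes verts
#   seeds = mbs.stick_to_mb(seeds)     # project points back onto the surface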
def get_nei_force(vs, mbs):
"""Get neighbor points and repulsive forces
Get neighbor points and repulsive forces
Args:
vs: vertices
mbs: Metaballs
Returns:
nei:
dnei:
fvnei:
fv:
dnei6mean:
dnei6std:
"""
n = len(vs)
er = mbs.effective_radius(n)
f_er = er * 1.0
# repulsive force = alpha * exp(- ((dnei)**2) / (2 * sigma**2))
alpha = 1.0
sigma = 0.3 * f_er
# maximum number of neighbor points to be obtained
maxnei = 50
# threads used by cKDTree query
q_threads = 6
nei = np.empty((n, maxnei+1), dtype=int)
dnei = np.empty((n, maxnei+1), dtype=float)
fnei = np.empty((n, maxnei), dtype=float)
vnei = np.empty((n, maxnei, 3), dtype=float)
fvnei = np.empty((n, maxnei, 3), dtype=float)
fvneisum = np.empty((n, 3), dtype=float)
Mv = np.empty((n, 3, 3), dtype=float)
fv = np.empty((n, 3), dtype=float)
tree = cKDTree(vs)
# get neighbor points
dnei0, nei0 = tree.query(vs, k=maxnei+1, n_jobs=q_threads)
# remove first hit (self)
dnei = dnei0[:, 1:]
nei = nei0[:, 1:]
# vector (neighbor -> self)
vnei = np.einsum('ij,k->ikj', vs, np.ones(maxnei)) - vs[nei]
# repulsive force from neighbor points
# fnei = alpha * np.exp((-dnei**2)/(2*sigma**2))
fnei[dnei < f_er] = alpha * np.exp((-dnei[dnei < f_er]**2)/(2*sigma**2))
fnei[dnei >= f_er] = 0
# repulsive force (vector)
fvnei = np.einsum('ij,ijk->ijk', fnei/np.linalg.norm(vnei, axis=2), vnei)
# sum of repulsive force (vector)
fvneisum = fvnei.sum(axis=1)
# projection matrix to the tangent plane of metaball at v
Mv = (np.einsum('ij,k->kji', np.eye(3), np.ones(n))
- np.einsum('i,ij,ik->ijk',
np.linalg.norm(mbs.grad_f_v(vs), axis=1)**2,
mbs.grad_f_v(vs),
mbs.grad_f_v(vs)))
# tangential component of sum of repulsive forces
fv = np.einsum('ijk,ik->ij', Mv, fvneisum)
# mean/std distance to the nearby 6 points (used for end condition)
dnei6mean = dnei[:, 0:6].mean()
dnei6std = dnei[:, 0:6].std()
return nei, dnei, fvnei, fv, dnei6mean, dnei6std
def render_povray(vs, rotx=0, roty=0, rotz=0,
width=400, height=400, angle=14, antialiasing=0.001):
"""Render vertices using Pov-Ray (Vapory)
Render vertices using Pov-Ray (Vapory)
Args:
vs: vertices
rotx, roty, rotz: rotation angle
width, height:
angle: camera angle
Returns:
rendered_scene:
"""
rot1 = [rotx, 0, 0]
rot2 = [0, roty, 0]
rot3 = [0, 0, rotz]
camera = Camera('location', [0, 0, -25],
'look_at', [0, 0, 0],
'angle', angle,
'right x*image_width/image_height')
light = LightSource([-3, 2, -6], 'color', [1.0, 1.0, 1.0], 'parallel')
light2 = LightSource([2, -2, -6], 'color', [0.6, 0.6, 0.6], 'parallel')
background = Background('color', [1, 1, 1])
spheres = [Sphere(v, 0.05,
Finish('ambient', 0.2, 'diffuse', 0.8, 'phong', 1.0),
Texture(Pigment('color', [1.0, 1.0, 1.0])),
'rotate', rot1,
'rotate', rot2,
'rotate', rot3) for v in vs]
objects = [light, light2, background] + spheres
scene = Scene(camera, objects=objects)
return scene.render('ipython',
width=width, height=height,
antialiasing=antialiasing)
def render_povray_mb(mbs, rotx=0, roty=0, rotz=0,
width=400, height=400, angle=14):
"""Render metaballs using Pov-Ray (Vapory)
Render metaballs using Pov-Ray (Vapory)
Args:
mbs: Metaballs
width, height:
Returns:
rendered_scene:
"""
rot1 = [rotx, 0, 0]
rot2 = [0, roty, 0]
rot3 = [0, 0, rotz]
camera = Camera('location', [0, 0, -25],
'look_at', [0, 0, 0],
'angle', angle,
'right x*image_width/image_height')
light = LightSource([-3, 2, -6], 'color', [1.0, 1.0, 1.0], 'parallel')
# light2 = LightSource([2, -2, -6], 'color', [0.6, 0.6, 0.6], 'parallel')
background = Background('color', [1, 1, 1, 1])
mbs_function = mbs.to_povray_func()
isosurface = Isosurface(Function(mbs_function),
ContainedBy(Box(-5, 5)),
'max_gradient', 1.8,
Pigment('color', [1.0, 0.15, 0.3]),
Finish('phong', 0.7,
'specular', 0.2,
'diffuse', 0.9,
'ambient', 0.1),
'rotate', rot1,
'rotate', rot2,
'rotate', rot3,
'translate', [0, 0, 0],
'no_shadow')
objects = [light, background] + [isosurface]
scene = Scene(camera, objects=objects)
return scene.render('ipython', width=width, height=height)
def make_mb_obj(file, mbs, mva=4e-4,
do_ite=True, dt=1.0e-2, max_ite=5000, cv_end=0.08):
"""Make metaball_OBJ
Make metaball_OBJ (consisting of evenly distributed vertices on metaball)
Args:
file: output file
mbs: Metaballs
mva: mean area per vertex
do_ite: if True, iteration will be performed
dt: iteration time step
max_ite: maximum number of iterations
cv_end: coefficient of variation threshold for end condition
Returns:
vs: vertices
"""
# scatter initial vertices and calculate effective radius
    n = int(np.round(mbs.sa / mva))
print("scatter vertices: ", n)
vs = mbs.scatter_vertices(n)
# iteration using repulsive force
if (do_ite):
for i in range(max_ite):
nei, dnei, fvnei, fv, dnei6mean, dnei6std = get_nei_force(vs, mbs)
print("[ite: {}] dnei6mean={}, dnei6std={}, CV={}"
.format(i, dnei6mean, dnei6std, dnei6std/dnei6mean))
fv_norm = np.linalg.norm(fv, axis=1)
print("fv_norm_mean={}, fv_norm_max={}"
.format(np.mean(fv_norm), np.max(fv_norm)))
if dnei6std/dnei6mean < cv_end:
break
vs += fv * dt
vs = mbs.stick_to_mb(vs)
save_mb_obj(file, mbs, vs)
return vs
def rdmb(vs, mbs, out_file_base, prms, max_ite=2000):
"""Reaction-diffusion on metaball
Reaction-diffusion on metaball
Args:
vs: vertices
mbs: Metaballs
mb_obj_file: mb_obj_file
out_file_base: output file basename
prms: rd parameters A..G
max_ite: maximum number of iterations
"""
nei, dnei, fvnei, fv, dnei6mean, dnei6std = get_nei_force(vs, mbs)
dneiN = dnei / dnei6mean
n = len(vs)
max_nei = len(nei[0])
er = mbs.effective_radius(n)
A, B, C, D, E, F, G = prms
synUmax = 0.23
synVmax = 0.50
ucmax = 6.0
dt = 1.0e-2
Du = 0.5
Dv = 10.0
# RR = 30
RR = 80
u = np.random.rand(n) * ucmax
v = np.random.rand(n) * ucmax
rea_u = np.zeros(n)
rea_v = np.zeros(n)
syn_u = np.zeros(n)
syn_v = np.zeros(n)
Ru = Du / (dneiN**2)
Rv = Dv / (dneiN**2)
Ru[dnei > er] = 0
Rv[dnei > er] = 0
save_rd_prms(out_file_base+"_prms.npz",
vs, mbs,
A, B, C, D, E, F, G,
synUmax, synVmax, ucmax, dt, Du, Dv, RR)
save_rd_uv(out_file_base+"_{:05}.npz".format(0), u, v)
for ite in range(max_ite):
syn_u = A * u - B * v + C
syn_v = E * u - F
syn_u[syn_u < 0] = 0
syn_u[syn_u > synUmax] = synUmax
syn_v[syn_v < 0] = 0
syn_v[syn_v > synVmax] = synVmax
rea_u = syn_u - D * u
rea_v = syn_v - G * v
uu = (Ru * (u[nei] - np.einsum('i,j->ij', u,
np.ones(max_nei)))).sum(axis=1)
vv = (Rv * (v[nei] - np.einsum('i,j->ij', v,
np.ones(max_nei)))).sum(axis=1)
u += (RR * rea_u + uu) * dt
v += (RR * rea_v + vv) * dt
if ((ite+1) % 500) == 0:
print("[ite: {}]".format(ite+1))
fname = out_file_base + "_{:05}.npz".format(max_ite)
save_rd_uv(fname, u, v)
return True
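# The update rule implemented in ``rdmb`` above corresponds to the clipped
# linear (Turing-type) reaction-diffusion system, in the notation of the code:
#
#   du/dt = RR * ( clip(A*u - B*v + C, 0, synUmax) - D*u ) + Du * L[u]
#   dv/dt = RR * ( clip(E*u - F,       0, synVmax) - G*v ) + Dv * L[v]
#
# where L[.] is a discrete Laplacian over the neighbour lists returned by
# ``get_nei_force``:  L[u]_i = sum_j (u_j - u_i) / dneiN_ij**2, with the sum
# restricted to neighbours inside the effective radius (the Ru/Rv masking).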
def rdmb_uni_AC(mb_obj_file, out_file_base, A=0.08, C=0.15, max_ite=2000):
"""Reaction-diffusion on metaball (uniform A, C)
Reaction-diffusion on metaball (uniform A, C)
Args:
mb_obj_file: metaball OBJ file
out_file_base: output file basename
A: parameter A
C: parameter C
max_ite: maximum number of iterations
"""
mbs, vs, vc, vn, fs = load_mb_obj(mb_obj_file)
A = A
B = 0.08
C = C
D = 0.03
E = 0.10
F = 0.12
G = 0.06
prms = (A, B, C, D, E, F, G)
rdmb(vs, mbs, out_file_base, prms, max_ite)
return True
def rdmb_grad_AC(mb_obj_file,
out_file_base,
px0=-1.0, px1=1.0,
pa0=0.08, pa1=0.08,
pc0=0.03, pc1=0.27,
max_ite=2000):
"""Reaction-diffusion on metaball (gradual A, C)
Reaction-diffusion on metaball (gradual A, C)
Args:
mb_obj_file: metaball OBJ file
out_file_base: output file basename
px0, px1: gradation start/end edges
pa0, pa1: A values at edges
pc0, pc1: C values at edges
max_ite: maximum number of iterations
"""
mbs, vs, vc, vn, fs = load_mb_obj(mb_obj_file)
n = len(vs)
# make A gradient
# A = pa * x + pb
pa = (pa1 - pa0)/(px1 - px0)
pb = pa0 - pa * px0
A = np.ones(n) * 0.15
A[:] = vs[:, 0] * pa + pb # A = pa * x + pb
A[vs[:, 0] <= px0] = pa0
A[vs[:, 0] >= px1] = pa1
B = 0.08
# make C gradient
# C = pa * x + pb
pa = (pc1 - pc0)/(px1 - px0)
pb = pc0 - pa * px0
C = np.ones(n) * 0.15
C[:] = vs[:, 0] * pa + pb # C = pa * x + pb
C[vs[:, 0] <= px0] = pc0
C[vs[:, 0] >= px1] = pc1
D = 0.03
E = 0.10
F = 0.12
G = 0.06
prms = (A, B, C, D, E, F, G)
rdmb(vs, mbs, out_file_base, prms, max_ite)
return True
def rdmb_blend_AC(mb_obj_file,
out_file_base,
pas, pcs, ccrs,
max_ite=2000):
"""Reaction-diffusion on metaball (blended A, C)
Reaction-diffusion on metaball (blended A, C)
params A, C for each vertex are calculated based on
the contribution ratio of each metaball to the vertex
Args:
mb_obj_file: metaball OBJ file
out_file_base: output file basename
pas, pcs: np array of params A and C for each metaball
ccrs: coefficient for contribution ratio (1/0)
max_ite: maximum number of iterations
"""
mbs, vs, vc, vn, fs = load_mb_obj(mb_obj_file)
# crs: corrected contribution ratios
crs = mbs.cr_v(vs, 1.5) * ccrs
# normalization
crs = crs / np.c_[np.linalg.norm(crs, ord=1, axis=1)]
# make A gradient
A = np.ravel(np.dot(crs, np.c_[pas]))
B = 0.08
# make C gradient
C = np.ravel(np.dot(crs, np.c_[pcs]))
D = 0.03
E = 0.10
F = 0.12
G = 0.06
prms = (A, B, C, D, E, F, G)
rdmb(vs, mbs, out_file_base, prms, max_ite)
return True
def rdmb_povray_save(out_file,
vs,
ucs, vcs,
width=800, height=600,
rotx=0, roty=0, rotz=0,
angle=14):
"""Render and save RD results using Pov-Ray
Render and save RD results using Pov-Ray
Args:
out_file: output file
vs: vertices
ucs, vcs: u/v conc.
width, height: width and height of output image
rotx, roty, rotz: rotation angle
angle: camera angle
"""
ucmax = 6.0
ucs = ucs / ucmax
ucs[ucs > 1.0] = 1.0
# ucs = ucs / np.max(ucs)
rot1 = [rotx, 0, 0]
rot2 = [0, roty, 0]
rot3 = [0, 0, rotz]
camera = Camera('location', [0, 0, -25],
'look_at', [0, 0, 0],
'angle', angle,
'right x*image_width/image_height')
light = LightSource([-3, 2, -6], 'color', [1.0, 1.0, 1.0], 'parallel')
light2 = LightSource([2, -2, -6], 'color', [0.6, 0.6, 0.6], 'parallel')
background = Background('color', [1, 1, 1, 1])
spheres = [Sphere(v, 0.02,
Finish('ambient', 0.2, 'diffuse', 0.8, 'phong', 1.0),
Texture(Pigment('color',
[0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),
'rotate', rot1,
'rotate', rot2,
'rotate', rot3) for v, uc in zip(vs, ucs)]
objects = [light, light2, background] + spheres
scene = Scene(camera, objects=objects)
scene.render(out_file, width=width, height=height,
output_alpha=True, antialiasing=0.001,
tempfile=out_file+"__temp__.pov")
def rdmb_povray(file_base,
time_point=2000,
width=800, height=600,
angle=14):
"""Load RD results and Render/Save image using Pov-Ray
Load RD results and Render/Save image using Pov-Ray
Args:
file_base:
time_point:
width, height:
angle:
Returns:
file_png
"""
file_prms = file_base + "_prms.npz"
vs, *_ = load_rd_prms(file_prms)
file_uv = file_base + "_{:05}.npz".format(time_point)
file_png = file_base + "_{:05}.png".format(time_point)
ucs, vcs = load_rd_uv(file_uv)
rdmb_povray_save(file_png,
vs,
ucs, vcs,
width=width, height=height,
rotx=0, roty=0, rotz=0,
angle=angle)
return file_png
def rdmb_povray_save_q(out_file,
vs,
ucs, vcs,
width=800, height=600,
rotx=0, roty=0, rotz=0,
angle=14):
"""Render and save RD results using Pov-Ray (for quantification)
Render and save RD results using Pov-Ray (for quantification)
Args:
out_file: output file
vs: vertices
ucs: u conc.
vcs: v conc.
rotx, roty, rotz: rotation angle
width, height: width and height of output image
angle: camera angle
"""
ucmax = 6.0
ucs = ucs / ucmax
ucs[ucs > 1.0] = 1.0
# ucs = ucs / np.max(ucs)
rot1 = [rotx, 0, 0]
rot2 = [0, roty, 0]
rot3 = [0, 0, rotz]
camera = Camera('location', [0, 0, -25],
'look_at', [0, 0, 0],
'angle', angle,
'right x*image_width/image_height')
light = LightSource([0, 0, -10],
'color', [1.0, 1.0, 1.0], 'parallel', 'shadowless')
light1 = LightSource([-10, 0, 0],
'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')
light2 = LightSource([10, 0, 0],
'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')
light3 = LightSource([0, -10, 0],
'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')
light4 = LightSource([0, 10, 0],
'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')
background = Background('color', [1, 1, 1, 1])
spheres = [Sphere(v, 0.02,
Finish('ambient', 1.0),
Texture(Pigment('color',
[0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),
'rotate', rot1,
'rotate', rot2,
'rotate', rot3) for v, uc in zip(vs, ucs)]
objects = [light, light1, light2, light3, light4, background] + spheres
scene = Scene(camera, objects=objects)
scene.render(out_file, width=width, height=height,
output_alpha=True, antialiasing=0.001,
tempfile=out_file+"__temp__.pov")
def rdmb_povray_q(file_base,
time_point=2000,
width=800, height=600,
angle=14):
"""Load RD results and Render/Save image using Pov-Ray (for quantification)
Load RD results and Render/Save image using Pov-Ray (for quantification)
Args:
file_base:
time_point:
width, height:
angle:
Returns:
file_png
"""
file_prms = file_base + "_prms.npz"
vs, *_ = load_rd_prms(file_prms)
file_uv = file_base + "_{:05}.npz".format(time_point)
file_png = file_base + "_{:05}.png".format(time_point)
ucs, vcs = load_rd_uv(file_uv)
rdmb_povray_save_q(file_png,
vs,
ucs, vcs,
width=width, height=height,
rotx=0, roty=0, rotz=0,
angle=angle)
return file_png
def rdmb_povray_color(file_base,
time_point=2000,
width=800, height=600,
rotx=0, roty=0, rotz=0,
angle=14,
mode="C"):
"""Render and save RD results using Pov-Ray (color)
Render and save RD results using Pov-Ray
with color indicating parameter values
Args:
file_base:
time_point:
width, height:
rotx, roty, rotz:
angle:
mode:
"""
vs, ucs, As, Cs = load_rd_mb(file_base)
file_png = file_base + "_color_{:05}.png".format(time_point)
tempfile = file_png[:-4] + "__temp__" + ".pov"
camera = Camera('location', [0, 0, -25],
'look_at', [0, 0, 0],
'angle', angle,
'right x*image_width/image_height')
light = LightSource([-3, 2, -6],
'color', [1.0, 1.0, 1.0], 'parallel')
light2 = LightSource([2, -2, -6],
'color', [0.2, 0.2, 0.2], 'parallel')
background = Background('color', [1, 1, 1, 1])
spheres = []
spheres += sph(vs, ucs, As, Cs,
0, 0, 0,
rotx=rotx, roty=roty, rotz=rotz,
mode=mode)
objects = [light, light2, background] + spheres
scene = Scene(camera, objects=objects)
scene.render(file_png,
width=width, height=height,
tempfile=tempfile,
output_alpha=True, antialiasing=0.001)
return file_png
def sph(vs, ucs, A, C, x, y, z, rotx=0, roty=0, rotz=0, mode="C"):
"""Colored spheres for rendering RD results
Colored spheres for rendering RD results
Args:
vs: vertices
ucs: u conc.
A, C: RD parameters
x, y, z: translation
rotx, roty, rotz: rotation angle
mode: color mode for parameters
"""
sph = []
if mode == "A":
nA = (A-0.07)/(0.12-0.07)
nC = (C+0.1)/(0.31+0.1)
elif mode == "C":
nA = (A-0.07)/(0.12-0.07)
nC = C/0.307
elif mode == "AC":
nA = (A-0.07)/(0.12-0.07)
nC = (C+0.1)/(0.31+0.1)
else:
nA = (A-0.07)/(0.12-0.07)
nC = C/0.307
if (type(nA) is np.float64):
nA = np.full(len(vs), nA)
if (type(nC) is np.float64):
nC = np.full(len(vs), nC)
for v, uc, a, c in zip(vs, ucs, nA, nC):
if mode == "A":
H0 = a
L0 = 1/(1+np.exp((-2.8*((a-0.52)*2.4))))
# R0, G0, B0 = colorsys.hls_to_rgb(0.02+H0*0.35, 0.5-L0*0.4, 1.0)
R0, G0, B0 = colorsys.hls_to_rgb(1.0-H0*0.40, 0.5-L0*0.4, 1.0)
elif mode == "C":
H0 = c
L0 = 1/(1+np.exp((-2.8*((c-0.52)*2.4))))
R0, G0, B0 = colorsys.hls_to_rgb(0.02+H0*0.35, 0.5-L0*0.4, 1.0)
elif mode == "AC":
R0 = a*1.0
# G0 = max(0.8-(max(a+c, 0)), 0)
G0 = 0.0
B0 = c*1.0
else:
R0 = 0.3
G0 = 0.2
B0 = 0.2
R1 = 1.0 - R0
G1 = 1.0 - G0
B1 = 1.0 - B0
sph.append(Sphere(v, 0.022,
Texture(Pigment('color',
[R0+uc*R1, G0+uc*G1, B0+uc*B1]),
Finish('phong', 0.7,
'specular', 0.2,
'diffuse', 0.9,
'ambient', 0.1)),
'rotate', [rotx, 0, 0],
'rotate', [0, roty, 0],
'rotate', [0, 0, rotz],
'translate', [x, y, z],
'no_shadow'))
return sph
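# Illustrative end-to-end pipeline (not part of the original module).  The file
# names and parameter values are placeholders; only the call sequence reflects
# how the helpers above are meant to be combined.
def _example_pipeline():
    mbs = Metaballs([Metaball(0.0, 0.0, 0.0, 1.0, 1.0),
                     Metaball(1.5, 0.0, 0.0, 0.8, 1.0)], mth=0.65)
    # distribute points evenly on the implicit surface and save them
    make_mb_obj("demo_mb.obj.gz", mbs)
    # run the reaction-diffusion simulation on those points
    rdmb_uni_AC("demo_mb.obj.gz", "demo_rd", A=0.08, C=0.15, max_ite=2000)
    # render the u concentration at the final time point with Pov-Ray
    return rdmb_povray("demo_rd", time_point=2000)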
|
the-stack_106_27679 | import sys
import os
import os.path
import logging
import logging.config
import tornado.log
from nanotools.common import ensure_dir
FMT = "[%(asctime)s][%(levelname)s] - %(filename)s:%(lineno)s - %(message)s"
SERVER_LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": FMT,
"encoding": "utf-8"
}
},
"handlers": {
"nanotools": {
"level": "DEBUG",
"filters": None,
"class": "logging.handlers.TimedRotatingFileHandler",
"filename": "./logs/nanotools.log",
"formatter": "default",
"encoding": "utf-8",
"when": "D"
}
},
"loggers": {
"nanotools": {
"handlers": [
"nanotools"
],
"level": "DEBUG",
"encoding": "utf-8",
"propagate": True
}
}
}
# Create the log directory
log_path = os.path.join('.', 'logs')
if not ensure_dir(log_path, create=True):
raise RuntimeError(f'Could not create logdir: {log_path}')
if SERVER_LOGGING_CONFIG:
logging.config.dictConfig(SERVER_LOGGING_CONFIG)
tornado.log.enable_pretty_logging()
logger = logging.getLogger('nanotools')
stdout_handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(stdout_handler)
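# Illustrative note (not part of the original module): because dictConfig()
# registers the 'nanotools' logger globally, other modules can reuse the
# configuration above simply by name, e.g.
#
#     import logging
#     log = logging.getLogger('nanotools')
#     log.info('server started')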
|
the-stack_106_27680 | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tรจs <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class InputBotInlineResultGame(Object):
"""Attributes:
ID: ``0x4fa417f2``
Args:
id: ``str``
short_name: ``str``
send_message: Either :obj:`InputBotInlineMessageMediaAuto <pyrogram.api.types.InputBotInlineMessageMediaAuto>`, :obj:`InputBotInlineMessageText <pyrogram.api.types.InputBotInlineMessageText>`, :obj:`InputBotInlineMessageMediaGeo <pyrogram.api.types.InputBotInlineMessageMediaGeo>`, :obj:`InputBotInlineMessageMediaVenue <pyrogram.api.types.InputBotInlineMessageMediaVenue>`, :obj:`InputBotInlineMessageMediaContact <pyrogram.api.types.InputBotInlineMessageMediaContact>` or :obj:`InputBotInlineMessageGame <pyrogram.api.types.InputBotInlineMessageGame>`
"""
ID = 0x4fa417f2
def __init__(self, id: str, short_name: str, send_message):
self.id = id # string
self.short_name = short_name # string
self.send_message = send_message # InputBotInlineMessage
@staticmethod
def read(b: BytesIO, *args) -> "InputBotInlineResultGame":
# No flags
id = String.read(b)
short_name = String.read(b)
send_message = Object.read(b)
return InputBotInlineResultGame(id, short_name, send_message)
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
# No flags
b.write(String(self.id))
b.write(String(self.short_name))
b.write(self.send_message.write())
return b.getvalue()
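# Illustrative note (not part of the generated code): a result is built from an
# id, the game's short name and one of the InputBotInlineMessage* objects, and
# write() serializes it with the constructor ID 0x4fa417f2 prepended, e.g.
#
#     result = InputBotInlineResultGame("1", "my_game", send_message)
#     raw = result.write()
#
# where `send_message` is assumed to be an already constructed
# InputBotInlineMessageGame (or similar) instance.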
|
the-stack_106_27681 | import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import Callback
import math
class OneCycleScheduler(Callback):
def __init__(self, start_lr=1e-4, max_lr=3e-3, moms=None, switch_point=0.3):
self.start_lr = start_lr
self.max_lr = max_lr
self.switch_point = switch_point
self.iteration = 0
if moms:
self.cycle_moms = True
self.max_mom = moms[0]
self.min_mom = moms[1]
else:
self.cycle_moms = False
def on_train_begin(self, logs=None):
self.n = self.params['steps'] * self.params['epochs']
self.p1 = int(self.n * self.switch_point)
self.p2 = self.n-self.p1
K.set_value(self.model.optimizer.lr, self.start_lr)
if self.cycle_moms:
K.set_value(self.model.optimizer.momentum, self.max_mom)
def on_train_batch_end(self, batch, logs=None):
K.set_value(self.model.optimizer.lr, self.lr_sched())
if self.cycle_moms:
K.set_value(self.model.optimizer.momentum, self.mom_sched())
self.iteration += 1
def lr_sched(self):
i = self.iteration
p1 = self.p1
p2 = self.p2
if i <= p1:
pos = i / p1
return self.cos_sched(self.start_lr, self.max_lr, pos)
else:
pos = (i-p1) / p2
return self.cos_sched(self.max_lr, 0., pos)
def mom_sched(self):
i = self.iteration
p1 = self.p1
p2 = self.p2
if i <= p1:
pos = i / p1
return self.cos_sched(self.max_mom, self.min_mom, pos)
else:
pos = (i-p1) / p2
return self.cos_sched(self.min_mom, self.max_mom, pos)
def cos_sched(self, start, end, pos):
return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
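# Illustrative sketch (not part of the original file): the callback is meant to
# be passed to Keras fit(); the model, data and hyperparameters below are
# hypothetical placeholders.
def _example_fit(model, x_train, y_train):
    schedule = OneCycleScheduler(start_lr=1e-4, max_lr=3e-3,
                                 moms=(0.95, 0.85), switch_point=0.3)
    # Momentum cycling assumes an optimizer with a `momentum` attribute (e.g. SGD).
    return model.fit(x_train, y_train, epochs=5, batch_size=64,
                     callbacks=[schedule])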
|
the-stack_106_27682 | import argparse
import os
import shutil
import sys
import checks as submission_checks
import constants
import report
def verify_submission(args):
root_dir = args.root
public_key = args.public_key
private_key = args.private_key
encrypt_out = args.encrypt_out
decrypt_out = args.decrypt_out
# validate args
if any([public_key, encrypt_out]) and not all([public_key, encrypt_out]):
print("--encrypt-key and --encrypt-out must be present togetger.")
sys.exit(1)
if any([private_key, decrypt_out]) and not all([private_key, decrypt_out]):
print("--decrypt-key and --decrypt-out must be present together.")
sys.exit(1)
if all([private_key, public_key]):
print("--encrypt-key and --decrypt-key cannot be present together.")
sys.exit(1)
if any([public_key, private_key]):
import crypto
# if decrypt-key is provided, then decrypt the submission, save it to
# decrypt-out and point submission root to the decrypted directory
if private_key:
try:
crypto.decrypt_submission(private_key, root_dir, decrypt_out)
except Exception as e:
print("Unable to decrypt submission: {}".format(str(e)))
sys.exit(1)
print("Decrypted submission saved at {}".format(decrypt_out))
root_dir = decrypt_out
# perform verifications and extract results
checks = submission_checks.SubmissionChecks()
checks.verify_dirs_and_files(root_dir)
checks.verify_metadata()
checks.compile_results()
checks.report.print_report()
checks.report.print_results()
# if encrypt-key is provided, then encrypt the submission
# and save it to encrypt-out
if public_key:
try:
crypto.encrypt_submission(public_key, root_dir, encrypt_out)
except Exception as e:
print("Unable to encrypt submission: {}".format(str(e)))
sys.exit(1)
print("Encrypted submission saved at {}".format(encrypt_out))
def main():
parser = argparse.ArgumentParser(description="Verify MLPerf submission.")
parser.add_argument("root", metavar="SUBMISSION_ROOT",
help="submission root directory")
parser.add_argument("--encrypt-key", dest="public_key", default=None,
help="public key for encrypting log files")
parser.add_argument("--encrypt-out", dest="encrypt_out", default=None,
help="output path for encrypted submission")
parser.add_argument("--decrypt-key", dest="private_key", default=None,
help="private key for decrypting log files")
parser.add_argument("--decrypt-out", dest="decrypt_out", default=None,
help="output path for decrypted submission")
args = parser.parse_args()
verify_submission(args)
if __name__ == "__main__":
main()
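# Illustrative invocations (script name and paths are hypothetical), matching
# the argparse setup above; encrypt and decrypt options cannot be combined:
#
#     python verify_submission.py ./my_submission
#     python verify_submission.py ./my_submission --encrypt-key pub.pem --encrypt-out ./encrypted
#     python verify_submission.py ./encrypted --decrypt-key priv.pem --decrypt-out ./decrypted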
|
the-stack_106_27683 | # team = 'BLUE'
# rcj_soccer_player controller - ROBOT Y1
# Feel free to import built-in libraries
import math
# You can also import scripts that you put into the folder with controller
import rcj_soccer_robot
import utils
class MyRobot(rcj_soccer_robot.RCJSoccerRobot):
def run(self):
if self.name[0] == 'Y':
t_m = 1
else:
t_m = -1
frameCounter = 0
while self.robot.step(rcj_soccer_robot.TIME_STEP) != -1:
if self.is_new_data():
data = self.get_new_data()
# Get the position of our robot
robot_pos = data[self.name]
# Get the position of the ball
ball_pos = data['ball']
# Get angle between the robot and the ball
# and between the robot and the north
ball_angle, robot_angle = self.get_angles(ball_pos, robot_pos)
# Compute the speed for motors
direction = utils.get_direction(ball_angle)
# If the robot has the ball right in front of it, go forward,
# rotate otherwise
if direction == 0:
left_speed = -10
right_speed = -10
elif direction == 2:
left_speed = 10
right_speed = 10
else:
left_speed = direction * 4
right_speed = direction * -4
# Set the speed to motors
self.left_motor.setVelocity(left_speed)
self.right_motor.setVelocity(right_speed)
frameCounter += 1
my_robot = MyRobot()
my_robot.run()
|
the-stack_106_27684 | #Necessary packages
import os
import time
import ujson as json
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
class MySet(Dataset):
"""Load complete data
Args:
--epochs 1 --batch_size 32 --model brits
Returns:
- rec
"""
def __init__(self):
super(MySet, self).__init__()
self.content = open('./json/json').readlines()
#Return evenly spaced values of the length of the dataset
indices = np.arange(len(self.content))
        #Randomly draw about one fifth of the indices (with replacement) to use as validation samples
val_indices = np.random.choice(indices, len(self.content) // 5)
self.val_indices = set(val_indices.tolist())
#Helper method for length
def __len__(self):
return len(self.content)
#Loads JSON data
def __getitem__(self, idx):
rec = json.loads(self.content[idx])
        #Flag whether this record is used for training (1) or held out for validation (0)
if idx in self.val_indices:
rec['is_train'] = 0
else:
rec['is_train'] = 1
return rec
def collate_fn(recs):
forward = list(map(lambda x: x['forward'], recs))
backward = list(map(lambda x: x['backward'], recs))
def to_tensor_dict(recs):
values = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['values'], r)), recs)))
masks = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['masks'], r)), recs)))
deltas = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['deltas'], r)), recs)))
#forwards = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['forwards'], r)), recs)))
evals = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['evals'], r)), recs)))
eval_masks = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['eval_masks'], r)), recs)))
#return {'values': values, 'forwards': forwards, 'masks': masks, 'deltas': deltas, 'evals': evals, 'eval_masks': eval_masks}
return {'values': values, 'masks': masks, 'deltas': deltas, 'evals': evals, 'eval_masks': eval_masks}
ret_dict = {'forward': to_tensor_dict(forward), 'backward': to_tensor_dict(backward)}
ret_dict['labels'] = torch.FloatTensor(list(map(lambda x: x['label'], recs)))
ret_dict['is_train'] = torch.FloatTensor(list(map(lambda x: x['is_train'], recs)))
return ret_dict
def get_loader(batch_size = 64, shuffle = True):
#Returns a batched and shuffled dataset
data_set = MySet()
data_iter = DataLoader(dataset = data_set, \
batch_size = batch_size, \
num_workers = 4, \
shuffle = shuffle, \
pin_memory = True, \
collate_fn = collate_fn
)
return data_iter
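# Illustrative sketch (not part of the original file): how the loader above is
# typically consumed; the batch size is an arbitrary example value.
def _example_iterate():
    for batch in get_loader(batch_size=32, shuffle=True):
        values = batch['forward']['values']   # (batch, seq_len, features)
        masks = batch['forward']['masks']     # observation masks from the preprocessed JSON
        labels = batch['labels']
        # ... feed `batch` to a BRITS-style imputation model here ...
        break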
|
the-stack_106_27686 | import sys
import time
def create_versioned_files(src_filename, filenames):
timestamp = int(time.time())
with open(src_filename, encoding='utf-8') as html_file:
html_file_content = html_file.read()
for filename in filenames:
usages_count = html_file_content.count(filename)
if usages_count != 1:
print('ERROR: Found {} usages for file {} (expected exactly 1)'.format(usages_count, filename))
return
new_filename = "{}?v={}".format(filename, timestamp)
html_file_content = html_file_content.replace(filename, new_filename)
with open('versioned.' + src_filename, mode="w", encoding="utf-8") as f:
f.write(html_file_content)
if __name__ == '__main__':
create_versioned_files(sys.argv[1], sys.argv[2:])
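# Illustrative usage (file names are hypothetical): the first argument is the
# HTML file, the rest are asset names referenced exactly once inside it, e.g.
#
#     python create_versioned_files.py index.html css/app.css js/app.js
#
# which writes `versioned.index.html` with `?v=<timestamp>` appended to each asset.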
|
the-stack_106_27687 | """Code and data structures for storing and displaying errors."""
from __future__ import print_function
import collections
import csv
import logging
import re
import sys
from pytype import abstract
from pytype import debug
from pytype import function
from pytype import mixin
from pytype import utils
from pytype.pytd import optimize
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import slots
from pytype.pytd import visitors
import six
# Usually we call the logger "log" but that name is used quite often here.
_log = logging.getLogger(__name__)
# "Error level" enum for distinguishing between warnings and errors:
SEVERITY_WARNING = 1
SEVERITY_ERROR = 2
# The set of known error names.
_ERROR_NAMES = set()
# The current error name, managed by the error_name decorator.
_CURRENT_ERROR_NAME = utils.DynamicVar()
# Max number of calls in the traceback string.
MAX_TRACEBACK_LENGTH = 3
# Max number of tracebacks to show for the same error.
MAX_TRACEBACKS = 3
# Marker indicating the start of a traceback.
TRACEBACK_MARKER = "Called from (traceback):"
# Symbol representing an elided portion of the stack.
_ELLIPSIS = object()
def _error_name(name):
"""Decorate a function so that it binds the current error name."""
_ERROR_NAMES.add(name)
def wrap(func):
def invoke(*args, **kwargs):
with _CURRENT_ERROR_NAME.bind(name):
return func(*args, **kwargs)
return invoke
return wrap
def _maybe_truncate_traceback(traceback):
"""Truncate the traceback if it is too long.
Args:
traceback: A list representing an error's traceback. There should be one
list item per entry in the traceback (in the right order); beyond that,
this function does not care about the item types.
Returns:
The traceback, possibly with some items removed and an _ELLIPSIS inserted.
Guaranteed to be no longer than MAX_TRACEBACK_LENGTH.
"""
if len(traceback) > MAX_TRACEBACK_LENGTH:
return traceback[:MAX_TRACEBACK_LENGTH-2] + [_ELLIPSIS, traceback[-1]]
else:
return traceback
def _make_traceback_str(frames):
"""Turn a stack of frames into a traceback string."""
if len(frames) < 2 or (
frames[-1].f_code and not frames[-1].f_code.get_arg_count()):
# A traceback is usually unnecessary when the topmost frame has no
# arguments. If this frame ran during module loading, caching prevented it
# from running again without a traceback, so we drop the traceback manually.
return None
frames = frames[:-1]
frames = _maybe_truncate_traceback(frames)
traceback = []
format_line = "line %d, in %s"
for frame in frames:
if frame is _ELLIPSIS:
line = "..."
elif frame.current_opcode.code.co_name == "<module>":
line = format_line % (frame.current_opcode.line, "current file")
else:
line = format_line % (frame.current_opcode.line,
frame.current_opcode.code.co_name)
traceback.append(line)
return TRACEBACK_MARKER + "\n " + "\n ".join(traceback)
def _dedup_opcodes(stack):
"""Dedup the opcodes in a stack of frames."""
deduped_stack = []
for frame in stack:
if frame.current_opcode and (
not deduped_stack or
frame.current_opcode.line != deduped_stack[-1].current_opcode.line):
# We can have consecutive opcodes with the same line number due to, e.g.,
# a set comprehension. The first opcode we encounter is the one with the
# real method name, whereas the second's method name is something like
# <setcomp>, so we keep the first.
deduped_stack.append(frame)
return deduped_stack
def _compare_traceback_strings(left, right):
"""Try to compare two traceback strings.
Two traceback strings are comparable if they are equal, or if one ends with
the other. For example, these two tracebacks are comparable:
Traceback:
line 1, in <module>
line 2, in foo
Traceback:
line 2, in foo
and the first is greater than the second.
Args:
left: A string or None.
right: A string or None.
Returns:
None if the inputs aren't comparable, else an integer.
"""
if left == right:
return 0
left = left[len(TRACEBACK_MARKER):] if left else ""
right = right[len(TRACEBACK_MARKER):] if right else ""
if left.endswith(right):
return 1
elif right.endswith(left):
return -1
else:
return None
def _function_name(name, capitalize=False):
builtin_prefix = "__builtin__."
if name.startswith(builtin_prefix):
ret = "built-in function %s" % name[len(builtin_prefix):]
else:
ret = "function %s" % name
if capitalize:
return ret[0].upper() + ret[1:]
else:
return ret
class CheckPoint(object):
"""Represents a position in an error log."""
def __init__(self, log, position):
self.log = log
self.position = position
class Error(object):
"""Representation of an error in the error log.
Attributes:
name: The error name.
bad_call: Optionally, a `pytype.function.BadCall` of details of a bad
function call.
details: Optionally, a string of message details.
filename: The file in which the error occurred.
lineno: The line number at which the error occurred.
message: The error message string.
methodname: The method in which the error occurred.
severity: The error level (error or warning), an integer.
keyword: Optionally, the culprit keyword in the line where error is.
e.g.,
message = "No attribute '_submatch' on BasePattern"
keyword = _submatch
keyword_context: Optionally, a string naming the object on which `keyword`
occurs. e.g. the fully qualified module name that a
non-existent function doesn't exist on.
traceback: Optionally, an error traceback.
"""
def __init__(self, severity, message, filename=None, lineno=0,
methodname=None, details=None, traceback=None, keyword=None,
keyword_context=None, bad_call=None):
name = _CURRENT_ERROR_NAME.get()
assert name, ("Errors must be created from a caller annotated "
"with @error_name.")
# Required for every Error.
self._severity = severity
self._message = message
self._name = name
# Optional information about the error.
self._details = details
# Optional information about error position.
# TODO(dbaum): Do not allow filename (and maybe lineno) of None.
self._filename = filename
self._lineno = lineno or 0
self._methodname = methodname
self._traceback = traceback
self._keyword_context = keyword_context
self._keyword = keyword
self._bad_call = bad_call
@classmethod
def with_stack(cls, stack, severity, message, **kwargs):
"""Return an error using a stack for position information.
Args:
stack: A list of state.Frame or state.SimpleFrame objects.
severity: The error level (error or warning), an integer.
message: The error message string.
**kwargs: Additional keyword args to pass onto the class ctor.
Returns:
An Error object.
"""
stack = _dedup_opcodes(stack) if stack else None
opcode = stack[-1].current_opcode if stack else None
if opcode is None:
return cls(severity, message, **kwargs)
else:
return cls(severity, message, filename=opcode.code.co_filename,
lineno=opcode.line, methodname=opcode.code.co_name,
traceback=_make_traceback_str(stack), **kwargs)
@classmethod
def for_test(cls, severity, message, name, **kwargs):
"""Create an _Error with the specified name, for use in tests."""
with _CURRENT_ERROR_NAME.bind(name):
return cls(severity, message, **kwargs)
@property
def name(self):
return self._name
@property
def lineno(self):
return self._lineno
@property
def filename(self):
return self._filename
@property
def message(self):
message = self._message
if self._details:
message += "\n" + self._details
if self._traceback:
message += "\n" + self._traceback
return message
@property
def traceback(self):
return self._traceback
@property
def methodname(self):
return self._methodname
@property
def bad_call(self):
return self._bad_call
@property
def details(self):
return self._details
@property
def keyword(self):
return self._keyword
@property
def keyword_context(self):
return self._keyword_context
def _position(self):
"""Return human-readable filename + line number."""
method = ", in %s" % self._methodname if self._methodname else ""
if self._filename:
return "File \"%s\", line %d%s" % (self._filename,
self._lineno,
method)
elif self._lineno:
return "Line %d%s" % (self._lineno, method)
else:
return ""
def __str__(self):
pos = self._position()
if pos:
pos += ": "
text = "%s%s [%s]" % (pos, self._message.replace("\n", "\n "), self._name)
if self._details:
text += "\n " + self._details.replace("\n", "\n ")
if self._traceback:
text += "\n" + self._traceback
return text
def drop_traceback(self):
with _CURRENT_ERROR_NAME.bind(self._name):
return self.__class__(
severity=self._severity,
message=self._message,
filename=self._filename,
lineno=self._lineno,
methodname=self._methodname,
details=self._details,
traceback=None)
class ErrorLogBase(object):
"""A stream of errors."""
def __init__(self):
self._errors = []
# An error filter (initially None)
self._filter = None
def __len__(self):
return len(self._errors)
def __iter__(self):
return iter(self._errors)
def __getitem__(self, index):
return self._errors[index]
def is_valid_error_name(self, name):
"""Return True iff name was defined in an @error_name() decorator."""
return name in _ERROR_NAMES
def set_error_filter(self, filt):
"""Set the error filter.
Args:
filt: A function or callable object that accepts a single argument of
type Error and returns True if that error should be included in the
log. A filter of None will add all errors.
"""
self._filter = filt
def has_error(self):
"""Return true iff an Error with SEVERITY_ERROR is present."""
# pylint: disable=protected-access
return any(e._severity == SEVERITY_ERROR for e in self._errors)
def _add(self, error):
if self._filter is None or self._filter(error):
_log.info("Added error to log: %s\n%s", error.name, error)
if _log.isEnabledFor(logging.DEBUG):
_log.debug(debug.stack_trace(limit=1).rstrip())
self._errors.append(error)
def warn(self, stack, message, *args):
self._add(Error.with_stack(stack, SEVERITY_WARNING, message % args))
def error(self, stack, message, details=None, keyword=None, bad_call=None,
keyword_context=None):
self._add(Error.with_stack(stack, SEVERITY_ERROR, message, details=details,
keyword=keyword, bad_call=bad_call,
keyword_context=keyword_context))
def save(self):
"""Returns a checkpoint that represents the log messages up to now."""
return CheckPoint(self, len(self._errors))
def revert_to(self, checkpoint):
assert checkpoint.log is self
self._errors = self._errors[:checkpoint.position]
def print_to_csv_file(self, filename):
"""Print the errorlog to a csv file."""
with open(filename, "w") as f:
csv_file = csv.writer(f, delimiter=",")
for error in self.unique_sorted_errors():
# pylint: disable=protected-access
# TODO(kramm): Add _methodname
if error._details and error._traceback:
details = error._details + "\n\n" + error._traceback
elif error._traceback:
details = error._traceback
else:
details = error._details
csv_file.writerow(
[error._filename,
error._lineno,
error._name,
error._message,
details])
def print_to_file(self, fi):
for error in self.unique_sorted_errors():
print(error, file=fi)
def unique_sorted_errors(self):
"""Gets the unique errors in this log, sorted on filename and lineno."""
unique_errors = collections.OrderedDict()
for error in self._sorted_errors():
error_without_traceback = str(error.drop_traceback())
if error_without_traceback not in unique_errors:
unique_errors[error_without_traceback] = [error]
continue
errors = unique_errors[error_without_traceback]
for previous_error in list(errors): # make a copy, since we modify errors
traceback_cmp = _compare_traceback_strings(error.traceback,
previous_error.traceback)
if traceback_cmp is None:
# We have multiple bad call sites, e.g.,
# def f(x): x + 42
# f("hello") # error
# f("world") # same error, different backtrace
# so we'll report this error multiple times with different backtraces.
continue
elif traceback_cmp < 0:
# If the current traceback is shorter, use the current error instead
# of the previous one.
errors.remove(previous_error)
else:
# One of the previous errors has a shorter traceback than the current
# one, so the latter can be discarded.
break
else:
if len(errors) < MAX_TRACEBACKS:
errors.append(error)
return sum(unique_errors.values(), [])
def _sorted_errors(self):
return sorted(self._errors, key=lambda x: (x.filename, x.lineno))
def print_to_stderr(self):
self.print_to_file(sys.stderr)
def __str__(self):
io = six.StringIO()
self.print_to_file(io)
return io.getvalue()
class ErrorLog(ErrorLogBase):
"""ErrorLog with convenience functions."""
def _pytd_print(self, pytd_type):
"""Print the name of the pytd type."""
name = pytd.Print(pytd_utils.CanonicalOrdering(optimize.Optimize(
pytd_type.Visit(visitors.RemoveUnknownClasses()))))
# Clean up autogenerated namedtuple names, e.g. "namedtuple-X-a-_0-c"
# becomes just "X", by extracting out just the type name.
if "namedtuple-" in name:
return re.sub(r"\bnamedtuple-([^-]+)-[-_\w]*", r"\1", name)
nested_class_match = re.search(r"_(?:\w+)_DOT_", name)
if nested_class_match:
# Pytype doesn't have true support for nested classes. Instead, for
# class Foo:
# class Bar: ...
# it outputs:
# class _Foo_DOT_Bar: ...
# class Foo:
# Bar = ... # type: Type[_Foo_DOT_Bar]
# Replace _Foo_DOT_Bar with Foo.Bar in error messages for readability.
# TODO(b/35138984): Get rid of this hack.
start = nested_class_match.start()
return name[:start] + name[start+1:].replace("_DOT_", ".")
return name
def _print_as_expected_type(self, t, instance=None):
"""Print abstract value t as a pytd type."""
if isinstance(t, (abstract.Unknown, abstract.Unsolvable, mixin.Class,
abstract.Union)):
with t.vm.convert.pytd_convert.produce_detailed_output():
return self._pytd_print(t.get_instance_type(instance=instance))
elif (isinstance(t, mixin.PythonConstant) and
not getattr(t, "could_contain_anything", False)):
return re.sub(r"(\\n|\s)+", " ",
t.str_of_constant(self._print_as_expected_type))
elif isinstance(t, abstract.AnnotationClass) or not t.cls:
return t.name
else:
return "<instance of %s>" % self._print_as_expected_type(t.cls, t)
def _print_as_actual_type(self, t):
with t.vm.convert.pytd_convert.produce_detailed_output():
return self._pytd_print(t.to_type())
def _print_as_return_type(self, t):
ret = self._pytd_print(t)
# typing.NoReturn is a prettier alias for nothing.
return "NoReturn" if ret == "nothing" else ret
def _join_printed_types(self, types):
"""Pretty-print the union of the printed types."""
types = sorted(set(types)) # dedup
if len(types) == 1:
return next(iter(types))
elif types:
if "None" in types:
types.remove("None")
return "Optional[%s]" % self._join_printed_types(types)
else:
return "Union[%s]" % ", ".join(types)
else:
return "nothing"
def _iter_sig(self, sig):
"""Iterate through a function.Signature object. Focus on a bad parameter."""
for name in sig.param_names:
yield "", name
if sig.varargs_name is not None:
yield "*", sig.varargs_name
elif sig.kwonly_params:
yield ("*", "")
for name in sorted(sig.kwonly_params):
yield "", name
if sig.kwargs_name is not None:
yield "**", sig.kwargs_name
def _iter_expected(self, sig, bad_param):
"""Yield the prefix, name and type information for expected parameters."""
for prefix, name in self._iter_sig(sig):
suffix = " = ..." if name in sig.defaults else ""
if bad_param and name == bad_param.name:
type_str = self._print_as_expected_type(bad_param.expected)
suffix = ": " + type_str + suffix
yield prefix, name, suffix
def _iter_actual(self, sig, passed_args, bad_param):
"""Yield the prefix, name and type information for actual parameters."""
# We want to display the passed_args in the order they're defined in the
# signature, unless there are starargs or starstarargs.
# Map param names to their position in the list, then sort the list of
# passed args so it's in the same order as the params.
keys = {param: n for n, (_, param) in enumerate(self._iter_sig(sig))}
def key_f(arg):
arg_name = arg[0]
# starargs are given anonymous names, which won't be found in the sig.
# Instead, use the same name as the varags param itself, if present.
if arg_name not in keys and pytd_utils.ANON_PARAM.match(arg_name):
return keys.get(sig.varargs_name, len(keys)+1)
return keys.get(arg_name, len(keys)+1)
for name, arg in sorted(passed_args, key=key_f):
if bad_param and name == bad_param.name:
suffix = ": " + self._print_as_actual_type(arg)
else:
suffix = ""
yield "", name, suffix
def _print_args(self, arg_iter, bad_param):
"""Pretty-print a list of arguments. Focus on a bad parameter."""
# (foo, bar, broken : type, ...)
printed_params = []
found = False
for prefix, name, suffix in arg_iter:
if bad_param and name == bad_param.name:
printed_params.append(prefix + name + suffix)
found = True
elif found:
printed_params.append("...")
break
elif pytd_utils.ANON_PARAM.match(name):
printed_params.append(prefix + "_")
else:
printed_params.append(prefix + name)
return ", ".join(printed_params)
@_error_name("pyi-error")
def pyi_error(self, stack, name, error):
self.error(stack, "Couldn't import pyi for %r" % name, str(error),
keyword=name)
@_error_name("attribute-error")
def _attribute_error(self, stack, binding, attr_name):
"""Log an attribute error."""
obj_repr = self._print_as_actual_type(binding.data)
if len(binding.variable.bindings) > 1:
# Joining the printed types rather than merging them before printing
# ensures that we print all of the options when 'Any' is among them.
details = "In %s" % self._join_printed_types(
self._print_as_actual_type(v) for v in binding.variable.data)
else:
details = None
self.error(
stack, "No attribute %r on %s" % (attr_name, obj_repr), details=details,
keyword=attr_name)
@_error_name("not-writable")
def not_writable(self, stack, obj, attr_name):
obj_values = obj.vm.merge_values([obj])
obj_repr = self._print_as_actual_type(obj_values)
self.error(stack, "Can't assign attribute %r on %s" % (attr_name, obj_repr),
keyword=attr_name, keyword_context=obj_repr)
@_error_name("module-attr")
def _module_attr(self, stack, binding, attr_name):
module_name = binding.data.name
self.error(stack, "No attribute %r on module %r" % (attr_name, module_name),
keyword=attr_name, keyword_context=module_name)
def attribute_error(self, stack, binding, attr_name):
if attr_name in slots.SYMBOL_MAPPING:
obj = self._print_as_actual_type(binding.data)
details = "No attribute %r on %s" % (attr_name, obj)
self._unsupported_operands(stack, attr_name, obj, details=details)
elif isinstance(binding.data, abstract.Module):
self._module_attr(stack, binding, attr_name)
else:
self._attribute_error(stack, binding, attr_name)
@_error_name("unbound-type-param")
def unbound_type_param(self, stack, obj, attr_name, type_param_name):
self.error(
stack, "Can't access attribute %r on %s" % (attr_name, obj.name),
"No binding for type parameter %s" % type_param_name, keyword=attr_name,
keyword_context=obj.name)
@_error_name("name-error")
def name_error(self, stack, name):
self.error(stack, "Name %r is not defined" % name, keyword=name)
@_error_name("import-error")
def import_error(self, stack, module_name):
self.error(stack, "Can't find module %r." % module_name,
keyword=module_name)
def _explain_protocol_mismatch(self, protocol_param, passed_params):
"""Return possibly extra protocol details about an argument mismatch."""
if not protocol_param:
return []
expected = protocol_param.expected
vm = expected.vm
if not isinstance(expected, mixin.Class) or not expected.is_protocol:
return []
p = None # make pylint happy
for name, p in passed_params:
if name == protocol_param.name:
break
else:
return []
methods = vm.matcher.unimplemented_protocol_methods(p, expected)
if not methods:
# Happens if all the protocol methods are implemented, but with the wrong
# types. We don't yet provide more detail about that.
return []
return [
"\nThe following methods aren't implemented on %s:\n" %
self._print_as_actual_type(p)] + [", ".join(sorted(methods))]
def _invalid_parameters(self, stack, message, bad_call):
"""Log an invalid parameters error."""
sig, passed_args, bad_param = bad_call
expected = self._print_args(self._iter_expected(sig, bad_param), bad_param)
actual = self._print_args(
self._iter_actual(sig, passed_args, bad_param), bad_param)
details = [
"Expected: (", expected, ")\n",
"Actually passed: (", actual,
")"]
details += self._explain_protocol_mismatch(bad_param, passed_args)
self.error(stack, message, "".join(details), bad_call=bad_call)
@_error_name("wrong-arg-count")
def wrong_arg_count(self, stack, name, bad_call):
message = "%s expects %d arg(s), got %d" % (
_function_name(name, capitalize=True),
bad_call.sig.mandatory_param_count(),
len(bad_call.passed_args))
self._invalid_parameters(stack, message, bad_call)
def _get_binary_operation(self, function_name, bad_call):
"""Return (op, left, right) if the function should be treated as a binop."""
maybe_left_operand, _, f = function_name.rpartition(".")
# Check that
# (1) the function is bound to an object (the left operand),
# (2) the function has a pretty representation,
# (3) either there are exactly two passed args or the function is one we've
# chosen to treat as a binary operation.
if (not maybe_left_operand or f not in slots.SYMBOL_MAPPING or
(len(bad_call.passed_args) != 2 and
f not in ("__setitem__", "__getslice__"))):
return None
for arg_name, arg_value in bad_call.passed_args[1:]:
if arg_name == bad_call.bad_param.name:
# maybe_left_operand is something like `dict`, but we want a more
# precise type like `Dict[str, int]`.
left_operand = self._print_as_actual_type(bad_call.passed_args[0][1])
right_operand = self._print_as_actual_type(arg_value)
return f, left_operand, right_operand
return None
def wrong_arg_types(self, stack, name, bad_call):
"""Log [wrong-arg-types]."""
operation = self._get_binary_operation(name, bad_call)
if operation:
operator, left_operand, right_operand = operation
operator_name = _function_name(operator, capitalize=True)
expected_right_operand = self._print_as_expected_type(
bad_call.bad_param.expected)
details = "%s on %s expects %s" % (
operator_name, left_operand, expected_right_operand)
self._unsupported_operands(
stack, operator, left_operand, right_operand, details=details)
else:
self._wrong_arg_types(stack, name, bad_call)
@_error_name("wrong-arg-types")
def _wrong_arg_types(self, stack, name, bad_call):
"""A function was called with the wrong parameter types."""
message = ("%s was called with the wrong arguments" %
_function_name(name, capitalize=True))
self._invalid_parameters(stack, message, bad_call)
@_error_name("wrong-keyword-args")
def wrong_keyword_args(self, stack, name, bad_call, extra_keywords):
"""A function was called with extra keywords."""
if len(extra_keywords) == 1:
message = "Invalid keyword argument %s to %s" % (
extra_keywords[0], _function_name(name))
else:
message = "Invalid keyword arguments %s to %s" % (
"(" + ", ".join(sorted(extra_keywords)) + ")",
_function_name(name))
self._invalid_parameters(stack, message, bad_call)
@_error_name("missing-parameter")
def missing_parameter(self, stack, name, bad_call, missing_parameter):
"""A function call is missing parameters."""
message = "Missing parameter %r in call to %s" % (
missing_parameter, _function_name(name))
self._invalid_parameters(stack, message, bad_call)
@_error_name("not-callable")
def not_callable(self, stack, func):
"""Calling an object that isn't callable."""
if isinstance(func, abstract.InterpreterFunction) and func.is_overload:
prefix = "@typing.overload-decorated "
else:
prefix = ""
message = "%s%r object is not callable" % (prefix, func.name)
self.error(stack, message, keyword=func.name)
@_error_name("not-indexable")
def not_indexable(self, stack, name, generic_warning=False):
message = "class %s is not indexable" % name
if generic_warning:
self.error(stack, message, "(%r does not subclass Generic)" % name,
keyword=name)
else:
self.error(stack, message, keyword=name)
@_error_name("not-instantiable")
def not_instantiable(self, stack, cls):
"""Instantiating an abstract class."""
message = "Can't instantiate %s with abstract methods %s" % (
cls.full_name, ", ".join(sorted(cls.abstract_methods)))
self.error(stack, message)
@_error_name("ignored-abstractmethod")
def ignored_abstractmethod(self, stack, cls_name, method_name):
message = "Stray abc.abstractmethod decorator on method %s" % method_name
self.error(stack, message,
details="(%s does not have metaclass abc.ABCMeta)" % cls_name)
@_error_name("duplicate-keyword-argument")
def duplicate_keyword(self, stack, name, bad_call, duplicate):
message = ("%s got multiple values for keyword argument %r" %
(_function_name(name), duplicate))
self._invalid_parameters(stack, message, bad_call)
@_error_name("invalid-super-call")
def invalid_super_call(self, stack, message, details=None):
self.error(stack, message, details)
def invalid_function_call(self, stack, error):
"""Log an invalid function call."""
if isinstance(error, function.WrongArgCount):
self.wrong_arg_count(stack, error.name, error.bad_call)
elif isinstance(error, function.WrongArgTypes):
self.wrong_arg_types(stack, error.name, error.bad_call)
elif isinstance(error, function.WrongKeywordArgs):
self.wrong_keyword_args(
stack, error.name, error.bad_call, error.extra_keywords)
elif isinstance(error, function.MissingParameter):
self.missing_parameter(
stack, error.name, error.bad_call, error.missing_parameter)
elif isinstance(error, function.NotCallable):
self.not_callable(stack, error.obj)
elif isinstance(error, function.DuplicateKeyword):
self.duplicate_keyword(
stack, error.name, error.bad_call, error.duplicate)
else:
raise AssertionError(error)
@_error_name("base-class-error")
def base_class_error(self, stack, base_var):
base_cls = self._join_printed_types(
self._print_as_expected_type(t) for t in base_var.data)
self.error(stack, "Invalid base class: %s" % base_cls, keyword=base_cls)
@_error_name("bad-return-type")
def bad_return_type(self, stack, actual_pytd, expected_pytd):
details = "".join([
"Expected: ", self._print_as_return_type(expected_pytd), "\n",
"Actually returned: ", self._print_as_return_type(actual_pytd),
])
self.error(stack, "bad option in return type", details)
@_error_name("bad-concrete-type")
def bad_concrete_type(self, stack, actual_pytd, expected_pytd):
details = "".join([
"Expected: ", self._print_as_return_type(expected_pytd), "\n",
"Actually passed: ", self._print_as_return_type(actual_pytd),
])
self.error(stack, "Invalid instantiation of generic class", details)
def unsupported_operands(self, stack, operator, var1, var2):
left = self._join_printed_types(
self._print_as_actual_type(t) for t in var1.data)
right = self._join_printed_types(
self._print_as_actual_type(t) for t in var2.data)
details = "No attribute %r on %s" % (operator, left)
if operator in slots.REVERSE_NAME_MAPPING:
details += " or %r on %s" % (slots.REVERSE_NAME_MAPPING[operator], right)
self._unsupported_operands(stack, operator, left, right, details=details)
@_error_name("unsupported-operands")
def _unsupported_operands(self, stack, operator, *operands, **details):
# TODO(b/114124544): Change the signature to (..., *operands, details=None)
assert set(details) <= {"details"}
self.error(
stack, "unsupported operand type(s) for %s: %s" % (
slots.SYMBOL_MAPPING[operator],
" and ".join(repr(operand) for operand in operands)),
details=details.get("details"))
def invalid_annotation(self, stack, annot, details=None, name=None):
if annot is not None:
annot = self._print_as_expected_type(annot)
self._invalid_annotation(stack, annot, details, name)
def invalid_ellipses(self, stack, indices, container_name):
if indices:
details = "Not allowed at %s %s in %s" % (
"index" if len(indices) == 1 else "indices",
", ".join(str(i) for i in sorted(indices)),
container_name)
self._invalid_annotation(stack, "Ellipsis", details, None)
def ambiguous_annotation(self, stack, options, name=None):
desc = " or ".join(sorted(self._print_as_expected_type(o) for o in options))
self._invalid_annotation(stack, desc, "Must be constant", name)
@_error_name("invalid-annotation")
def _invalid_annotation(self, stack, annot_string, details, name):
"""Log the invalid annotation."""
if name is None:
suffix = ""
else:
suffix = "for " + name
annot_string = "%r " % annot_string if annot_string else ""
self.error(stack, "Invalid type annotation %s%s" % (annot_string, suffix),
details=details)
@_error_name("mro-error")
def mro_error(self, stack, name, mro_seqs):
seqs = []
for seq in mro_seqs:
seqs.append("[%s]" % ", ".join(cls.name for cls in seq))
self.error(stack, "Class %s has invalid (cyclic?) inheritance: %s." % (
name, ", ".join(seqs)), keyword=name)
@_error_name("invalid-directive")
def invalid_directive(self, filename, lineno, message):
self._add(Error(
SEVERITY_WARNING, message, filename=filename, lineno=lineno))
@_error_name("late-directive")
def late_directive(self, filename, lineno, name):
message = "%s disabled from here to the end of the file" % name
details = ("Consider limiting this directive's scope or moving it to the "
"top of the file.")
self._add(Error(SEVERITY_WARNING, message, details=details,
filename=filename, lineno=lineno))
@_error_name("not-supported-yet")
def not_supported_yet(self, stack, feature):
self.error(stack, "%s not supported yet" % feature)
@_error_name("key-error")
def key_error(self, stack, key):
self.error(stack, "Key %r possibly not in dictionary (yet)" % key,
keyword=key)
@_error_name("python-compiler-error")
def python_compiler_error(self, filename, lineno, message):
self._add(Error(
SEVERITY_ERROR, message, filename=filename, lineno=lineno))
@_error_name("recursion-error")
def recursion_error(self, stack, name):
self.error(stack, "Detected recursion in %s" % name, keyword=name)
@_error_name("redundant-function-type-comment")
def redundant_function_type_comment(self, filename, lineno):
self._add(Error(
SEVERITY_ERROR,
"Function type comments cannot be used with annotations",
filename=filename, lineno=lineno))
@_error_name("invalid-function-type-comment")
def invalid_function_type_comment(self, stack, comment, details=None):
self.error(stack, "Invalid function type comment: %s" % comment,
details=details)
@_error_name("invalid-type-comment")
def invalid_type_comment(self, stack, comment, details=None):
self.error(stack, "Invalid type comment: %s" % comment,
details=details)
@_error_name("ignored-type-comment")
def ignored_type_comment(self, filename, lineno, comment):
self._add(Error(
SEVERITY_WARNING, "Stray type comment: %s" % comment,
filename=filename, lineno=lineno))
@_error_name("invalid-typevar")
def invalid_typevar(self, stack, comment, bad_call=None):
if bad_call:
self._invalid_parameters(stack, comment, bad_call)
else:
self.error(stack, "Invalid TypeVar: %s" % comment)
@_error_name("invalid-namedtuple-arg")
def invalid_namedtuple_arg(self, stack, badname=None, err_msg=None):
if err_msg is None:
msg = ("collections.namedtuple argument %r is not a valid typename or "
"field name.")
self.warn(stack, msg % badname)
else:
self.error(stack, err_msg)
@_error_name("bad-function-defaults")
def bad_function_defaults(self, stack, func_name):
msg = "Attempt to set %s.__defaults__ to a non-tuple value."
self.warn(stack, msg % func_name)
@_error_name("bad-slots")
def bad_slots(self, stack, msg):
self.error(stack, msg)
@_error_name("bad-unpacking")
def bad_unpacking(self, stack, num_vals, num_vars):
prettify = lambda v, label: "%d %s%s" % (v, label, "" if v == 1 else "s")
vals_str = prettify(num_vals, "value")
vars_str = prettify(num_vars, "variable")
msg = "Cannot unpack %s into %s" % (vals_str, vars_str)
self.error(stack, msg, keyword=vals_str)
@_error_name("reveal-type")
def reveal_type(self, stack, node, var):
types = [self._print_as_actual_type(b.data)
for b in var.bindings
if node.HasCombination([b])]
self.error(stack, self._join_printed_types(types))
def get_error_names_set():
return _ERROR_NAMES
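# Illustrative sketch (not part of the original module): the test helper above
# can build a standalone Error and show how it renders; the file name, line
# number and message are arbitrary examples.
def _example_error():
    err = Error.for_test(SEVERITY_ERROR, "Name 'foo' is not defined",
                         "name-error", filename="t.py", lineno=3)
    # str(err) looks roughly like:
    #   File "t.py", line 3: Name 'foo' is not defined [name-error]
    return str(err)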
|
the-stack_106_27690 | import os, socket
import urllib.request
# set fake user agent here
ua = 'Wget/1.19.4 (linux-gnu)'
def handle_client(c):
hdr = c.recv(1024).decode("utf-8")
url = hdr.split(' ')[1]
print(url, "=> downloading")
data = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': ua})).read()
print(url, "=> fetched, len:", len(data))
c.send("HTTP/1.1 200 OK\r\n".encode("utf-8"))
c.send(("Content-Length: " + str(len(data)) + "\r\n").encode("utf-8"))
c.send("Connection: close\r\n".encode("utf-8"))
c.send("\r\n".encode("utf-8"))
c.sendall(data)
c.shutdown(socket.SHUT_RDWR)
c.close()
# config proxy for urllib if needed
#proxy_handler = urllib.request.ProxyHandler({'http': 'http://PROXY_ADDRESS:PORT'})
#opener = urllib.request.build_opener(proxy_handler)
#urllib.request.install_opener(opener)
# set listen address and port here
host = "0.0.0.0"
port = 8080
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(100)
while True:
c, addr = s.accept()
pid = os.fork()
if pid == 0:
if os.fork() == 0:
handle_client(c)
exit(0)
    os.waitpid(pid, 0)
    # Parent process: close its copy of the client socket so file descriptors
    # are not leaked across accepted connections.
    c.close()
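# Illustrative usage (not part of the original script): point an HTTP client at
# the listener as a plain-HTTP forward proxy, e.g.
#
#     curl -x http://127.0.0.1:8080 http://example.com/file.tar.gz
#
# The proxy-style request line carries the absolute URL that handle_client()
# fetches with the fake Wget user agent configured above (no HTTPS/CONNECT support).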
|
the-stack_106_27692 | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet replace-by-fee capabilities in conjunction with the fallbackfee."""
from test_framework.test_framework import EleccoinTestFramework
from test_framework.util import assert_raises_rpc_error
class WalletRBFTest(EleccoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].generate(101)
# sending a transaction without fee estimations must be possible by default on regtest
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# test sending a tx with disabled fallback fee (must fail)
self.restart_node(0, extra_args=["-fallbackfee=0"])
assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].fundrawtransaction(self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})))
assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendmany("", {self.nodes[0].getnewaddress(): 1}))
if __name__ == '__main__':
WalletRBFTest().main()
|
the-stack_106_27696 | """
A convenience script to playback random demonstrations from
a set of demonstrations stored in a hdf5 file.
Example:
$ python playback_demonstrations_from_hdf5.py --folder ../models/assets/demonstrations/SawyerPickPlace/
"""
import os
import h5py
import argparse
import random
import numpy as np
import robosuite
from robosuite.utils.mjcf_utils import postprocess_model_xml
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--folder",
type=str,
default=os.path.join(
robosuite.models.assets_root, "demonstrations/SawyerNutAssembly"
),
)
args = parser.parse_args()
demo_path = args.folder
hdf5_path = os.path.join(demo_path, "demo.hdf5")
f = h5py.File(hdf5_path, "r")
env_name = f["data"].attrs["env"]
env = robosuite.make(
env_name,
has_renderer=True,
ignore_done=True,
use_camera_obs=False,
gripper_visualization=True,
reward_shaping=True,
control_freq=100,
)
# list of all demonstrations episodes
demos = list(f["data"].keys())
while True:
print("Playing back random episode... (press ESC to quit)")
# # select an episode randomly
ep = random.choice(demos)
# read the model xml, using the metadata stored in the attribute for this episode
model_file = f["data/{}".format(ep)].attrs["model_file"]
model_path = os.path.join(demo_path, "models", model_file)
with open(model_path, "r") as model_f:
model_xml = model_f.read()
env.reset()
xml = postprocess_model_xml(model_xml)
env.reset_from_xml_string(xml)
env.viewer.set_camera(0)
# load the flattened mujoco states
states = f["data/{}/states".format(ep)].value
# force the sequence of internal mujoco states one by one
for state in states:
env.sim.set_state_from_flattened(state)
env.sim.forward()
env.render()
f.close()
|
the-stack_106_27697 | import ast
import math
import numpy as np
def add(p1, p2):
return [p1,p2]
def canReduce(p):
return checkExplode(p) or checkSplit(p)
def checkExplode(p, depth=0):
#print(p, depth)
if isinstance(p, list):
if depth>=4:
return True
else:
return checkExplode(p[0], depth+1) or checkExplode(p[1], depth+1)
else:
return False
def checkSplit(p):
if isinstance(p,int):
return p>=10
return checkSplit(p[0]) or checkSplit(p[1])
def getExplodingPair(p, depth=0, path=[]):
if isinstance(p, list):
if depth>=4 and not any(isinstance(i, list) for i in p):
return (p, depth, path)
else:
if checkExplode(p[0], depth+1):
path.append(0)
return getExplodingPair(p[0], depth+1, path)
else:
path.append(1)
return getExplodingPair(p[1], depth+1,path)
def getNearestNeighbour(path, splitIndex):
#Get the nearest neighbour with the given split index closest to the node on the path
for i, direction in enumerate(reversed(path)):
if direction==splitIndex:
return path[0:len(path)-i]
return []
def literalIn(p):
return isinstance(p,int) or literalIn(p[0]) or literalIn(p[1])
def getLeftMostLiteral(p, path=[]):
if isinstance(p, int):
return (p, path)
else:
if literalIn(p[0]):
path.append(0)
return getLeftMostLiteral(p[0],path)
else:
path.append(1)
return getLeftMostLiteral(p[1], path)
def getRightMostLiteral(p, path=[]):
if isinstance(p, int):
return (p, path)
else:
if literalIn(p[1]):
path.append(1)
return getRightMostLiteral(p[1],path)
else:
path.append(0)
return getRightMostLiteral(p[0], path)
def getSubTree(p, path):
#print(p)
if len(path)==0:
return p
else:
return getSubTree(p[path[0]], path[1:])
def changeValue(p,newValue,path):
#print(path, p)
if len(path)==0:
return newValue
else:
if path[0]==0:
return [changeValue(p[0],newValue, path[1:]), p[1]]
else:
return [p[0],changeValue(p[1],newValue, path[1:])]
def explode(p):
result=p
exploding, depth, path = getExplodingPair(p,0,[])
leftPath = getNearestNeighbour(path, 0) #Return the path to the deepest right split on the road to the exploding pair
if len(leftPath)!=0: #Found a right split
leftPath[-1] = 1 #Change the split to a right branch
#leftPath is now the path that point to the subtree where we need to look for the left most literal
subTree=getSubTree(p,leftPath)
v, subPath = getLeftMostLiteral(subTree,[])
fullPath=leftPath+subPath
result = changeValue(result,v+exploding[1],fullPath)
rightPath = getNearestNeighbour(path, 1)
if len(rightPath)!=0:
rightPath[-1]=0
subTree=getSubTree(result,rightPath)
v, subPath = getRightMostLiteral(subTree,[])
fullPath=rightPath+subPath
result = changeValue(result, v+exploding[0], fullPath)
result = changeValue(result,0,path)
return result
def split(p):
if isinstance(p, int):
if p>=10:
return [math.floor(p/2), math.ceil(p/2)]
if checkSplit(p[0]):
return [split(p[0]),p[1]]
else:
return [p[0],split(p[1])]
def reduce(p):
if checkExplode(p):
return explode(p)
elif checkSplit(p):
return split(p)
def getMagnitude(p):
if isinstance(p, int):
return p
else:
return 3*getMagnitude(p[0])+2*getMagnitude(p[1])
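# Worked example (from the puzzle description): magnitude is
# 3 * magnitude(left) + 2 * magnitude(right), applied recursively, e.g.
#     getMagnitude([[1, 2], [[3, 4], 5]])
#     = 3 * (3*1 + 2*2) + 2 * (3*(3*3 + 2*4) + 2*5)
#     = 3 * 7 + 2 * 61 = 143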
if __name__ == '__main__':
with open("input.txt", 'r') as f:
data = f.readlines()
pair = ast.literal_eval(data[0])
#"""
step=1
for line in data[1:]:
pair2 = ast.literal_eval(line.strip())
pair = add(pair, pair2)
while canReduce(pair):
pair=reduce(pair)
step+=1
#"""
print("p1:",getMagnitude(pair))
results=np.zeros((len(data), len(data)))
for i,l1 in enumerate(data):
for j,l2 in enumerate(data):
pair=add(ast.literal_eval(l1.strip()), ast.literal_eval(l2.strip()))
while canReduce(pair):
pair = reduce(pair)
results[i,j]=getMagnitude(pair)
print("p2:",np.amax(results)) |
the-stack_106_27698 | import torch
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def initialize_parameters(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
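# Illustrative usage (not part of the original snippet): apply the initializer
# recursively to every submodule of a model, e.g.
#
#     model = torch.nn.Linear(16, 4)
#     model.apply(initialize_parameters)
#
# nn.Module.apply() calls the function on each submodule, so every Linear layer
# gets the scaled-normal weights and zero biases set above.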
|
the-stack_106_27701 | """ Logging Settings """
import os
from masonite import env
"""Default Channel
The default channel will be used by Masonite whenever it needs
to use the logging channel. You can switch the channel at
any time.
"""
DEFAULT = env("LOG_CHANNEL", "single")
"""Channels
Channels dictate how logging drivers will be initialized.
Supported Channels: single, daily, stack, terminal, slack, syslog
"""
CHANNELS = {
"timezone": env("LOG_TIMEZONE", "PRC"),
"single": {"driver": "single", "level": "debug", "path": "storage/logs/single.log"},
"stack": {"driver": "stack", "channels": ["single", "daily", "slack", "terminal"]},
"daily": {"driver": "daily", "level": "debug", "path": "storage/logs"},
"terminal": {
"driver": "terminal",
"level": "info",
},
"slack": {
"driver": "slack",
"channel": "#bot",
"emoji": ":warning:",
"username": "Logging Bot",
"token": env("SLACK_TOKEN", None),
"level": "debug",
},
"syslog": {"driver": "syslog", "path": "/var/run/syslog", "level": "debug"},
}
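# Illustrative note (not part of the original config): the values above are
# driven by environment variables, so a .env file such as
#
#     LOG_CHANNEL=stack
#     LOG_TIMEZONE=UTC
#     SLACK_TOKEN=xoxb-...
#
# switches the active channel and credentials without editing this file.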
|
the-stack_106_27705 | # -*- coding: utf-8 -*-
# This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mpas_analysis.shared.io import open_mpas_dataset
from mpas_analysis.shared.time_series.moving_average import compute_moving_avg
def compute_moving_avg_anomaly_from_start(timeSeriesFileName, variableList,
anomalyStartTime, anomalyEndTime,
startDate, endDate, calendar,
movingAveragePoints=12,
alter_dataset=None): # {{{
'''
Compute the rolling mean of the anomaly of a quantity from the beginning
of the simulation (such that the rolling mean starts at zero by definition)
Parameters
----------
timeSeriesFileName : str
a file produced by ``MpasTimeSeriesTask`` containing variables, the
anomaly and rolling mean of which is to be computed
variableList : list of str
variable names to include in the resulting data set
anomalyStartTime, anomalyEndTime : str
the start and end times of the reference point for the anomaly
startDate, endDate : str
the start and end dates of the time series
    calendar : {'gregorian', 'gregorian_noleap'}
The calendar used in the MPAS run
movingAveragePoints : int, optional
The number of points (months) over which to perform the rolling average
of the data set
alter_dataset : function
A function for manipulating the data set (e.g. computing new
variables), taking an ``xarray.Dataset`` as input argument and
returning an ``xarray.Dataset``
Returns
-------
ds : ``xarray.Dataset``
The anomaly of the rolling time mean from the start of the simulation
'''
# Authors
# -------
# Xylar Asay-Davis
ds = open_mpas_dataset(fileName=timeSeriesFileName,
calendar=calendar,
variableList=variableList,
startDate=startDate,
endDate=endDate)
if alter_dataset is not None:
ds = alter_dataset(ds)
dsStart = open_mpas_dataset(
fileName=timeSeriesFileName,
calendar=calendar,
variableList=variableList,
startDate=anomalyStartTime,
endDate=anomalyEndTime)
if alter_dataset is not None:
dsStart = alter_dataset(dsStart)
dsStart = dsStart.isel(Time=slice(0, movingAveragePoints)).mean('Time')
for variable in ds.data_vars:
ds[variable] = ds[variable] - dsStart[variable]
ds = compute_moving_avg(ds)
return ds
# }}}
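# Illustrative sketch (not part of the original module): a typical call, with
# the file name, variable name and dates as hypothetical placeholders.
def _example_call():
    return compute_moving_avg_anomaly_from_start(
        timeSeriesFileName='timeSeries.nc',
        variableList=['timeMonthly_avg_ohc'],
        anomalyStartTime='0001-01-01_00:00:00',
        anomalyEndTime='0002-01-01_00:00:00',
        startDate='0001-01-01_00:00:00',
        endDate='0010-01-01_00:00:00',
        calendar='gregorian_noleap',
        movingAveragePoints=12)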
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
|
the-stack_106_27707 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None, content_type=None, status=None, using=None):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
content = loader.render_to_string(template_name, context, using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None, content_type=None, status=None, using=None):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
content = loader.render_to_string(template_name, context, request, using=using)
return HttpResponse(content, content_type, status)
def redirect(to, *args, permanent=False, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
redirect_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
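# Illustrative examples (not part of the original module): the three argument
# styles described in the docstring above, with names as placeholders.
#
#     redirect(some_model_instance)             # uses get_absolute_url()
#     redirect('post-detail', pk=7)             # reversed like reverse()
#     redirect('/accounts/login/', permanent=True)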
def _get_queryset(klass):
"""
Return a QuerySet or a Manager.
Duck typing in action: any class with a `get()` method (for
get_object_or_404) or a `filter()` method (for get_list_or_404) might do
the job.
"""
# If it is a model class or anything else with ._default_manager
if hasattr(klass, '_default_manager'):
return klass._default_manager.all()
return klass
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except AttributeError:
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_object_or_404() must be a Model, Manager, "
"or QuerySet, not '%s'." % klass__name
)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
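# Usage sketch (illustrative only): a typical detail view combining
# get_object_or_404() with render().  The model class is passed in rather than
# hard-coded, and the template path is an assumption.
def _example_detail_view(request, model, pk):
    obj = get_object_or_404(model, pk=pk)  # raises Http404 if no match
    return render(request, 'myapp/detail.html', {'object': obj})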
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises an Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
try:
obj_list = list(queryset.filter(*args, **kwargs))
except AttributeError:
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_list_or_404() must be a Model, Manager, or "
"QuerySet, not '%s'." % klass__name
)
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, str):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return reverse(to, args=args, kwargs=kwargs)
except NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
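# Usage sketch (illustrative only): the three kinds of arguments resolve_url()
# accepts, mirroring its docstring.  The view name is an assumption.
def _example_resolve_urls(obj):
    model_url = resolve_url(obj)                     # model -> get_absolute_url()
    named_url = resolve_url('some-view-name', pk=1)  # view name -> reverse()
    plain_url = resolve_url('https://example.com/')  # URL returned as-is
    return model_url, named_url, plain_url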
|
the-stack_106_27708 | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import mdsExceptions, Device, Data, version
from ctypes import CDLL, byref, c_double, c_int, c_void_p, c_char_p, create_string_buffer
from numpy import array
from threading import Thread
import traceback
class FLIRSC65X(Device):
"""FLIR655 NEW Camera"""
parts=[
{'path':':NAME', 'type':'text'},
{'path':':COMMENT', 'type':'text'},
{'path':'.OBJECT', 'type':'structure'},
{'path':'.OBJECT:EMISSIVITY', 'type':'numeric', 'value':920E-3},
{'path':'.OBJECT:DISTANCE', 'type':'numeric', 'value':2},
{'path':'.OBJECT:REFL_TEMP', 'type':'numeric', 'value':20},
{'path':'.OBJECT:OPTIC_TEMP', 'type':'numeric', 'value':20},
{'path':'.OBJECT:OPTIC_TRANS', 'type':'numeric', 'value':1},
{'path':'.OBJECT:ATM_TEMP', 'type':'numeric', 'value':20},
{'path':'.OBJECT:ATM_HUM', 'type':'numeric', 'value':0.50},
{'path':'.OBJECT:ATM_TRANS', 'type':'numeric', 'value':99E-2},
{'path':'.FRAME', 'type':'structure'},
{'path':'.FRAME:X', 'type':'numeric', 'value':0},
{'path':'.FRAME:Y', 'type':'numeric', 'value':0},
{'path':'.FRAME:WIDTH', 'type':'numeric', 'value':640},
{'path':'.FRAME:HEIGHT', 'type':'numeric', 'value':480},
{'path':'.FRAME:TEMP_UNIT', 'type':'text', 'value':'LinearTemperature10mK'},
{'path':'.CAM_SETUP', 'type':'structure'},
{'path':'.CAM_SETUP:FOCAL_LENGTH', 'type':'text', 'value':'25'},
{'path':'.CAM_SETUP:MEAS_RANGE', 'type':'text', 'value':'100...650'},
{'path':'.CAM_SETUP:FOCUS_POS', 'type':'numeric', 'value':0},
{'path':'.CAM_SETUP:CALIB_AUTO', 'type':'text', 'value':'NO'},
{'path':'.CAM_SETUP:CALIB_TIME', 'type':'numeric', 'value':4},
{'path':'.TIMING', 'type':'structure'},
{'path':'.TIMING:TRIG_MODE', 'type':'text', 'value':'INTERNAL'},
{'path':'.TIMING:TRIG_SOURCE', 'type':'numeric'},
{'path':'.TIMING:TIME_BASE', 'type':'numeric'},
{'path':'.TIMING:FRAME_RATE', 'type':'numeric', 'value':50},
{'path':'.TIMING:BURST_DUR', 'type':'numeric', 'value':5},
{'path':'.TIMING:SKIP_FRAME', 'type':'numeric', 'value':0},
{'path':'.STREAMING', 'type':'structure'},
{'path':'.STREAMING:MODE', 'type':'text', 'value':'Stream and Store'},
{'path':'.STREAMING:SERVER', 'type':'text', 'value':'localhost'},
{'path':'.STREAMING:PORT', 'type':'numeric', 'value':8888},
{'path':'.STREAMING:AUTOSCALE', 'type':'text', 'value':'YES'},
{'path':'.STREAMING:LOLIM', 'type':'numeric', 'value':15},
{'path':'.STREAMING:HILIM', 'type':'numeric', 'value':50},
{'path':':FRAMES', 'type':'signal','options':('no_write_model', 'no_compress_on_put')},
{'path':':FRAMES_METAD', 'type':'signal','options':('no_write_model', 'no_compress_on_put')},
{'path':':FRAME0_TIME', 'type':'numeric','value':0}]
parts.append({'path':':INIT_ACT','type':'action',
'valueExpr':"Action(Dispatch('CAMERA_SERVER','PULSE_PREPARATION',50,None),Method(None,'init',head))",
'options':('no_write_shot',)})
parts.append({'path':':START_ACT','type':'action',
'valueExpr':"Action(Dispatch('CAMERA_SERVER','INIT',50,None),Method(None,'startAcquisition',head))",
'options':('no_write_shot',)})
parts.append({'path':':STOP_ACT','type':'action',
'valueExpr':"Action(Dispatch('CAMERA_SERVER','STORE',50,None),Method(None,'stopAcquisition',head))",
'options':('no_write_shot',)})
handle = c_int(-1)
handles = {}
workers = {}
flirLib = None
mdsLib = None
streamLib = None
flirUtilsLib = None
error = create_string_buffer(version.tobytes(''), 512)
"""Asynchronous readout internal class"""
class AsynchStore(Thread):
def configure(self, device):
self.device = device
self.frameIdx = 0
self.stopReq = False
def run(self):
print("Asychronous acquisition thread")
status = FLIRSC65X.flirLib.startFramesAcquisition(self.device.handle)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.device.handle, self.device.error)
Data.execute('DevLogErr($1,$2)', self.device.nid, 'Cannot start frames acquisition : ' + self.device.error.raw )
print("Fine acquisition thread")
status = FLIRSC65X.flirLib.flirClose(self.device.handle) #close device and remove from info
if status < 0:
FLIRSC65X.flirLib.getLastError(self.device.handle, self.device.error)
Data.execute('DevLogErr($1,$2)', self.device.nid, 'Cannot close camera : ' + self.device.error.raw )
self.device.removeInfo()
#raise mdsExceptions.TclFAILED_ESSENTIAL
def stop(self):
print("STOP frames acquisition loop")
status = FLIRSC65X.flirLib.stopFramesAcquisition(self.device.handle)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.device.handle, self.device.error)
Data.execute('DevLogErr($1,$2)', self.device.nid, 'Cannot stop frames acquisition : ' + self.device.error.raw )
def saveWorker(self):
FLIRSC65X.workers[self.nid] = self.worker
###save Info###
#saveInfo and restoreInfo allow to manage multiple occurrences of camera devices
#and to avoid opening and closing devices handles
def saveInfo(self):
FLIRSC65X.handles[self.nid] = self.handle
###restore worker###
def restoreWorker(self):
if self.nid in FLIRSC65X.workers.keys():
self.worker = FLIRSC65X.workers[self.nid]
return 1
else:
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot restore worker!!')
raise mdsExceptions.TclFAILED_ESSENTIAL
return
###restore info###
def restoreInfo(self):
print("restore Info")
try:
if FLIRSC65X.flirLib is None:
libName = "libflirsc65x.so"
FLIRSC65X.flirLib = CDLL(libName)
print(FLIRSC65X.flirLib)
if FLIRSC65X.mdsLib is None:
libName = "libcammdsutils.so"
FLIRSC65X.mdsLib = CDLL(libName)
print(FLIRSC65X.mdsLib)
if FLIRSC65X.streamLib is None:
libName = "libcamstreamutils.so"
FLIRSC65X.streamLib = CDLL(libName)
print(FLIRSC65X.streamLib)
"""
if FLIRSC65X.flirUtilsLib is None:
libName = "libflirutils.so"
FLIRSC65X.flirUtilsLib = CDLL(libName)
print(FLIRSC65X.flirUtilsLib)
"""
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot load library : ' + libName )
raise mdsExceptions.TclFAILED_ESSENTIAL
if self.nid in FLIRSC65X.handles.keys():
self.handle = FLIRSC65X.handles[self.nid]
print('RESTORE INFO: HANDLE FOUND')
else:
print('RESTORE INFO: HANDLE NOT FOUND')
try:
name = self.name.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Missing device name' )
raise mdsExceptions.TclFAILED_ESSENTIAL
print("Opening")
self.handle = c_int(-1)
status = FLIRSC65X.flirLib.flirOpen(c_char_p(name), byref(self.handle))
print("Opened ", status)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot open device '+ name +'('+self.error.raw+')')
raise mdsExceptions.TclFAILED_ESSENTIAL
return
###remove info###
def removeInfo(self):
try:
del(FLIRSC65X.handles[self.nid])
except:
print('ERROR TRYING TO REMOVE INFO')
##########init############################################################################
def init(self):
if self.restoreInfo() == 0:
raise mdsExceptions.TclFAILED_ESSENTIAL
self.saveInfo()
try:
self.frames.setCompressOnPut(False)
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot disable automatic compression on put for frames node')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
self.frames_metad.setCompressOnPut(False)
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot disable automatic compression on put for frames_metad node')
raise mdsExceptions.TclFAILED_ESSENTIAL
###Object Parameters
try:
o_refl_temp = c_double(self.object_refl_temp.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object reflective temperature')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
o_atm_temp = c_double(self.object_atm_temp.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object atmosphere temperature')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
o_distance = c_double(self.object_distance.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object distance')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
o_emissivity = c_double(self.object_emissivity.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object emissivity')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
o_atm_hum = c_double(self.object_atm_hum.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object atmosphere humidity')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
o_optic_temp = c_double(self.object_optic_temp.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object optic temperature')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
o_optic_trans = c_double(self.object_optic_trans.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object optic transmission')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
o_atm_trans = c_double(self.object_atm_trans.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid value for object atmosphere transmission')
raise mdsExceptions.TclFAILED_ESSENTIAL
status = FLIRSC65X.flirLib.setObjectParameters(self.handle, o_refl_temp, o_atm_temp, o_distance, o_emissivity, o_atm_hum , o_optic_temp, o_optic_trans, o_atm_trans )
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Set Object Parameters : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
###Frame Rate
try:
frameRate = self.timing_frame_rate.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid frame rate value')
raise mdsExceptions.TclFAILED_ESSENTIAL
status = FLIRSC65X.flirLib.setFrameRateNew(self.handle, c_double(frameRate))
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Set Frame Rate : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
###Frame Area
x=c_int(0)
y=c_int(0)
width=c_int(0)
height=c_int(0)
status = FLIRSC65X.flirLib.getReadoutArea(self.handle, byref(x), byref(y), byref(width), byref(height))
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Get Readout Area : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
#write data in mdsplus
self.frame_x.putData(x.value)
self.frame_y.putData(y.value)
self.frame_width.putData(width.value)
self.frame_height.putData(height.value)
###Focal Length
try:
focalLength = self.cam_setup_focal_length.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid Focal Length value')
raise mdsExceptions.TclFAILED_ESSENTIAL
if focalLength == '25':
focalLengthInt=0
elif focalLength == '41':
focalLengthInt=3 #offset to select the correct calibration curve using the Measurement Range
###Measurement Range
try:
measureRange = self.cam_setup_meas_range.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid measurement range value')
raise mdsExceptions.TclFAILED_ESSENTIAL
if measureRange == '-40...150':
measRangeInt=0
elif measureRange == '100...650':
measRangeInt=1
elif measureRange == '300...2000':
measRangeInt=2
status = FLIRSC65X.flirLib.setMeasurementRange(self.handle, c_int(measRangeInt+focalLengthInt))
if status < 0:
try:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Set Measurement Range : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
except:
traceback.print_exc()
###Image Temperature
try:
frameTempUnit = self.frame_temp_unit.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid image temperature unit (Radiometric, 10mK, 100mK) value')
raise mdsExceptions.TclFAILED_ESSENTIAL
if frameTempUnit == 'Radiometric':
frameTempUnitCode=c_int(0)
elif frameTempUnit == 'LinearTemperature10mK':
frameTempUnitCode=c_int(1)
elif frameTempUnit == 'LinearTemperature100mK':
frameTempUnitCode=c_int(2)
status = FLIRSC65X.flirLib.setIrFormat(self.handle, frameTempUnitCode)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Set Image Temperature unit : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
###Frame Trigger mode
try:
burstDuration = self.timing_burst_dur.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid acquisition duration value')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
triggerMode = self.timing_trig_mode.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid trigger mode value')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
trigSource = self.timing_trig_source.data()
except:
if triggerMode == 'EXTERNAL':
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid trigger source value')
raise mdsExceptions.TclFAILED_ESSENTIAL
else:
trigSource = array([0.])
print("OK " + triggerMode )
if triggerMode == 'EXTERNAL': #0=internal 1=external trigger
trigModeCode=c_int(1)
else:
trigSource = array([0.])
trigModeCode=c_int(0)
numTrigger = trigSource.size
print("OK - NUM TRIGGER ", numTrigger)
print("OK - Trigger Source ", trigSource)
timeBase = Data.compile(" $ : $ + $ :(zero( size( $ ), 0.) + 1.) * 1./$", trigSource, trigSource, burstDuration, trigSource, frameRate)
print("Data = " + Data.decompile(timeBase))
self.timing_time_base.putData(timeBase)
status = FLIRSC65X.flirLib.setTriggerMode(self.handle, trigModeCode, c_double(burstDuration), numTrigger)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Set Internal/External Trigger : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
###Calibration
try:
calibAuto = self.cam_setup_calib_auto.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid auto calibration setup')
raise mdsExceptions.TclFAILED_ESSENTIAL
calibModeCode = c_int(1)
if calibAuto == 'NO':
try:
calibTime = self.cam_setup_calib_time.data()
calibModeCode = c_int(0)
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid calibration duration value')
raise mdsExceptions.TclFAILED_ESSENTIAL
if numTrigger > 1 and (burstDuration + calibTime) > (trigSource[1] - trigSource[0]) :
Data.execute('DevLogErr($1,$2)', self.nid, 'Calibration executed during acquisition')
raise mdsExceptions.TclFAILED_ESSENTIAL
status = FLIRSC65X.flirLib.setCalibMode(self.handle, calibModeCode)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Set Calibration Mode : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
###Streaming
try:
streamingMode = self.streaming_mode.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid streaming mode setup')
raise mdsExceptions.TclFAILED_ESSENTIAL
if streamingMode == 'Stream and Store':
streamingEnabled = c_int(1)
storeEnabled = c_int(1)
elif streamingMode == 'Only Stream':
streamingEnabled = c_int(1)
storeEnabled = c_int(0)
else: #streamingMode == 'Only Store':
streamingEnabled = c_int(0)
storeEnabled = c_int(1)
if streamingEnabled :
try:
if self.streaming_autoscale.data() == 'YES' :
autoAdjustLimit = c_int(1)
else:
autoAdjustLimit = c_int(0)
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid streaming autoscale parameter value')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
lowLim = c_int(self.streaming_lolim.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid streaming low temperature limit parameter value')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
highLim = c_int(self.streaming_hilim.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid streaming high temperature limit parameter value')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
streamingPort = c_int(self.streaming_port.data())
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid streaming port parameter value')
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
streamingServer = self.streaming_server.data()
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid streaming server parameter value')
raise mdsExceptions.TclFAILED_ESSENTIAL
#fede 20161012
# else:
# autoAdjustLimit = c_int(0)
# streamingPort = c_int(8888)
# lowLim = c_int(0)
# highLim = c_int(36)
# streamingServer = "localhost"
print("lowLim ", lowLim)
print("highLim ", highLim)
print("frameTempUnitCode ", frameTempUnitCode)
print("streamingPort ", streamingPort)
print("streamingServer ", streamingServer)
deviceName = str(self).rsplit(":",1)
deviceName = deviceName[1]
print("Device Name ", deviceName)
#fede: recover device name and pass it to set streaming to overlay text on frame!!!
status = FLIRSC65X.flirLib.setStreamingMode(self.handle, frameTempUnitCode, streamingEnabled, autoAdjustLimit, c_char_p(streamingServer), streamingPort, lowLim, highLim, c_char_p(deviceName));
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot execute streaming setup mode : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
###Acquisition
try:
acqSkipFrameNumber = c_int( self.timing_skip_frame.data() )
except:
Data.execute('DevLogErr($1,$2)', self.nid, 'Invalid acquisition decimation value')
raise mdsExceptions.TclFAILED_ESSENTIAL
status = FLIRSC65X.flirLib.setAcquisitionMode(self.handle, storeEnabled , acqSkipFrameNumber)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot execute acquisition setup mode : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
try:
treePtr = c_void_p(0)
status = FLIRSC65X.mdsLib.camOpenTree(c_char_p(self.getTree().name), c_int(self.getTree().shot), byref(treePtr))
if status == -1:
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot open tree')
raise mdsExceptions.TclFAILED_ESSENTIAL
except:
traceback.print_exc()
framesNid = self.frames.nid
timebaseNid = self.timing_time_base.nid
framesMetadNid = self.frames_metad.nid
frame0TimeNid = self.frame0_time.nid
status = FLIRSC65X.flirLib.setTreeInfo( self.handle, treePtr, framesNid, timebaseNid, framesMetadNid, frame0TimeNid)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot execute set tree info : '+self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
###Auto Calibration
status = FLIRSC65X.flirLib.executeAutoCalib(self.handle)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Execute Auto Calibration : '+self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
print('Init action completed.')
return
####################MANUAL CALIBRATION ACTION
def calib(self):
if self.restoreInfo() == 0:
raise mdsExceptions.TclFAILED_ESSENTIAL
status = FLIRSC65X.flirLib.executeAutoCalib(self.handle)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Execute Auto Calibration '+ self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
#self.saveInfo()
return
####################MANUAL AUTOFOCUS ACTION
def autofocus(self):
if self.restoreInfo() == 0:
raise mdsExceptions.TclFAILED_ESSENTIAL
status = FLIRSC65X.flirLib.executeAutoFocus(self.handle)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Execute Auto Focus : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
self.saveInfo()
return
####################READ FOCUS POSITION
def readFocusPos(self):
if self.restoreInfo() == 0:
raise mdsExceptions.TclFAILED_ESSENTIAL
focPos=c_int(0)
status = FLIRSC65X.flirLib.getFocusAbsPosition(self.handle, byref(focPos))
print('Focus Position Read: ', focPos)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Read Focus Position : '+ self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
self.cam_setup_focus_pos.putData(focPos.value) #write data in mdsplus
self.saveInfo()
return
####################WRITE FOCUS POSITION
def writeFocusPos(self):
if self.restoreInfo() == 0:
raise mdsExceptions.TclFAILED_ESSENTIAL
status = FLIRSC65X.flirLib.setFocusAbsPosition(self.handle, c_int(self.cam_setup_focus_pos.data()))
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Write Focus Position : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
self.saveInfo()
return
##########start acquisition############################################################################
def startAcquisition(self):
if self.restoreInfo() == 0:
raise mdsExceptions.TclFAILED_ESSENTIAL
print("start store")
self.worker = self.AsynchStore()
self.worker.daemon = True
self.worker.stopReq = False
print("start store2")
width = c_int(0)
height = c_int(0)
payloadSize = c_int(0)
status = FLIRSC65X.flirLib.startAcquisition(self.handle, byref(width), byref(height), byref(payloadSize))
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Start Camera Acquisition : '+self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
print("start store3")
self.worker.configure(self)
self.saveWorker()
self.worker.start()
return
##########stop acquisition############################################################################
def stopAcquisition(self):
if self.restoreWorker() :
self.worker.stop()
return
##########software trigger (start saving in mdsplus)############################################
def swTrigger(self):
if self.restoreInfo() == 0:
raise mdsExceptions.TclFAILED_ESSENTIAL
print('SOFTWARE TRIGGER')
status = FLIRSC65X.flirLib.softwareTrigger(self.handle)
if status < 0:
FLIRSC65X.flirLib.getLastError(self.handle, self.error)
Data.execute('DevLogErr($1,$2)', self.nid, 'Cannot Execute Software Trigger : ' + self.error.raw)
raise mdsExceptions.TclFAILED_ESSENTIAL
self.saveInfo()
return
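# Usage sketch (illustrative only, not part of the original driver): besides the
# dispatcher actions declared in ``parts`` (init at PULSE_PREPARATION,
# startAcquisition at INIT, stopAcquisition at STORE), the device methods can be
# invoked manually on the device node of an open MDSplus tree.  The tree name,
# shot number and node path below are assumptions; doMethod() is the generic
# MDSplus way of invoking a device method.
#
# from MDSplus import Tree
#
# tree = Tree('my_experiment', -1)                     # hypothetical tree, model shot
# camNode = tree.getNode('\\TOP.DIAGNOSTICS.FLIR01')   # hypothetical device node
# camNode.doMethod('init')                             # configure the camera
# camNode.doMethod('startAcquisition')                 # start the asynchronous readout
# camNode.doMethod('swTrigger')                        # software trigger (start storing frames)
# camNode.doMethod('stopAcquisition')                  # stop the readout and close the device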
|
the-stack_106_27710 | '''
Created on Oct 5, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import os, threading, time, logging
from tkinter import Menu, BooleanVar, font as tkFont
from arelle import (ViewWinTkTable, ModelDocument, ModelDtsObject, ModelInstanceObject, XbrlConst,
ModelXbrl, XmlValidate, Locale, FunctionXfi,
ValidateXbrlDimensions)
from arelle.ModelValue import qname, QName
from arelle.RenderingResolver import resolveAxesStructure, RENDER_UNITS_PER_CHAR
from arelle.ModelFormulaObject import Aspect, aspectModels, aspectModelAspect
from arelle.ModelInstanceObject import ModelDimensionValue
from arelle.ModelRenderingObject import (ModelClosedDefinitionNode, ModelEuAxisCoord,
ModelFilterDefinitionNode,
OPEN_ASPECT_ENTRY_SURROGATE)
from arelle.FormulaEvaluator import init as formulaEvaluatorInit, aspectMatches
from arelle.PluginManager import pluginClassMethods
from arelle.PrototypeInstanceObject import FactPrototype
from arelle.UITkTable import XbrlTable
from arelle.DialogNewFactItem import getNewFactItemOptions
from collections import defaultdict
from arelle.ValidateXbrl import ValidateXbrl
from arelle.XbrlConst import eurofilingModelNamespace, eurofilingModelPrefix
from _tkinter import TclError
from arelle.ValidateXbrlDimensions import isFactDimensionallyValid
try:
from tkinter import ttk
_Combobox = ttk.Combobox
except ImportError:
from ttk import Combobox
_Combobox = Combobox
emptyList = []
ENTRY_WIDTH_IN_CHARS = 12 # width of a data column entry cell in characters (nominal)
ENTRY_WIDTH_SCREEN_UNITS = 100
PADDING = 20 # screen units of padding between entry cells
TRACE_HEADERS = False
TRACE_FACT_PROTOS = False
TRACE_Z_CONCEPT_ASPECT = False
TRACE_Z_CHOICES = False
qnPercentItemType = qname("{http://www.xbrl.org/dtr/type/numeric}num:percentItemType")
qnPureItemType = qname("{http://www.xbrl.org/2003/instance}xbrli:pureItemType")
integerItemTypes = {"integerItemType", "nonPositiveIntegerItemType", "negativeIntegerItemType",
"longItemType", "intItemType", "shortItemType", "byteItemType",
"nonNegativeIntegerItemType", "unsignedLongItemType", "unsignedIntItemType",
"unsignedShortItemType", "unsignedByteItemType", "positiveIntegerItemType"}
TABLE_AXIS_ROLES = (XbrlConst.euTableAxis, XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011)
'''
Returns a tuple with all known table axis roles
'''
def getTableAxisArcroles():
return TABLE_AXIS_ROLES
def viewRenderedGrid(modelXbrl, tabWin, lang=None):
modelXbrl.modelManager.showStatus(_("viewing rendering"))
view = ViewRenderedGrid(modelXbrl, tabWin, lang)
view.blockMenuEvents = 1
menu = view.contextMenu()
optionsMenu = Menu(view.viewFrame, tearoff=0)
optionsMenu.add_command(label=_("New fact item options"), underline=0, command=lambda: getNewFactItemOptions(modelXbrl.modelManager.cntlr, view.newFactItemOptions))
optionsMenu.add_command(label=_("Open breakdown entry rows"), underline=0, command=view.setOpenBreakdownEntryRows)
view.ignoreDimValidity.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("Ignore Dimensional Validity"), underline=0, variable=view.ignoreDimValidity, onvalue=True, offvalue=False)
view.xAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("X-Axis Children First"), underline=0, variable=view.xAxisChildrenFirst, onvalue=True, offvalue=False)
view.yAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("Y-Axis Children First"), underline=0, variable=view.yAxisChildrenFirst, onvalue=True, offvalue=False)
menu.add_cascade(label=_("Options"), menu=optionsMenu, underline=0)
view.tablesMenu = Menu(view.viewFrame, tearoff=0)
menu.add_cascade(label=_("Tables"), menu=view.tablesMenu, underline=0)
view.tablesMenuLength = 0
view.menuAddLangs()
saveMenu = Menu(view.viewFrame, tearoff=0)
saveMenu.add_command(label=_("HTML file"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="html"))
saveMenu.add_command(label=_("Layout model"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="xml"))
saveMenu.add_command(label=_("XBRL instance"), underline=0, command=view.saveInstance)
menu.add_cascade(label=_("Save"), menu=saveMenu, underline=0)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Rendering.ContextualTableMenu"):
pluginMenuExtender(view, menu)
view.view()
view.blockSelectEvent = 1
view.blockViewModelObject = 0
view.viewFrame.bind("<Enter>", view.cellEnter, '+')
view.viewFrame.bind("<Leave>", view.cellLeave, '+')
view.viewFrame.bind("<FocusOut>", view.onQuitView, '+')
view.viewFrame.bind("<1>", view.onClick, '+') # does not currently work (since tktable changes)
view.viewFrame.bind("<Configure>", view.onConfigure, '+') # frame resized, redo column header wrap length ratios
view.blockMenuEvents = 0
return view
class CellInfo:
#See TableInfo
def __init__(self, x, y, isHeader=False, isValue=False, isChoice=False, isOpen=False):
self.x = x
self.y = y
self.isHeader = isHeader
self.isValue = isValue
self.isChoice = isChoice
self.isOpen = isOpen
def __repr__(self):
s = "x={0} y={1} isHeader={2} isValue={3} isChoice={4} isOpen={5}".format(
str(self.x), str(self.y), str(self.isHeader), str(self.isValue), str(self.isChoice), str(self.isOpen))
return s
class TableInfo:
'''
This class gives easy cell-coordinate-based access to rendered grid information.
It is presently only populated and used for tables with an open (filled-in) X or Y axis.
'''
def __init__(self):
self.maxColIndex = -1
self.maxRowIndex = -1
# cols indexed by row number (starting at 0)
self.cols = {}
self.fillTable = False
def setHeaderCell(self, x, y):
if not self.fillTable:
return
c = CellInfo(x, y, isHeader=True)
self.setCell(c)
def setValueCell(self, x, y):
if not self.fillTable:
return
c = CellInfo(x, y, isValue=True)
self.setCell(c)
def setChoiceCell(self, x, y):
if not self.fillTable:
return
c = CellInfo(x, y, isChoice=True)
self.setCell(c)
def setOpenValueCell(self, x, y):
if not self.fillTable:
return
c = CellInfo(x, y, isOpen=True)
self.setCell(c)
def setCell(self, cell):
try:
col = self.cols[cell.y]
except KeyError:
col = {}
self.cols[cell.y] = col
col[cell.x] = cell
if self.maxRowIndex < cell.x:
self.maxRowIndex = cell.x
if self.maxColIndex < cell.y:
self.maxColIndex = cell.y
def getCol(self, y):
try:
col = self.cols[y]
return col
except KeyError:
return None
def getCell(self, x, y):
try:
col = self.cols[y]
except KeyError:
return None
try:
cell = col[x]
return cell
except KeyError:
return None
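# Illustrative sketch (added note, not part of the original module): TableInfo is
# a sparse map of CellInfo objects keyed by column (y) and, within a column, by
# row (x).  A minimal, standalone example of how it is populated and queried:
#
#     info = TableInfo()
#     info.fillTable = True          # population is skipped unless this flag is set
#     info.setHeaderCell(0, 0)       # header cell at row 0, column 0
#     info.setValueCell(1, 0)        # value cell at row 1, column 0
#     info.setOpenValueCell(1, 1)    # open-axis entry cell at row 1, column 1
#     cell = info.getCell(1, 1)      # -> CellInfo with isOpen=True
#     column = info.getCol(1)        # -> {1: <CellInfo ...>}, or None for a missing column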
class ViewRenderedGrid(ViewWinTkTable.ViewTkTable):
def __init__(self, modelXbrl, tabWin, lang):
viewTitle = _("Table")
viewTitle = viewTitle + " (" + modelXbrl.getInstanceFilenameForView() + ")"
super(ViewRenderedGrid, self).__init__(modelXbrl, tabWin, viewTitle,
False, lang, self.onQuitView)
self.newFactItemOptions = ModelInstanceObject.NewFactItemOptions(xbrlInstance=modelXbrl)
self.factPrototypes = []
self.aspectEntryObjectIdsNode = {}
self.aspectEntryObjectIdsCell = {}
self.factPrototypeAspectEntryObjectIds = defaultdict(set)
self.zOrdinateChoices = None
# context menu Boolean vars
self.options = self.modelXbrl.modelManager.cntlr.config.setdefault("viewRenderedGridOptions", {})
self.openBreakdownLines = self.options.setdefault("openBreakdownLines", 5) # ensure there is a default entry
self.ignoreDimValidity = BooleanVar(value=self.options.setdefault("ignoreDimValidity",True))
self.xAxisChildrenFirst = BooleanVar(value=self.options.setdefault("xAxisChildrenFirst",True))
self.yAxisChildrenFirst = BooleanVar(value=self.options.setdefault("yAxisChildrenFirst",False))
formulaEvaluatorInit() # one-time module initialization
self.factsByDimMemQnameCache = ModelXbrl.FactsByDimMemQnameCache(modelXbrl)
self.conceptMessageIssued = False
self.tableInfo = None
def refreshTitle(self):
tid = str(self.modelXbrl.guiViews.tableView.viewFrame)
text = _("Table")
text += " (" + self.modelXbrl.getInstanceFilenameForView() + ")"
self.tabWin.tab(tid, text=text)
self.tabTitle = text
def close(self):
if True:
try:
self.tablesMenu.destroy()
except AttributeError:
pass
super(ViewRenderedGrid, self).close()
if self.modelXbrl:
for fp in self.factPrototypes:
fp.clear()
self.factPrototypes = None
self.aspectEntryObjectIdsNode.clear()
self.aspectEntryObjectIdsCell.clear()
self.factsByDimMemQnameCache.close()
self.factsByDimMemQnameCache = None
if True:
self.rendrCntx = None # remove the reference but do not manipulate since it may still be in use and shared
self.table = None
self.options = None
self.tblELR = None
self.tablesToELR = None
def loadTablesMenu(self):
tblMenuEntries = {}
tblRelSet = self.modelXbrl.relationshipSet("Table-rendering")
self.tablesToELR = {}
for tblLinkroleUri in tblRelSet.linkRoleUris:
for tableAxisArcrole in getTableAxisArcroles():
tblAxisRelSet = self.modelXbrl.relationshipSet(tableAxisArcrole, tblLinkroleUri)
if tblAxisRelSet and len(tblAxisRelSet.modelRelationships) > 0:
# table name
modelRoleTypes = self.modelXbrl.roleTypes.get(tblLinkroleUri)
if modelRoleTypes is not None and len(modelRoleTypes) > 0:
roledefinition = modelRoleTypes[0].definition
if roledefinition is None or roledefinition == "":
roledefinition = os.path.basename(tblLinkroleUri)
for table in tblAxisRelSet.rootConcepts:
# add table to menu if there's any entry
tblMenuEntries[roledefinition] = tblLinkroleUri
self.tablesToELR[table.objectId()] = tblLinkroleUri
break
self.tablesMenu.delete(0, self.tablesMenuLength)
self.tablesMenuLength = 0
self.tblELR = None
for tblMenuEntry in sorted(tblMenuEntries.items()):
tbl,elr = tblMenuEntry
self.tablesMenu.add_command(label=tbl, command=lambda e=elr: self.view(viewTblELR=e)) # use this to activate profiling from menu selection: , profile=True))
self.tablesMenuLength += 1
if self.tblELR is None:
self.tblELR = elr # start viewing first ELR
def viewReloadDueToMenuAction(self, *args):
if not self.blockMenuEvents:
# update config (config saved when exiting)
self.options["ignoreDimValidity"] = self.ignoreDimValidity.get()
self.options["xAxisChildrenFirst"] = self.xAxisChildrenFirst.get()
self.options["yAxisChildrenFirst"] = self.yAxisChildrenFirst.get()
self.view()
def setOpenBreakdownEntryRows(self, *args):
import tkinter.simpledialog
newValue = tkinter.simpledialog.askinteger(_("arelle - Open breakdown entry rows setting"),
_("The number of extra entry rows for open breakdowns is: {0} \n\n"
"(When a row header includes an open breakdown, such as \nfor typed dimension(s), this number of extra entry rows \nare provided below the table.)"
).format(self.options["openBreakdownLines"]),
parent=self.tabWin)
if newValue is not None:
self.options["openBreakdownLines"] = self.openBreakdownLines = newValue
self.viewReloadDueToMenuAction()
def view(self, viewTblELR=None, newInstance=None, profile=False):
'''
if profile: # for debugging only, to use, uncomment in loadTablesMenu
import cProfile, pstats, sys
statsFile = "/Users/hermf/temp/profileRendering.bin"
cProfile.runctx("self.view(viewTblELR=viewTblELR)", globals(), locals(), statsFile)
priorStdOut = sys.stdout
sys.stdout = open("/Users/hermf/temp/profileRendering.txt", "w")
statObj = pstats.Stats(statsFile)
statObj.strip_dirs()
statObj.sort_stats("time")
statObj.print_stats()
statObj.print_callees()
statObj.print_callers()
sys.stdout.flush()
sys.stdout.close()
del statObj
sys.stdout = priorStdOut
os.remove(statsFile)
return
'''
startedAt = time.time()
self.factsByDimMemQnameCache.clear()
self.tableInfo = TableInfo()
self.testMode = self.modelXbrl.modelManager.cntlr.testMode
self.blockMenuEvents += 1
if newInstance is not None:
self.modelXbrl = newInstance # a save operation has created a new instance to use subsequently
clearZchoices = False
if viewTblELR: # specific table selection
self.tblELR = viewTblELR
clearZchoices = True
else: # first or subsequent reloading (language, dimensions, or other change)
clearZchoices = self.zOrdinateChoices is None
if clearZchoices: # also need first time initialization
self.loadTablesMenu() # load menus (and initialize if first time)
viewTblELR = self.tblELR
if not self.tblELR:
return # no table to display
if clearZchoices:
self.zOrdinateChoices = {}
# remove old widgets
self.viewFrame.clearGrid()
tblAxisRelSet, xTopStructuralNode, yTopStructuralNode, zTopStructuralNode = resolveAxesStructure(self, viewTblELR)
colAdjustment = 1 if zTopStructuralNode is not None else 0
self.table.resizeTable(self.dataFirstRow+self.dataRows-1, self.dataFirstCol+self.dataCols+colAdjustment-1, titleRows=self.dataFirstRow-1, titleColumns=self.dataFirstCol-1)
self.hasTableFilters = bool(self.modelTable.filterRelationships)
if tblAxisRelSet:
# review row header wrap widths and limit to 2/3 of the frame width (all are screen units)
fontWidth = tkFont.Font(font='TkTextFont').configure()['size']
fontWidth = fontWidth * 3 // 2
dataColsAllowanceWidth = (fontWidth * ENTRY_WIDTH_IN_CHARS + PADDING) * self.dataCols + PADDING
frameWidth = self.viewFrame.winfo_width()
if dataColsAllowanceWidth + self.rowHdrWrapLength > frameWidth:
if dataColsAllowanceWidth > frameWidth / 2:
rowHdrAllowanceWidth = frameWidth / 2
else:
rowHdrAllowanceWidth = frameWidth - dataColsAllowanceWidth
if self.rowHdrWrapLength > rowHdrAllowanceWidth:
widthRatio = rowHdrAllowanceWidth / self.rowHdrWrapLength
self.rowHdrWrapLength = rowHdrAllowanceWidth
fixedWidth = sum(w for w in self.rowHdrColWidth if w <= RENDER_UNITS_PER_CHAR)
adjustableWidth = sum(w for w in self.rowHdrColWidth if w > RENDER_UNITS_PER_CHAR)
if adjustableWidth> 0:
widthRatio = (rowHdrAllowanceWidth - fixedWidth) / adjustableWidth
for i in range(len(self.rowHdrColWidth)):
w = self.rowHdrColWidth[i]
if w > RENDER_UNITS_PER_CHAR:
self.rowHdrColWidth[i] = int(w * widthRatio)
self.aspectEntryObjectIdsNode.clear()
self.aspectEntryObjectIdsCell.clear()
self.factPrototypeAspectEntryObjectIds.clear()
headerLabel = (self.modelTable.genLabel(lang=self.lang, strip=True) or # use table label, if any
self.roledefinition)
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCellValue"):
pluginMethod(headerLabel, 0, 0)
break
self.table.initHeaderCellValue(headerLabel,
0, 0, (self.dataFirstCol - 2),
(self.dataFirstRow - 2),
XbrlTable.TG_TOP_LEFT_JUSTIFIED)
self.zAspectStructuralNodes = defaultdict(set)
if TRACE_HEADERS:
self.headerLevel = 0
self.zAxis(1, zTopStructuralNode, clearZchoices)
xStructuralNodes = []
colsFoundPlus1, _, _, _ = self.xAxis(self.dataFirstCol, self.colHdrTopRow, self.colHdrTopRow + self.colHdrRows - 1,
xTopStructuralNode, xStructuralNodes, self.xAxisChildrenFirst.get(), True, True)
_, rowsFoundPlus1 = self.yAxis(1, self.dataFirstRow,
yTopStructuralNode, self.yAxisChildrenFirst.get(), True, True)
self.table.resizeTable(rowsFoundPlus1-1,
colsFoundPlus1+colAdjustment-1,
clearData=False)
for fp in self.factPrototypes: # dereference prior facts
if fp is not None:
fp.clear()
self.factPrototypes = []
self.bodyCells(self.dataFirstRow, yTopStructuralNode, xStructuralNodes, self.zAspectStructuralNodes, self.yAxisChildrenFirst.get())
if False:
print("bodyCells took " + "{:.2f}".format(time.time() - startedAt) + " " + os.path.basename(viewTblELR))
self.factsByDimMemQnameCache.printStats()
self.factsByDimMemQnameCache.clear()
self.table.clearModificationStatus()
self.table.disableUnusedCells()
from arelle.UITkTable import USE_resizeTableCells
if USE_resizeTableCells:
self.table.resizeTableCells()
self.modelXbrl.profileStat("viewTable_" + os.path.basename(viewTblELR), time.time() - startedAt)
#self.gridView.config(scrollregion=self.gridView.bbox(constants.ALL))
if (not self.newFactItemOptions.entityIdentScheme or # not initialized yet
not self.newFactItemOptions.entityIdentValue or
not self.newFactItemOptions.monetaryUnit or
not self.newFactItemOptions.startDateDate or not self.newFactItemOptions.endDateDate):
getNewFactItemOptions(self.modelXbrl.modelManager.cntlr, self.newFactItemOptions)
self.blockMenuEvents -= 1
def zAxis(self, row, zStructuralNode, clearZchoices):
if zStructuralNode is not None:
if TRACE_HEADERS:
self.headerLevel += 1
tracePrefix = str(self.headerLevel) + (" " * self.headerLevel) + "z "
label = zStructuralNode.header(lang=self.lang)
xValue = self.dataFirstCol-1
yValue = row-1
if TRACE_HEADERS:
print(tracePrefix + str(zStructuralNode))
print(tracePrefix + str(label), " x=" + str(xValue) + " y=" + str(yValue))
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCellValue"):
pluginMethod(label, xValue, yValue)
break
self.table.initHeaderCellValue(label,
xValue, yValue,
0, 0,
XbrlTable.TG_LEFT_JUSTIFIED,
objectId=zStructuralNode.objectId())
if zStructuralNode.choiceStructuralNodes is not None: # combo box
valueHeaders = [''.ljust(zChoiceStructuralNode.indent * 4) + # indent if nested choices
(zChoiceStructuralNode.header(lang=self.lang, inheritedAspects=False) or '')
for zChoiceStructuralNode in zStructuralNode.choiceStructuralNodes]
if TRACE_Z_CHOICES:
print("headers: " + str(valueHeaders))
zAxisIsOpenExplicitDimension = False
zAxisTypedDimension = None
i = zStructuralNode.choiceNodeIndex # for aspect entry, use header selected
zStructuralNodeSelectionIndex = None
memberSelectionIndex = None
if i == 0:
# No previous selection: try to determine the first Z selection for which facts do exist
zStructuralNodeSelectionIndex, memberSelectionIndex = self.getFirstZChoiceWithFacts(zStructuralNode)
if zStructuralNodeSelectionIndex is not None:
i = zStructuralNodeSelectionIndex
zStructuralNode.choiceNodeIndex = zStructuralNodeSelectionIndex
comboBoxValue = None if i >= 0 else zStructuralNode.aspects.get('aspectValueLabel')
chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
aspect = None
for aspect in chosenStructuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
# for open filter nodes of explicit dimension allow selection of all values
zAxisAspectEntryMode = False
if isinstance(chosenStructuralNode.definitionNode, ModelFilterDefinitionNode):
if isinstance(aspect, QName):
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
if len(valueHeaders) != 1 or valueHeaders[0]: # not just a blank initial entry
valueHeaders.append("(all members)")
else:
valueHeaders.extend(
self.explicitDimensionFilterMembers(zStructuralNode, chosenStructuralNode))
zAxisAspectEntryMode = True
zAxisIsOpenExplicitDimension = True
elif dimConcept.isTypedDimension:
if (zStructuralNode.choiceStructuralNodes[0].contextItemBinding is None and
not valueHeaders[0]): # remove filterNode from the list
''' this isn't reliable
if i > 0:
del zStructuralNode.choiceStructuralNodes[0]
del valueHeaders[0]
zStructuralNode.choiceNodeIndex = i = i-1
'''
if i >= 0:
chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
else:
chosenStructuralNode = zStructuralNode # use aspects of structural node (for entered typed value)
if not comboBoxValue and not valueHeaders:
comboBoxValue = "--please select--"
i = -1
valueHeaders.append("(enter typed member)")
zAxisTypedDimension = dimConcept
if TRACE_HEADERS:
print(tracePrefix + "header combo", " x=" + str(self.dataFirstCol) + " y=" + str(row-1))
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCombobox"):
pluginMethod(self.dataFirstCol, row-1)
break
comboSelectIndex = zStructuralNode.choiceNodeIndex if i >= 0 else None
if zStructuralNodeSelectionIndex is not None:
comboSelectIndex = zStructuralNodeSelectionIndex
if memberSelectionIndex is not None:
comboSelectIndex = memberSelectionIndex + 1 # add one for the first empty entry
combobox = self.table.initHeaderCombobox(self.dataFirstCol,
row-1,
colspan=0,
values=valueHeaders,
value=comboBoxValue,
selectindex=comboSelectIndex,
comboboxselected=self.onZComboBoxSelected)
combobox.zStructuralNode = zStructuralNode
combobox.zAxisIsOpenExplicitDimension = zAxisIsOpenExplicitDimension
combobox.zAxisTypedDimension = zAxisTypedDimension
combobox.zAxisAspectEntryMode = zAxisAspectEntryMode
combobox.zAxisAspect = aspect
combobox.zChoiceOrdIndex = row - 1
combobox.objectId = zStructuralNode.objectId()
# add aspect for chosen node
self.setZStructuralNodeAspects(chosenStructuralNode)
if memberSelectionIndex is not None:
# Even though the grid is displayed correctly in this case, the aspects
# attribute of the structural node still has to be set so that proper
# filtering happens when filling the body cells.
# The code below mirrors what is done in onZComboBoxSelected.
#TODO: reorganize onZComboBoxSelected to factor out the shared logic (everything except the event and view handling)
structuralNode = combobox.zStructuralNode
if combobox.zAxisAspectEntryMode:
aspectValue = structuralNode.aspectEntryHeaderValues.get(combobox.get())
if aspectValue is not None:
self.zOrdinateChoices[structuralNode.definitionNode] = \
structuralNode.aspects = {combobox.zAxisAspect: aspectValue, 'aspectValueLabel': combobox.get()}
else:
#process aspect on this node before child nodes in case it is overridden
self.setZStructuralNodeAspects(zStructuralNode)
# nested nodes override parent nodes
for zStructuralNode in zStructuralNode.childStructuralNodes:
self.zAxis(row + 1, zStructuralNode, clearZchoices)
if TRACE_HEADERS:
self.headerLevel -= 1
def getFirstZChoiceWithFacts(self, zStructuralNode):
zStructuralNodeSelectionIndex = None
memberSelectionIndex = None
# try to determine the first Z selection for which facts do exist
choiceIndex = 0
for zSNode in zStructuralNode.choiceStructuralNodes:
aspect = None
for aspect in zSNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
if aspect is not None:
if isinstance(aspect, QName):
if TRACE_Z_CHOICES:
label = str(zSNode.header(lang="en"))
print("1-examining aspect choiceIndex=" + str(choiceIndex) + " " + str(aspect), " label=" + label)
if isinstance(zSNode.definitionNode, ModelFilterDefinitionNode):
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
aspectValue = zSNode.aspectValue(aspect, inherit=True)
if isinstance(aspectValue, ModelDimensionValue):
if aspectValue.isExplicit:
concept = aspectValue.member
facts = self.factsByDimMemQnameCache.factsByDimMemQname(aspect, concept.qname)
if len(facts) > 0:
zStructuralNodeSelectionIndex = choiceIndex
if TRACE_Z_CHOICES:
print("->selected " + str(label) + " #facts= " + str(len(facts)))
break
if hasattr(zSNode.definitionNode, 'aspectValues'):
try:
if TRACE_Z_CHOICES:
print(str(zSNode.definitionNode.aspectValues) + "value=" + str(zSNode.aspectValue(aspect)))
facts = self.factsByDimMemQnameCache.factsByDimMemQname(aspect, zSNode.aspectValue(aspect))
if len(facts) > 0:
zStructuralNodeSelectionIndex = choiceIndex
if TRACE_Z_CHOICES:
print("->selected with #facts= " + str(len(facts)))
break
except:
pass
choiceIndex += 1
# similar loop with lower priority conditions
choiceIndex = 0
if zStructuralNodeSelectionIndex is None:
for zSNode in zStructuralNode.choiceStructuralNodes:
aspect = None
for aspect in zSNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
if aspect is not None:
if isinstance(aspect, QName):
if TRACE_Z_CHOICES:
label = str(zSNode.header(lang="en"))
print("2-examining aspect choiceIndex=" + str(choiceIndex) + " " + str(aspect), " label=" + label)
if isinstance(zSNode.definitionNode, ModelFilterDefinitionNode):
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
aspectValue = zSNode.aspectValue(aspect, inherit=True)
if isinstance(aspectValue, ModelDimensionValue):
pass
else:
if TRACE_Z_CHOICES:
print(self.explicitDimensionFilterMembers(zStructuralNode, zSNode))
valuesDict = self.getQnameValues(aspect, zSNode)
sorteddKeys = sorted(valuesDict)
if self.modelXbrl.hasFactsForExplicitDimQname(aspect):
if TRACE_Z_CHOICES:
print("Found facts with this aspect; values are" + str(valuesDict))
memberIdx = 0
# this may cost a lot e.g. for things like the list of 250+ countries
for key in sorteddKeys:
qname = valuesDict[key]
facts = self.factsByDimMemQnameCache.factsByDimMemQname(aspect, qname)
if len(facts) > 0:
zStructuralNodeSelectionIndex = choiceIndex
# should also return the proper selected member for the combo...
if TRACE_Z_CHOICES:
print("->selected with #facts= " + str(len(facts)) + " memberSelectionIndex= " + str(memberSelectionIndex))
memberSelectionIndex = memberIdx
break
memberIdx += 1
if TRACE_Z_CHOICES:
print("member loops " + str(memberIdx))
else:
# use the first member
testedKey = "LUXEMBOURG"
try:
memberSelectionIndex = sorteddKeys.index(testedKey)
except:
pass
if memberSelectionIndex is None:
testedKey = "EUR"
try:
memberSelectionIndex = sorteddKeys.index(testedKey)
except:
pass
if memberSelectionIndex is None:
memberSelectionIndex = 0
if TRACE_Z_CHOICES:
print("No fact can be found with this aspect. Will use memeber index= " + str(memberSelectionIndex))
elif dimConcept.isTypedDimension:
label = str(zSNode.header(lang="en"))
if label != "None":
#TODO: check existence of facts instead of picking first entry with existing label
zStructuralNodeSelectionIndex = choiceIndex
break
if zStructuralNodeSelectionIndex is not None:
break
choiceIndex += 1
return (zStructuralNodeSelectionIndex, memberSelectionIndex)
def getQnameValues(self, aspect, structuralNodeWithFilter):
#TODO: avoid this copy of explicitDimensionFilterMembers code
valueHeaders = set()
headerValues = {}
# check for dimension filter(s)
dimFilterRels = structuralNodeWithFilter.definitionNode.filterRelationships
if dimFilterRels:
for rel in dimFilterRels:
dimFilter = rel.toModelObject
if dimFilter is not None:
for memberModel in dimFilter.memberProgs:
memQname = memberModel.qname
memConcept = self.modelXbrl.qnameConcepts.get(memQname)
if memConcept is not None and (not memberModel.axis or memberModel.axis.endswith('-self')):
header = memConcept.label(lang=self.lang)
valueHeaders.add(header)
if rel.isUsable:
headerValues[header] = memQname
else:
headerValues[header] = memConcept
if memberModel.axis and memberModel.linkrole and memberModel.arcrole:
# merge of pull request 42 acsone:TABLE_Z_AXIS_DESCENDANT_OR_SELF
if memberModel.axis.endswith('-or-self'):
searchAxis = memberModel.axis[:len(memberModel.axis)-len('-or-self')]
else:
searchAxis = memberModel.axis
relationships = concept_relationships(self.rendrCntx,
None,
(memQname,
memberModel.linkrole,
memberModel.arcrole,
searchAxis),
False) # return flat list
for rel in relationships:
if rel.isUsable:
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
if not valueHeaders:
relationships = concept_relationships(self.rendrCntx,
None,
(aspect,
"XBRL-all-linkroles", # linkrole,
"XBRL-dimensions",
'descendant'),
False) # return flat list
for rel in relationships:
if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
and rel.isUsable):
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
return headerValues
def setZStructuralNodeAspects(self, zStructuralNode, add=True):
if TRACE_Z_CONCEPT_ASPECT:
if Aspect.CONCEPT in aspectModels[self.aspectModel]:
zHasAspect = (Aspect.CONCEPT in zStructuralNode.aspects or zStructuralNode.hasAspect(Aspect.CONCEPT, inherit=True))
if zHasAspect:
print("zStructuralNode Has CONCEPT aspect " + str(zStructuralNode))
for aspect in aspectModels[self.aspectModel]:
if (aspect in zStructuralNode.aspects or # might be added as custom-entered value (typed dim)
zStructuralNode.hasAspect(aspect, inherit=True)): #implies inheriting from other z axes
if aspect == Aspect.DIMENSIONS:
for dim in (zStructuralNode.aspectValue(Aspect.DIMENSIONS, inherit=True) or emptyList):
if add:
self.zAspectStructuralNodes[dim].add(zStructuralNode)
else:
self.zAspectStructuralNodes[dim].discard(zStructuralNode)
else:
if add:
self.zAspectStructuralNodes[aspect].add(zStructuralNode)
else:
self.zAspectStructuralNodes[aspect].discard(zStructuralNode)
def onZComboBoxSelected(self, event):
combobox = event.widget
structuralNode = combobox.zStructuralNode
if combobox.zAxisAspectEntryMode:
aspectValue = structuralNode.aspectEntryHeaderValues.get(combobox.get())
if aspectValue is not None:
self.zOrdinateChoices[structuralNode.definitionNode] = \
structuralNode.aspects = {combobox.zAxisAspect: aspectValue, 'aspectValueLabel': combobox.get()}
self.view() # redraw grid
elif combobox.zAxisIsOpenExplicitDimension and combobox.get() == "(all members)":
# reload combo box
self.comboboxLoadExplicitDimension(combobox,
structuralNode, # owner of combobox
structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex]) # aspect filter node
structuralNode.choiceNodeIndex = -1 # use entry aspect value
combobox.zAxisAspectEntryMode = True
elif combobox.zAxisTypedDimension is not None and combobox.get() == "(enter typed member)":
# ask typed member entry
import tkinter.simpledialog
result = tkinter.simpledialog.askstring(_("Enter new typed dimension value"),
combobox.zAxisTypedDimension.label(),
parent=self.tabWin)
if result:
structuralNode.choiceNodeIndex = -1 # use entry aspect value
aspectValue = FunctionXfi.create_element(self.rendrCntx,
None,
(combobox.zAxisTypedDimension.typedDomainElement.qname, (), result))
self.zOrdinateChoices[structuralNode.definitionNode] = \
structuralNode.aspects = {combobox.zAxisAspect: aspectValue,
Aspect.DIMENSIONS: {combobox.zAxisTypedDimension.qname},
'aspectValueLabel': result}
if not hasattr(structuralNode, "aspectEntryHeaderValues"): structuralNode.aspectEntryHeaderValues = {}
structuralNode.aspectEntryHeaderValues[result] = aspectValue
valueHeaders = list(combobox["values"])
if result not in valueHeaders: valueHeaders.insert(0, result)
combobox["values"] = valueHeaders
combobox.zAxisAspectEntryMode = True
self.view() # redraw grid
else:
# remove prior combo choice aspect
self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex], add=False)
i = combobox.valueIndex
self.zOrdinateChoices[structuralNode.definitionNode] = structuralNode.choiceNodeIndex = i
# set current combo choice aspect
self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[i])
self.view() # redraw grid
def xAxis(self, leftCol, topRow, rowBelow, xParentStructuralNode, xStructuralNodes, childrenFirst, renderNow, atTop):
if xParentStructuralNode is not None:
if TRACE_HEADERS:
self.headerLevel += 1
tracePrefix = str(self.headerLevel) + (" " * self.headerLevel) + "x "
parentRow = rowBelow
noDescendants = True
rightCol = leftCol
widthToSpanParent = 0
for xStructuralNode in xParentStructuralNode.childStructuralNodes:
if TRACE_HEADERS:
print(tracePrefix + str(xStructuralNode))
if not xStructuralNode.isRollUp:
noDescendants = False
isLabeled = xStructuralNode.isLabeled
isAbstract = (xStructuralNode.isAbstract or
(xStructuralNode.childStructuralNodes and
not isinstance(xStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord))))
isNonAbstract = not isAbstract
rightCol, row, width, leafNode = self.xAxis(leftCol, topRow + isLabeled, rowBelow, xStructuralNode, xStructuralNodes, # nested items before totals
childrenFirst, childrenFirst, False)
if row - 1 < parentRow:
parentRow = row - 1
#if not leafNode:
# rightCol -= 1
if isNonAbstract and isLabeled:
width += ENTRY_WIDTH_SCREEN_UNITS # width for this label, in screen units
widthToSpanParent += width
if childrenFirst:
thisCol = rightCol
else:
thisCol = leftCol
if renderNow and isLabeled:
if TRACE_HEADERS:
print(tracePrefix + "dataFirstRow=" + str(self.dataFirstRow) + " colHdrNonStdRoles=" + str(len(self.colHdrNonStdRoles)) + " colHdrTopRow=" + str(self.colHdrTopRow) + " colHdrRows=" + str(self.colHdrRows) + " leftCol=" + str(leftCol) + " topRow=" + str(topRow))
columnspan = (rightCol - leftCol + (1 if isNonAbstract else 0))
label = xStructuralNode.header(lang=self.lang,
returnGenLabel=isinstance(xStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))
if label != OPEN_ASPECT_ENTRY_SURROGATE:
xValue = leftCol-1
yValue = topRow-1
headerLabel = label if label else " "
if TRACE_HEADERS:
print(tracePrefix + str(label) + " x=" + str(xValue) + " y=" + str(yValue))
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCellValue"):
pluginMethod(headerLabel, xValue, yValue)
break
if False: #TODO: complete X open case
if xStructuralNode.contextItemBinding is not None and xStructuralNode.contextItemBinding.yieldedFact is not None:
yf = xStructuralNode.contextItemBinding.yieldedFact
if not(isinstance(yf, FactPrototype)):
print("Open X node with value " + label + " x=" + str(xValue) + " y=" + str(yValue))
self.table.initHeaderCellValue(headerLabel,
xValue, yValue,
columnspan-1,
((row - topRow) if leafNode else 0),
XbrlTable.TG_CENTERED,
objectId=xStructuralNode.objectId(),
isRollUp=columnspan>1 and isNonAbstract and len(xStructuralNode.childStructuralNodes)<columnspan)
else:
self.aspectEntryObjectIdsNode[xStructuralNode.aspectEntryObjectId] = xStructuralNode
if TRACE_HEADERS:
print(tracePrefix + "header combo" + " x=" + str(leftCol-1) + " y=" + str(topRow-1))
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCombobox"):
pluginMethod(leftCol-1, topRow-1)
break
self.aspectEntryObjectIdsCell[xStructuralNode.aspectEntryObjectId] = self.table.initHeaderCombobox(leftCol-1,
topRow-1,
values=self.aspectEntryValues(xStructuralNode),
objectId=xStructuralNode.aspectEntryObjectId,
comboboxselected=self.onAspectComboboxSelection)
if isNonAbstract:
xValue = thisCol - 1
for i, role in enumerate(self.colHdrNonStdRoles):
j = (self.dataFirstRow
- len(self.colHdrNonStdRoles) + i)-1
cellValue = xStructuralNode.header(role=role, lang=self.lang)
if TRACE_HEADERS:
print(tracePrefix + "roleNum=" + str(i) + " " + str(cellValue) + " x=" + str(xValue) + " y=" + str(j))
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCellValue"):
pluginMethod(cellValue, xValue, j)
break
self.table.initHeaderCellValue(cellValue,
xValue,
j,
0,
0,
XbrlTable.TG_CENTERED,
objectId=xStructuralNode.objectId())
xStructuralNodes.append(xStructuralNode)
if isNonAbstract:
rightCol += 1
if renderNow and not childrenFirst:
self.xAxis(leftCol + (1 if isNonAbstract else 0), topRow + isLabeled, rowBelow, xStructuralNode, xStructuralNodes, childrenFirst, True, False) # render on this pass
leftCol = rightCol
if TRACE_HEADERS:
self.headerLevel -= 1
return (rightCol, parentRow, widthToSpanParent, noDescendants)
def yAxis(self, leftCol, row, yParentStructuralNode, childrenFirst, renderNow, atLeft):
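        """Recursively lay out the Y (row) axis headers for the children of yParentStructuralNode,
        rendering header cells and open-aspect entry comboboxes when renderNow is set.
        Returns (nestedBottomRow, row)."""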
if yParentStructuralNode is not None:
if TRACE_HEADERS:
self.headerLevel += 1
tracePrefix = str(self.headerLevel) + (" " * self.headerLevel) + "y "
nestedBottomRow = row
for yStructuralNode in yParentStructuralNode.childStructuralNodes:
if TRACE_HEADERS:
print(tracePrefix + str(yStructuralNode))
if not yStructuralNode.isRollUp:
isAbstract = (yStructuralNode.isAbstract or
(yStructuralNode.childStructuralNodes and
not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord))))
isNonAbstract = not isAbstract
isLabeled = yStructuralNode.isLabeled
nestRow, nextRow = self.yAxis(leftCol + isLabeled, row, yStructuralNode, # nested items before totals
childrenFirst, childrenFirst, False)
topRow = row
if childrenFirst and isNonAbstract:
row = nextRow
if renderNow and isLabeled:
columnspan = self.rowHdrCols - leftCol + 1 if isNonAbstract or nextRow == row else 1
depth = yStructuralNode.depth
wraplength = (self.rowHdrColWidth[depth] if isAbstract else
self.rowHdrWrapLength - sum(self.rowHdrColWidth[0:depth]))
if wraplength < 0:
wraplength = self.rowHdrColWidth[depth]
label = yStructuralNode.header(lang=self.lang,
returnGenLabel=isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)),
recurseParent=not isinstance(yStructuralNode.definitionNode, ModelFilterDefinitionNode))
if label != OPEN_ASPECT_ENTRY_SURROGATE:
xValue = leftCol-1
yValue = row-1
if TRACE_HEADERS:
print(tracePrefix + str(label) + " x=" + str(xValue) + " y=" + str(yValue))
headerLabel = label if label is not None else " "
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCellValue"):
pluginMethod(headerLabel, xValue, yValue)
break
allowDeleteOpenLine = False # temporarily off since not yet complete
isFilledOpenLine = False
if allowDeleteOpenLine:
if yStructuralNode.contextItemBinding is not None and yStructuralNode.contextItemBinding.yieldedFact is not None:
yf = yStructuralNode.contextItemBinding.yieldedFact
if not(isinstance(yf, FactPrototype)):
#print("Open Y node with value " + label + " x=" + str(xValue) + " y=" + str(yValue))
isFilledOpenLine = True
#TODO for X open axis
if isFilledOpenLine:
yValue = row-1
xValue = leftCol-1
objectId = yStructuralNode.objectId()
#TODO for other cases than starting with first column (e.g. multiple breakdowns in Y)
if xValue == 0:
self.tableInfo.fillTable = True
self.table.initFilledFirstCellOpenRow(headerLabel, xValue, yValue, objectId=objectId,
command=lambda x=xValue, y=yValue: self.deleteFilledOpenRow(x, y)) #closed arg. values
else:
self.table.initHeaderCellValue(headerLabel, xValue, yValue, 0, 0, XbrlTable.TG_RIGHT_JUSTIFIED, objectId=objectId)
self.tableInfo.setOpenValueCell(xValue, yValue)
else:
self.table.initHeaderCellValue(headerLabel,
xValue, yValue,
columnspan-1,
(nestRow - row if isAbstract else 1)-1,
(XbrlTable.TG_LEFT_JUSTIFIED
if isNonAbstract or nestRow == row
else XbrlTable.TG_CENTERED),
objectId=yStructuralNode.objectId(),
isRollUp=columnspan>1 and isNonAbstract and (len(yStructuralNode.childStructuralNodes)>1 or (len(yStructuralNode.childStructuralNodes)==1 and not(yStructuralNode.childStructuralNodes[0].isAbstract))))
else:
self.aspectEntryObjectIdsNode[yStructuralNode.aspectEntryObjectId] = yStructuralNode
if TRACE_HEADERS:
print(tracePrefix + "header combo" + " x=" + str(leftCol-1) + " y=" + str(row-1))
self.aspectEntryObjectIdsCell[yStructuralNode.aspectEntryObjectId] = self.table.initHeaderCombobox(leftCol-1,
row-1,
values=self.aspectEntryValues(yStructuralNode),
objectId=yStructuralNode.aspectEntryObjectId,
comboboxselected=self.onAspectComboboxSelection)
if isNonAbstract:
for i, role in enumerate(self.rowHdrNonStdRoles):
isCode = "code" in role
docCol = self.dataFirstCol - len(self.rowHdrNonStdRoles) + i-1
yValue = row-1
label = yStructuralNode.header(role=role, lang=self.lang)
if TRACE_HEADERS:
print(tracePrefix + str(label) + " x=" + str(docCol) + " y=" + str(yValue))
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitHeaderCellValue"):
pluginMethod(label, docCol, yValue)
break
self.table.initHeaderCellValue(label,
docCol, yValue,
0, 0,
XbrlTable.TG_CENTERED if isCode else XbrlTable.TG_RIGHT_JUSTIFIED,
objectId=yStructuralNode.objectId())
if isNonAbstract:
row += 1
elif childrenFirst:
row = nextRow
if nestRow > nestedBottomRow:
nestedBottomRow = nestRow + (isNonAbstract and not childrenFirst)
if row > nestedBottomRow:
nestedBottomRow = row
#if renderNow and not childrenFirst:
# dummy, row = self.yAxis(leftCol + 1, row, yStructuralNode, childrenFirst, True, False) # render on this pass
if not childrenFirst:
dummy, row = self.yAxis(leftCol + isLabeled, row, yStructuralNode, childrenFirst, renderNow, False) # render on this pass
if TRACE_HEADERS:
self.headerLevel -= 1
return (nestedBottomRow, row)
def getbackgroundColor(self, factPrototype):
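        """Choose the background colour tag for a fact cell from its concept's data type:
        default for monetary, orange for shares/integer, yellow for percent/pure,
        green for dates and violet for strings."""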
bgColor = XbrlTable.TG_BG_DEFAULT # default monetary
concept = factPrototype.concept
        if concept is None:
return bgColor
isNumeric = concept.isNumeric
# isMonetary = concept.isMonetary
isInteger = concept.baseXbrliType in integerItemTypes
isPercent = concept.typeQname in (qnPercentItemType, qnPureItemType)
isString = concept.baseXbrliType in ("stringItemType", "normalizedStringItemType")
isDate = concept.baseXbrliType in ("dateTimeItemType", "dateItemType")
if isNumeric:
if concept.isShares or isInteger:
bgColor = XbrlTable.TG_BG_ORANGE
elif isPercent:
bgColor = XbrlTable.TG_BG_YELLOW
# else assume isMonetary
elif isDate:
bgColor = XbrlTable.TG_BG_GREEN
elif isString:
bgColor = XbrlTable.TG_BG_VIOLET
        return bgColor
def bodyCells(self, row, yParentStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst):
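        """Render the data cells: for each non-abstract, labeled Y node and each X node,
        assemble the cell's aspect values into a FactPrototype, look for a matching fact
        in the instance and initialize the corresponding cell widget (value or combobox).
        Returns the next free row index."""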
if yParentStructuralNode is not None:
dimDefaults = self.modelXbrl.qnameDimensionDefaults
for yStructuralNode in yParentStructuralNode.childStructuralNodes:
if yChildrenFirst:
row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
if not (yStructuralNode.isAbstract or
(yStructuralNode.childStructuralNodes and
not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))) and yStructuralNode.isLabeled:
isYEntryPrototype = yStructuralNode.isEntryPrototype(default=False) # row to enter open aspects
yAspectStructuralNodes = defaultdict(set)
for aspect in aspectModels[self.aspectModel]:
if yStructuralNode.hasAspect(aspect):
if aspect == Aspect.DIMENSIONS:
for dim in (yStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
yAspectStructuralNodes[dim].add(yStructuralNode)
else:
yAspectStructuralNodes[aspect].add(yStructuralNode)
yTagSelectors = yStructuralNode.tagSelectors
# data for columns of row
ignoreDimValidity = self.ignoreDimValidity.get()
for i, xStructuralNode in enumerate(xStructuralNodes):
isEntryPrototype = isYEntryPrototype or xStructuralNode.isEntryPrototype(default=False)
xAspectStructuralNodes = defaultdict(set)
for aspect in aspectModels[self.aspectModel]:
if xStructuralNode.hasAspect(aspect):
if aspect == Aspect.DIMENSIONS:
for dim in (xStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
xAspectStructuralNodes[dim].add(xStructuralNode)
else:
xAspectStructuralNodes[aspect].add(xStructuralNode)
cellTagSelectors = yTagSelectors | xStructuralNode.tagSelectors
cellAspectValues = {}
matchableAspects = set()
for aspect in _DICT_SET(xAspectStructuralNodes.keys()) | _DICT_SET(yAspectStructuralNodes.keys()) | _DICT_SET(zAspectStructuralNodes.keys()):
aspectValue = xStructuralNode.inheritedAspectValue(yStructuralNode,
self, aspect, cellTagSelectors,
xAspectStructuralNodes, yAspectStructuralNodes, zAspectStructuralNodes)
# value is None for a dimension whose value is to be not reported in this slice
if (isinstance(aspect, _INT) or # not a dimension
dimDefaults.get(aspect) != aspectValue or # explicit dim defaulted will equal the value
aspectValue is not None): # typed dim absent will be none
cellAspectValues[aspect] = aspectValue
matchableAspects.add(aspectModelAspect.get(aspect,aspect)) #filterable aspect from rule aspect
cellDefaultedDims = _DICT_SET(dimDefaults) - _DICT_SET(cellAspectValues.keys())
priItemQname = cellAspectValues.get(Aspect.CONCEPT)
concept = self.modelXbrl.qnameConcepts.get(priItemQname)
conceptNotAbstract = concept is None or not concept.isAbstract
value = None
objectId = None
justify = None
fp = FactPrototype(self, cellAspectValues)
if TRACE_FACT_PROTOS:
print("Created fact prototype; x=" + str(self.dataFirstCol + i-1) + " y=" + str(row-1) + " " + str(fp))
if fp.concept is None:
print(" -> prototype is missing concept")
if conceptNotAbstract:
# reduce set of matchable facts to those with pri item qname and have dimension aspects
facts = self.modelXbrl.factsByQname(priItemQname, set()) if priItemQname else self.modelXbrl.factsInInstance
if self.hasTableFilters:
facts = self.modelTable.filterFacts(self.rendrCntx, facts)
for aspect in matchableAspects: # trim down facts with explicit dimensions match or just present
if isinstance(aspect, QName):
aspectValue = cellAspectValues.get(aspect, None)
if isinstance(aspectValue, ModelDimensionValue):
if aspectValue.isExplicit:
dimMemQname = aspectValue.memberQname # match facts with this explicit value
else:
dimMemQname = None # match facts that report this dimension
elif isinstance(aspectValue, QName):
dimMemQname = aspectValue # match facts that have this explicit value
elif aspectValue is None: # match typed dims that don't report this value
dimMemQname = ModelXbrl.DEFAULT
else:
dimMemQname = None # match facts that report this dimension
newFacts = self.factsByDimMemQnameCache.factsByDimMemQname(aspect, dimMemQname)
facts = facts & newFacts
                                    if len(facts) == 0:
                                        break
for fact in facts:
if (all(aspectMatches(self.rendrCntx, fact, fp, aspect)
for aspect in matchableAspects) and
all(fact.context.dimMemberQname(dim,includeDefaults=True) in (dimDefaults[dim], None)
for dim in cellDefaultedDims) and
len(fp.context.qnameDims) == len(fact.context.qnameDims)):
if yStructuralNode.hasValueExpression(xStructuralNode):
value = yStructuralNode.evalValueExpression(fact, xStructuralNode)
else:
value = fact.effectiveValue
objectId = fact.objectId()
justify = XbrlTable.TG_RIGHT_JUSTIFIED if fact.isNumeric else XbrlTable.TG_LEFT_JUSTIFIED
break
if (conceptNotAbstract and
(value is not None or ignoreDimValidity or isFactDimensionallyValid(self, fp) or
isEntryPrototype)):
if objectId is None:
objectId = "f{0}".format(len(self.factPrototypes))
if TRACE_FACT_PROTOS:
print("Add factPrototype " + str(fp))
self.factPrototypes.append(fp) # for property views
for aspect, aspectValue in cellAspectValues.items():
if isinstance(aspectValue, str) and aspectValue.startswith(OPEN_ASPECT_ENTRY_SURROGATE):
self.factPrototypeAspectEntryObjectIds[objectId].add(aspectValue)
modelConcept = fp.concept
if (justify is None) and modelConcept is not None:
justify = XbrlTable.TG_RIGHT_JUSTIFIED if modelConcept.isNumeric else XbrlTable.TG_LEFT_JUSTIFIED
if modelConcept is not None and modelConcept.isEnumeration:
myValidationObject = ValidateXbrl(self.modelXbrl)
enumerationSet = ValidateXbrlDimensions.usableEnumerationMembers(myValidationObject, modelConcept)
enumerationDict = dict()
for enumerationItem in enumerationSet:
# we need to specify the concept linkrole to sort out between possibly many different labels
enumerationDict[enumerationItem.label(linkrole=modelConcept.enumLinkrole)] = enumerationItem.qname
enumerationValues = sorted(list(enumerationDict.keys()))
enumerationQNameStrings = [""]+list(str(enumerationDict[enumerationItem]) for enumerationItem in enumerationValues)
enumerationValues = [""]+enumerationValues
try:
selectedIdx = enumerationQNameStrings.index(value)
effectiveValue = enumerationValues[selectedIdx]
except ValueError:
effectiveValue = enumerationValues[0]
selectedIdx = 0
xValue = self.dataFirstCol + i-1
yValue = row-1
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitCellCombobox"):
pluginMethod(effectiveValue, enumerationValues, xValue, yValue)
break
self.tableInfo.setChoiceCell(xValue, yValue)
self.table.initCellCombobox(effectiveValue,
enumerationValues,
xValue,
yValue,
objectId=objectId,
selectindex=selectedIdx,
codes=enumerationDict)
elif modelConcept is not None and modelConcept.type.qname == XbrlConst.qnXbrliQNameItemType:
if eurofilingModelPrefix in concept.nsmap and concept.nsmap.get(eurofilingModelPrefix) == eurofilingModelNamespace:
hierarchy = concept.get("{" + eurofilingModelNamespace + "}" + "hierarchy", None)
domainQNameAsString = concept.get("{" + eurofilingModelNamespace + "}" + "domain", None)
if hierarchy is not None and domainQNameAsString is not None:
newAspectValues = [""]
newAspectQNames = dict()
newAspectQNames[""] = None
domPrefix, _, domLocalName = domainQNameAsString.strip().rpartition(":")
domNamespace = concept.nsmap.get(domPrefix)
relationships = concept_relationships(self.rendrCntx,
None,
(QName(domPrefix, domNamespace, domLocalName),
hierarchy, # linkrole,
"XBRL-dimensions",
'descendant'),
False) # return flat list
for rel in relationships:
if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
and rel.isUsable):
header = rel.toModelObject.label(lang=self.lang)
newAspectValues.append(header)
currentQName = rel.toModelObject.qname
if str(currentQName) == value:
value = header
newAspectQNames[header] = currentQName
else:
newAspectValues = None
else:
newAspectValues = None
if newAspectValues is None:
xValue = self.dataFirstCol + i-1
yValue = row-1
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitCellValue"):
pluginMethod(value, xValue, yValue)
break
self.tableInfo.setValueCell(xValue, yValue)
self.table.initCellValue(value,
xValue,
yValue,
justification=justify,
objectId=objectId,
backgroundColourTag=self.getbackgroundColor(fp))
else:
qNameValues = newAspectValues
try:
selectedIdx = qNameValues.index(value)
effectiveValue = value
except ValueError:
effectiveValue = qNameValues[0]
selectedIdx = 0
xValue = self.dataFirstCol + i-1
yValue = row-1
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitCellCombobox"):
pluginMethod(effectiveValue, qNameValues, xValue, yValue)
break
self.tableInfo.setChoiceCell(xValue, yValue)
self.table.initCellCombobox(effectiveValue,
qNameValues,
xValue,
yValue,
objectId=objectId,
selectindex=selectedIdx,
codes=newAspectQNames)
elif modelConcept is not None and modelConcept.type.qname == XbrlConst.qnXbrliBooleanItemType:
booleanValues = ["",
XbrlConst.booleanValueTrue,
XbrlConst.booleanValueFalse]
try:
selectedIdx = booleanValues.index(value)
effectiveValue = value
except ValueError:
effectiveValue = booleanValues[0]
selectedIdx = 0
xValue = self.dataFirstCol + i-1
yValue = row-1
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitCellCombobox"):
pluginMethod(effectiveValue, booleanValues, xValue, yValue)
break
self.tableInfo.setChoiceCell(xValue, yValue)
self.table.initCellCombobox(effectiveValue,
booleanValues,
xValue,
yValue,
objectId=objectId,
selectindex=selectedIdx)
else:
xValue = self.dataFirstCol + i-1
yValue = row-1
if self.testMode:
for pluginMethod in pluginClassMethods("DevTesting.InitCellValue"):
pluginMethod(value, xValue, yValue)
break
self.tableInfo.setValueCell(xValue, yValue)
self.table.initCellValue(value,
xValue,
yValue,
justification=justify,
objectId=objectId,
backgroundColourTag=self.getbackgroundColor(fp))
else:
fp.clear() # dereference
row += 1
if not yChildrenFirst:
row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
return row
def onClick(self, event):
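        """Show the clicked cell's object (instance fact or fact prototype) in the model object views."""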
try:
objId = event.widget.objectId
if objId and objId[0] == "f":
viewableObject = self.factPrototypes[int(objId[1:])]
else:
viewableObject = objId
self.modelXbrl.viewModelObject(viewableObject)
except AttributeError: # not clickable
pass
self.modelXbrl.modelManager.cntlr.currentView = self
def cellEnter(self, *args):
# triggered on grid frame enter (not cell enter)
self.blockSelectEvent = 0
self.modelXbrl.modelManager.cntlr.currentView = self
def cellLeave(self, *args):
# triggered on grid frame leave (not cell leave)
self.blockSelectEvent = 1
# this method is not currently used
def cellSelect(self, *args):
if self.blockSelectEvent == 0 and self.blockViewModelObject == 0:
self.blockViewModelObject += 1
#self.modelXbrl.viewModelObject(self.nodeToObjectId[self.treeView.selection()[0]])
#self.modelXbrl.viewModelObject(self.treeView.selection()[0])
self.blockViewModelObject -= 1
def viewModelObject(self, modelObject):
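        """Synchronize this view with a model object selected elsewhere, switching to the table (ELR) that contains it."""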
if self.blockViewModelObject == 0:
self.blockViewModelObject += 1
try:
if isinstance(modelObject, ModelDtsObject.ModelRelationship):
objectId = modelObject.toModelObject.objectId()
else:
objectId = modelObject.objectId()
if objectId in self.tablesToELR:
self.view(viewTblELR=self.tablesToELR[objectId])
try:
self.modelXbrl.modelManager.cntlr.currentView = self.modelXbrl.guiViews.tableView
# force focus (synch) on the corresponding "Table" tab (useful in case of several instances)
self.modelXbrl.guiViews.tableView.tabWin.select(str(self.modelXbrl.guiViews.tableView.viewFrame))
except:
pass
except (KeyError, AttributeError):
pass
self.blockViewModelObject -= 1
def onConfigure(self, event, *args):
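        """Handle resize events of the view frame; once the size has settled, schedule a deferred reload of the view."""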
if not self.blockMenuEvents:
lastFrameWidth = getattr(self, "lastFrameWidth", 0)
lastFrameHeight = getattr(self, "lastFrameHeight", 0)
frameWidth = self.tabWin.winfo_width()
frameHeight = self.tabWin.winfo_height()
if lastFrameWidth != frameWidth or lastFrameHeight != frameHeight:
self.updateInstanceFromFactPrototypes()
self.lastFrameWidth = frameWidth
self.lastFrameHeight = frameHeight
self.setHeightANdWidth()
if lastFrameWidth:
# frame resized, recompute row header column widths and lay out table columns
"""
def sleepAndReload():
time.sleep(.75)
self.viewReloadDueToMenuAction()
self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((sleepAndReload, []))
"""
#self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((self.viewReloadDueToMenuAction, []))
def deferredReload():
self.deferredReloadCount -= 1 # only do reload after all queued reload timers expire
if self.deferredReloadCount <= 0:
self.viewReloadDueToMenuAction()
self.deferredReloadCount = getattr(self, "deferredReloadCount", 0) + 1
self.viewFrame.after(1500, deferredReload)
def onQuitView(self, event, *args):
        # this method is passed as callback when creating the view
# (to ScrolledTkTableFrame and then to XbrlTable that will monitor cell operations)
self.updateInstanceFromFactPrototypes()
self.updateProperties()
def updateProperties(self):
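        """Refresh the properties view with the object (instance fact or fact prototype) of the cell currently being edited."""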
if self.modelXbrl is not None:
modelXbrl = self.modelXbrl
# make sure the properties view is visible and we handle an instance
if modelXbrl.guiViews.propertiesView is not None and modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE:
tbl = self.table
# get coordinates of last currently operated cell
coordinates = tbl.getCurrentCellCoordinates()
if coordinates is not None:
# get object identifier from its coordinates in the current table
objId = tbl.getObjectId(coordinates)
if objId is not None and len(objId) > 0:
if objId and objId[0] == "f":
# fact prototype
viewableObject = self.factPrototypes[int(objId[1:])]
elif objId[0] != "a": #TODO: clarify what this "a" means
# instance fact
viewableObject = self.modelXbrl.modelObject(objId)
else:
return
modelXbrl.guiViews.propertiesView.viewModelObject(viewableObject)
def updateInstanceFromFactPrototypes(self):
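        """Write user edits from the table back into the XBRL instance: create contexts, units
        and facts for edited fact prototypes and update facts that already exist."""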
# Only update the model if it already exists
if self.modelXbrl is not None \
and self.modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE:
instance = self.modelXbrl
cntlr = instance.modelManager.cntlr
newCntx = ModelXbrl.AUTO_LOCATE_ELEMENT
newUnit = ModelXbrl.AUTO_LOCATE_ELEMENT
tbl = self.table
# check user keyed changes to aspects
aspectEntryChanges = {} # index = widget ID, value = widget contents
aspectEntryChangeIds = _DICT_SET(aspectEntryChanges.keys())
for modifiedCell in tbl.getCoordinatesOfModifiedCells():
objId = tbl.getObjectId(modifiedCell)
if objId is not None and len(objId)>0:
if tbl.isHeaderCell(modifiedCell):
if objId[0] == OPEN_ASPECT_ENTRY_SURROGATE:
aspectEntryChanges[objId] = tbl.getTableValue(modifiedCell)
else:
# check user keyed changes to facts
cellIndex = str(modifiedCell)
comboboxCells = tbl.window_names(cellIndex)
if comboboxCells is not None and len(comboboxCells)>0:
comboName = tbl.window_cget(cellIndex, '-window')
combobox = cntlr.parent.nametowidget(comboName)
else:
combobox = None
if isinstance(combobox, _Combobox):
codeDict = combobox.codes
if len(codeDict)>0: # the drop-down list shows labels, we want to have the actual values
bodyCellValue = tbl.getTableValue(modifiedCell)
value = codeDict.get(bodyCellValue, None)
if value is None:
value = bodyCellValue # this must be a qname!
else:
value = tbl.getTableValue(modifiedCell)
else:
value = tbl.getTableValue(modifiedCell)
objId = tbl.getObjectId(modifiedCell)
if objId is not None and len(objId)>0:
if objId[0] == "f":
factPrototypeIndex = int(objId[1:])
factPrototype = self.factPrototypes[factPrototypeIndex]
concept = factPrototype.concept
if concept is None:
if not self.conceptMessageIssued:
# This should be removed once cells have been disabled until every needed selection is done
self.conceptMessageIssued = True
self.modelXbrl.modelManager.cntlr.showMessage(" Please make sure every Z axis selection is done")
return
else:
self.conceptMessageIssued = False
entityIdentScheme = self.newFactItemOptions.entityIdentScheme
entityIdentValue = self.newFactItemOptions.entityIdentValue
periodType = concept.periodType
periodStart = self.newFactItemOptions.startDateDate if periodType == "duration" else None
periodEndInstant = self.newFactItemOptions.endDateDate
qnameDims = factPrototype.context.qnameDims
newAspectValues = self.newFactOpenAspects(objId)
if newAspectValues is None:
self.modelXbrl.modelManager.showStatus(_("Some open values are missing in an axis, the save is incomplete"), 5000)
continue
qnameDims.update(newAspectValues)
# open aspects widgets
prevCntx = instance.matchContext(
entityIdentScheme, entityIdentValue, periodType, periodStart, periodEndInstant,
qnameDims, [], [])
if prevCntx is not None:
cntxId = prevCntx.id
if self.modelXbrl.factAlreadyExists(concept.qname, cntxId):
self.modelXbrl.modelManager.addToLog(_("Value %s will not be saved, because fact '%s' already exists somewhere else") % (value, concept.label()), level=logging.ERROR)
continue
else:
newCntx = instance.createContext(entityIdentScheme, entityIdentValue,
periodType, periodStart, periodEndInstant,
concept.qname, qnameDims, [], [],
afterSibling=newCntx)
cntxId = newCntx.id # need new context
# new context
if concept.isNumeric:
if concept.isMonetary:
unitMeasure = qname(XbrlConst.iso4217, self.newFactItemOptions.monetaryUnit)
unitMeasure.prefix = "iso4217" # want to save with a recommended prefix
decimals = self.newFactItemOptions.monetaryDecimals
elif concept.isShares:
unitMeasure = XbrlConst.qnXbrliShares
decimals = self.newFactItemOptions.nonMonetaryDecimals
else:
unitMeasure = XbrlConst.qnXbrliPure
decimals = self.newFactItemOptions.nonMonetaryDecimals
prevUnit = instance.matchUnit([unitMeasure], [])
if prevUnit is not None:
unitId = prevUnit.id
else:
newUnit = instance.createUnit([unitMeasure], [], afterSibling=newUnit)
unitId = newUnit.id
attrs = [("contextRef", cntxId)]
if concept.isNumeric:
attrs.append(("unitRef", unitId))
value = Locale.atof(self.modelXbrl.locale, value, str.strip)
# Check if there is a custom method to compute the decimals
for pluginXbrlMethod in pluginClassMethods("CntlrWinMain.Rendering.ComputeDecimals"):
stopPlugin, decimals = pluginXbrlMethod(instance.locale, value, concept, decimals)
                                        if stopPlugin:
                                            break
attrs.append(("decimals", decimals))
newFact = instance.createFact(concept.qname, attributes=attrs, text=value)
# Check if there is a custom method to update filing indicators
for pluginXbrlMethod in pluginClassMethods("CntlrWinMain.Rendering.CheckUpdateFilingIndicator"):
stopPlugin = pluginXbrlMethod(self.roledefinition, self.modelXbrl)
if stopPlugin:
                                        break
tbl.setObjectId(modifiedCell,
newFact.objectId()) # switch cell to now use fact ID
if self.factPrototypes[factPrototypeIndex] is not None:
self.factPrototypes[factPrototypeIndex].clear()
self.factPrototypes[factPrototypeIndex] = None #dereference fact prototype
#TODO: clarify what this "a" means
elif objId[0] != "a": # instance fact, not prototype
fact = self.modelXbrl.modelObject(objId)
if fact.concept.isNumeric:
value = Locale.atof(self.modelXbrl.locale, value, str.strip)
if fact.concept.isMonetary:
unitMeasure = qname(XbrlConst.iso4217, self.newFactItemOptions.monetaryUnit)
unitMeasure.prefix = "iso4217" # want to save with a recommended prefix
decimals = self.newFactItemOptions.monetaryDecimals
elif fact.concept.isShares:
unitMeasure = XbrlConst.qnXbrliShares
decimals = self.newFactItemOptions.nonMonetaryDecimals
else:
unitMeasure = XbrlConst.qnXbrliPure
decimals = self.newFactItemOptions.nonMonetaryDecimals
# Check if there is a custom method to compute the decimals
for pluginXbrlMethod in pluginClassMethods("CntlrWinMain.Rendering.ComputeDecimals"):
stopPlugin, decimals = pluginXbrlMethod(instance.locale, value, fact.concept, decimals)
                                        if stopPlugin:
                                            break
if fact.value != str(value):
if fact.isNil != (not value):
fact.isNil = not value
if fact.isNil:
pass
#TODO: clear out nil facts
instance.updateFactIndex(fact) # for the time being, only the isNil value can change
if fact.concept.isNumeric and (not fact.isNil): # if nil, there is no need to update these values
fact.decimals = decimals
prevUnit = instance.matchUnit([unitMeasure], [])
if prevUnit is not None:
unitId = prevUnit.id
else:
newUnit = instance.createUnit([unitMeasure], [], afterSibling=newUnit)
unitId = newUnit.id
fact.unitID = unitId
fact.text = str(value)
instance.setIsModified()
fact.xValid = XmlValidate.UNVALIDATED
XmlValidate.validate(instance, fact)
tbl.clearModificationStatus()
def saveInstance(self, newFilename=None, onSaved=None):
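        """Save the instance document, prompting for a file name when a new instance must be created;
        the actual save runs in a background thread (synchronously in test mode)."""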
# newFilename = None # only used when a new instance must be created
self.updateInstanceFromFactPrototypes()
if self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE and newFilename is None:
newFilename = self.modelXbrl.modelManager.cntlr.fileSave(view=self, fileType="xbrl")
if not newFilename:
return # saving cancelled
# continue saving in background
if self.modelXbrl.modelManager.cntlr.testMode:
self.backgroundSaveInstance(newFilename, onSaved)
else:
thread = threading.Thread(target=lambda: self.backgroundSaveInstance(newFilename, onSaved))
thread.daemon = True
thread.start()
def backgroundSaveInstance(self, newFilename=None, onSaved=None):
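        """Perform the save (creating a new instance first if needed), then refresh the view titles and status."""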
cntlr = self.modelXbrl.modelManager.cntlr
if newFilename and self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE:
self.modelXbrl.modelManager.showStatus(_("creating new instance {0}").format(os.path.basename(newFilename)))
self.modelXbrl.modelManager.cntlr.waitForUiThreadQueue() # force status update
self.modelXbrl.createInstance(newFilename) # creates an instance as this modelXbrl's entrypoint
instance = self.modelXbrl
cntlr.showStatus(_("Saving {0}").format(instance.modelDocument.basename))
cntlr.waitForUiThreadQueue() # force status update
self.updateInstanceFromFactPrototypes()
instance.saveInstance(newFilename) # may override prior filename for instance from main menu
if self.modelXbrl.guiViews.tableIndexView is not None:
self.modelXbrl.guiViews.tableIndexView.refreshTitle()
if self.modelXbrl.guiViews.propertiesView is not None:
self.modelXbrl.guiViews.propertiesView.refreshTitle()
self.modelXbrl.guiViews.tableView.refreshTitle()
cntlr.addToLog(_("{0} saved").format(newFilename if newFilename is not None else instance.modelDocument.filepath))
cntlr.showStatus(_("Saved {0}").format(instance.modelDocument.basename), clearAfter=3000)
cntlr.triggerShowTitle(self.modelXbrl, os.path.basename(newFilename))
if onSaved is not None:
if self.modelXbrl.modelManager.cntlr.testMode:
onSaved()
else:
self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((onSaved, []))
def newFactOpenAspects(self, factObjectId):
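        """Collect the aspect (dimension) values entered in the open-aspect header widgets for a new fact;
        returns None when a required open value is still missing."""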
aspectValues = {}
for aspectObjId in self.factPrototypeAspectEntryObjectIds[factObjectId]:
structuralNode = self.aspectEntryObjectIdsNode[aspectObjId]
for aspect in structuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
gridCellItem = self.aspectEntryObjectIdsCell[aspectObjId]
value = gridCellItem.get()
# is aspect in a childStructuralNode?
if value is not None and OPEN_ASPECT_ENTRY_SURROGATE in aspectObjId and len(value)==0:
return None # some values are missing!
if value:
aspectValue = structuralNode.aspectEntryHeaderValues.get(value)
if aspectValue is None: # try converting value
if isinstance(aspect, QName): # dimension
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
# value must be qname
aspectValue = None # need to find member for the description
else:
typedDimElement = dimConcept.typedDomainElement
aspectValue = FunctionXfi.create_element(
self.rendrCntx, None, (typedDimElement.qname, (), value))
if aspectValue is not None:
aspectValues[aspect] = aspectValue
return aspectValues
def aspectEntryValues(self, structuralNode):
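        """Build the list of header values selectable in an open aspect entry combobox, caching the
        header-to-aspect-value mapping on the structural node; for explicit dimensions an
        "(all members)" entry is appended."""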
for aspect in structuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
        # collect the header values currently available for this aspect (see getHeaders below)
depth = 0
n = structuralNode
while (n.parentStructuralNode is not None):
depth += 1
root = n = n.parentStructuralNode
headers = set()
headerValues = {}
def getHeaders(n, d):
for childStructuralNode in n.childStructuralNodes:
if d == depth:
h = childStructuralNode.header(lang=self.lang,
returnGenLabel=False,
returnMsgFormatString=False)
if not childStructuralNode.isEntryPrototype() and h:
headerValues[h] = childStructuralNode.aspectValue(aspect)
headers.add(h)
else:
getHeaders(childStructuralNode, d+1)
getHeaders(root, 1)
structuralNode.aspectEntryHeaderValues = headerValues
# is this an explicit dimension, if so add "(all members)" option at end
headersList = sorted(headers)
if isinstance(aspect, QName): # dimension
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
                if headersList: # has entries, add "(all members)" at end
headersList.append("(all members)")
else: # empty list, just add all members anyway
return self.explicitDimensionFilterMembers(structuralNode, structuralNode)
return headersList
def onAspectComboboxSelection(self, event):
gridCombobox = event.widget
if gridCombobox.get() == "(all members)":
structuralNode = self.aspectEntryObjectIdsNode[gridCombobox.objectId]
self.comboboxLoadExplicitDimension(gridCombobox, structuralNode, structuralNode)
def comboboxLoadExplicitDimension(self, gridCombobox, structuralNode, structuralNodeWithFilter):
gridCombobox["values"] = self.explicitDimensionFilterMembers(structuralNode, structuralNodeWithFilter)
def explicitDimensionFilterMembers(self, structuralNode, structuralNodeWithFilter):
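        """Build the sorted list of member labels selectable for an explicit dimension, taken from the
        node's dimension filters when present, otherwise from the dimension's domain-member relationships."""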
for aspect in structuralNodeWithFilter.aspectsCovered():
if isinstance(aspect, QName): # dimension
break
valueHeaders = set()
if structuralNode is not None:
headerValues = {}
# check for dimension filter(s)
dimFilterRels = structuralNodeWithFilter.definitionNode.filterRelationships
if dimFilterRels:
for rel in dimFilterRels:
dimFilter = rel.toModelObject
if dimFilter is not None:
for memberModel in dimFilter.memberProgs:
memQname = memberModel.qname
memConcept = self.modelXbrl.qnameConcepts.get(memQname)
if memConcept is not None and (not memberModel.axis or memberModel.axis.endswith('-self')):
header = memConcept.label(lang=self.lang)
valueHeaders.add(header)
if rel.isUsable:
headerValues[header] = memQname
else:
headerValues[header] = memConcept
if memberModel.axis and memberModel.linkrole and memberModel.arcrole:
# merge of pull request 42 acsone:TABLE_Z_AXIS_DESCENDANT_OR_SELF
if memberModel.axis.endswith('-or-self'):
searchAxis = memberModel.axis[:len(memberModel.axis)-len('-or-self')]
else:
searchAxis = memberModel.axis
relationships = concept_relationships(self.rendrCntx,
None,
(memQname,
memberModel.linkrole,
memberModel.arcrole,
searchAxis),
False) # return flat list
for rel in relationships:
if rel.isUsable:
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
if not valueHeaders:
relationships = concept_relationships(self.rendrCntx,
None,
(aspect,
"XBRL-all-linkroles", # linkrole,
"XBRL-dimensions",
'descendant'),
False) # return flat list
for rel in relationships:
if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
and rel.isUsable):
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
structuralNode.aspectEntryHeaderValues = headerValues
return sorted(valueHeaders)
def deleteFilledOpenRow(self, x, y):
'''
This method is used as callback to delete a filled open axis row or column.
This is done the closest possible way to the GUI way: all fact cell values
are just cleared as if the user had wiped the cell contents.
Then the normal "update instance from fact prototype" processing
is triggered. Then the delete nil facts method will scan and delete orphan facts,
contexts and units.
And finally, the view must be redrawn.
'''
#TODO: complete case for X open axis
#TODO: examine the possibility of moving nil facts removal to updateInstanceFromFactPrototypes
col = self.tableInfo.getCol(y)
for x, cell in col.items():
print(str(cell))
objectId = self.table.getObjectIdFromXY(x, y)
#print("objectId= " + objectId)
if cell.isValue:
value = self.table.getTableValueFromXY(x, y)
if value is not None and objectId[0] != "f":
#print("value= " + value)
self.table.clearCellValueFromXY(x, y)
self.updateInstanceFromFactPrototypes()
self.modelXbrl.deleteNilFacts()
self.view()
# import after other modules resolved to prevent circular references
from arelle.FunctionXfi import concept_relationships
|
the-stack_106_27715 | import shutil
from country_levels_lib.config import geojson_dir, export_dir
from country_levels_lib.utils import read_json, osm_url, write_json
from country_levels_lib.wam_download import wam_data_dir
from country_levels_lib.wam_collect import validate_iso1, validate_iso2
wam_geojson_simp_dir = geojson_dir / 'wam' / 'simp'
def export_wam():
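    """Remove any previous geojson export and split the iso1/iso2 files for each simplification level."""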
shutil.rmtree(export_dir / 'geojson', ignore_errors=True)
for simp in [5, 7, 8]:
split_geojson(1, simp, debug=False)
split_geojson(2, simp, debug=False)
def split_geojson(iso_level: int, simp_level, *, debug: bool = False):
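    """Split a WAM iso-level GeoJSON file into one file per ISO code under the export directory,
    normalizing feature properties; an index json is written for the q5 level and debug mode
    reports duplicate ISO codes instead of skipping them."""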
assert iso_level in [1, 2]
print(f'Splitting iso{iso_level} to level: q{simp_level}')
file_path = wam_geojson_simp_dir / f'iso{iso_level}-{simp_level}.geojson'
features = read_json(file_path)['features']
features_sorted = sorted(features, key=lambda i: i['properties']['admin_level'])
level_subdir = export_dir / 'geojson' / f'q{simp_level}' / f'iso{iso_level}'
level_subdir.mkdir(parents=True)
population_map = read_json(wam_data_dir / 'population.json')
json_data = dict()
seen = dict()
for feature in features_sorted:
prop = feature['properties']
alltags = prop['alltags']
name = prop.pop('name')
osm_id = int(prop.pop('id'))
iso = prop.pop(f'iso{iso_level}')
admin_level = int(prop.pop('admin_level'))
wikidata_id = prop.pop('wikidata_id', None)
countrylevel_id = f'iso{iso_level}:{iso}'
population = population_map.get(wikidata_id)
wikipedia_from_prop = prop.pop('wikipedia', None)
wikipedia_from_alltags = alltags.pop('wikipedia', None)
if (
wikipedia_from_prop
and wikipedia_from_alltags
and wikipedia_from_prop != wikipedia_from_alltags
):
print(wikipedia_from_prop, wikipedia_from_alltags)
wikipedia_id = wikipedia_from_alltags
if wikipedia_from_prop:
wikipedia_id = wikipedia_from_prop
del feature['bbox']
for key in ['boundary', 'note', 'rpath', 'srid', 'timestamp']:
prop.pop(key, None)
for key in [
'ISO3166-1',
'ISO3166-1:alpha2',
'ISO3166-1:numeric',
'ISO3166-2',
'ISO3166-2:alpha2',
'ISO3166-2:numeric',
'land_area',
'wikidata',
]:
alltags.pop(key, None)
seen.setdefault(iso, list())
if seen[iso] and not debug:
# print(f' duplicate {iso}, skipping')
continue
new_prop = {
'name': name,
f'iso{iso_level}': iso,
'admin_level': admin_level,
'osm_id': osm_id,
'wikidata_id': wikidata_id,
'wikipedia_id': wikipedia_id,
'population': population,
'countrylevel_id': countrylevel_id,
'osm_data': prop,
}
new_prop_without_osm_data = {k: v for k, v in new_prop.items() if k != 'osm_data'}
feature['properties'] = new_prop
seen[iso].append(new_prop_without_osm_data)
json_data[iso] = new_prop_without_osm_data
if iso_level == 1:
if not validate_iso1(iso):
print(f'invalid iso1: {iso}')
continue
write_json(level_subdir / f'{iso}.geojson', feature)
json_data[iso]['geojson_path'] = f'iso1/{iso}.geojson'
else:
if not validate_iso2(iso):
print(f'invalid iso2: {iso}')
continue
iso2_start, iso2_end = iso.split('-')
iso2_subdir = level_subdir / iso2_start
iso2_subdir.mkdir(exist_ok=True)
write_json(level_subdir / iso2_start / f'{iso}.geojson', feature)
json_data[iso]['geojson_path'] = f'iso2/{iso2_start}/{iso}.geojson'
if simp_level == 5:
write_json(export_dir / f'iso{iso_level}.json', json_data, indent=2, sort_keys=True)
#
#
if debug: # debug duplicates, fixed by sorting by admin_level
debug_dir = geojson_dir / 'wam' / 'debug' / f'iso{iso_level}'
shutil.rmtree(debug_dir, ignore_errors=True)
debug_dir.mkdir(parents=True)
# choose lowest admin level from available ones
for iso, iso_matches in seen.items():
if len(iso_matches) != 1:
matches_sorted = sorted(iso_matches, key=lambda i: i['admin_level'])
print(f'duplicate iso{iso_level}: {iso}')
for match in matches_sorted:
name = match['name']
osm_id = match['osm_id']
url = osm_url(osm_id)
admin_level = match['admin_level']
print(f' {name} {admin_level} {url}')
# file_path = debug_dir / f'{iso} {admin_level} {osm_id}.geojson'
# write_json(file_path, match)
|
the-stack_106_27717	""" Develop a program that reads the name, age and sex of 4 people. At the end of the program, show:
 - the average age of the group; - the name of the oldest man; - how many women are under 20 years old. """
soma_idade = 0
media_idade = 0
maior_idade_homem = 0
nome_velho = ''
total_mulher_20 = 0
for p in range(1, 5):
print('------- {}ยบ PESSOA -------'.format(p))
nome = str(input('Nome:')).strip()
idade = int(input('Idade:'))
sexo = str(input('Sexo [M/F]:')).strip()
if p == 1 and sexo in 'Mm':
maior_idade_homem = idade
nome_velho = nome
if sexo in 'Mm' and idade > maior_idade_homem:
maior_idade_homem = idade
nome_velho = nome
if sexo in 'Ff' and idade < 20:
total_mulher_20 += 1
soma_idade += idade
media_idade = soma_idade / 4
print('A mรฉdia de idade do grupo รฉ de {} anos'.format(media_idade))
print('O homem mais velho se chama {} e tem {} anos'.format(nome_velho, maior_idade_homem))
print('Ao todo sรฃo {} mulheres com menos de 20 anos'.format(total_mulher_20)) |
the-stack_106_27718 | """
Tests for the fiu_ctrl.py module.
Note the command line utility is covered by the utils/ tests, not from here,
this is just for the Python module.
"""
import subprocess
import fiu_ctrl
import errno
import time
fiu_ctrl.PLIBPATH = "./libs/"
def run_cat(**kwargs):
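    """Launch the small-cat helper with stdin/stdout/stderr pipes; extra kwargs are passed through to fiu_ctrl.Subprocess."""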
return fiu_ctrl.Subprocess(["./small-cat"],
stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, **kwargs)
# Run without any failure point being enabled.
cmd = run_cat()
p = cmd.start()
out, err = p.communicate('test\n')
assert out == 'test\n', out
assert err == '', err
# Enable before starting.
cmd = run_cat(fiu_enable_posix = True)
cmd.enable('posix/io/rw/*', failinfo = errno.ENOSPC)
p = cmd.start()
out, err = p.communicate('test\n')
assert out == '', out
assert 'space' in err, err
# Enable after starting.
cmd = run_cat(fiu_enable_posix = True)
p = cmd.start()
cmd.enable('posix/io/rw/*', failinfo = errno.ENOSPC)
out, err = p.communicate('test\n')
assert out == '', out
assert 'space' in err, err
# Enable-disable.
cmd = run_cat(fiu_enable_posix = True)
p = cmd.start()
cmd.enable('posix/io/rw/*', failinfo = errno.ENOSPC)
cmd.disable('posix/io/rw/*')
out, err = p.communicate('test\n')
assert out == 'test\n', (out, err)
# Enable random.
# This relies on cat doing a reasonably small number of read and writes, which
# our small-cat does.
result = { True: 0, False: 0 }
for i in range(50):
cmd = run_cat(fiu_enable_posix = True)
p = cmd.start()
cmd.enable_random('posix/io/rw/*', failinfo = errno.ENOSPC,
probability = 0.5)
out, err = p.communicate('test\n')
if 'space' in err:
result[False] += 1
elif out == 'test\n':
result[True] += 1
else:
assert False, (out, err)
assert 10 < result[True] < 40, result
assert 10 < result[False] < 40, result
|
the-stack_106_27720 | # Copyright 2019 WebPageTest LLC.
# Copyright 2017 Google Inc.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Support for Safari on iOS using iWptBrowser"""
import base64
from datetime import datetime
import gzip
import io
import logging
import multiprocessing
import os
import platform
import re
import subprocess
import sys
import time
import zipfile
if (sys.version_info >= (3, 0)):
from time import monotonic
from urllib.parse import urlsplit # pylint: disable=import-error
unicode = str
GZIP_TEXT = 'wt'
else:
from monotonic import monotonic
from urlparse import urlsplit # pylint: disable=import-error
GZIP_TEXT = 'w'
try:
import ujson as json
except BaseException:
import json
from ws4py.client.threadedclient import WebSocketClient
from .optimization_checks import OptimizationChecks
from .base_browser import BaseBrowser
class iWptBrowser(BaseBrowser):
"""iOS"""
def __init__(self, ios_device, options, job):
BaseBrowser.__init__(self)
self.job = job
self.task = None
self.options = options
self.ios = ios_device
self.event_name = None
self.nav_error = None
self.nav_error_code = None
self.page_loaded = None
self.recording = False
self.connected = False
self.messages = multiprocessing.JoinableQueue()
self.video_processing = None
self.optimization = None
self.is_navigating = False
self.main_frame = None
self.main_request = None
self.page = {}
self.requests = {}
self.connections = {}
self.last_connection_id = 0
self.console_log = []
self.timeline = None
self.trace_parser = None
self.wpt_result = None
self.id_map = {}
self.response_bodies = {}
self.bodies_zip_file = None
self.body_fail_count = 0
self.body_index = 0
self.last_activity = monotonic()
self.script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'js')
self.path_base = None
self.websocket = None
self.command_id = 0
self.command_responses = {}
self.pending_commands = []
self.headers = {}
self.webinspector_proxy = None
self.ios_version = None
self.workers = []
self.default_target = None
def prepare(self, job, task):
"""Prepare the OS for the browser"""
self.task = task
self.page = {}
self.requests = {}
self.console_log = []
if self.timeline is not None:
self.timeline.close()
self.timeline = None
self.nav_error = None
self.nav_error_code = None
self.main_request = None
self.ios.notification_queue = self.messages
self.ios.stop_browser()
if 'browser' in job and job['browser'].lower().find('landscape') >= 0:
self.ios.landscape()
else:
self.ios.portrait()
if not task['cached']:
self.clear_profile(task)
def clear_profile(self, _):
"""Clear the browser profile"""
self.ios.clear_cache()
def flush_messages(self):
"""Flush all of the pending messages"""
try:
while True:
self.messages.get_nowait()
self.messages.task_done()
except Exception:
pass
def launch(self, _job, task):
"""Launch the browser"""
self.connected = False
self.flush_messages()
self.ios_version = self.ios.get_os_version()
if self.ios.start_browser():
# Start the webinspector proxy
args = ['ios_webkit_debug_proxy', '-F', '-u', self.ios.serial]
logging.debug(' '.join(args))
try:
self.webinspector_proxy = subprocess.Popen(args)
if self.webinspector_proxy:
# Connect to the dev tools interface
self.connected = self.connect()
if self.connected:
self.send_command('Target.setPauseOnStart', {'pauseOnStart': True}, wait=True)
# Override the UA String if necessary
ua_string = self.execute_js('navigator.userAgent;')
if 'uastring' in self.job:
ua_string = self.job['uastring']
if ua_string is not None and 'AppendUA' in task:
ua_string += ' ' + task['AppendUA']
if ua_string is not None:
self.job['user_agent_string'] = ua_string
except Exception:
logging.exception("Error starting webkit proxy")
self.flush_messages()
def connect(self, timeout=30):
"""Connect to the dev tools interface"""
import requests
proxies = {"http": None, "https": None}
ret = False
self.default_target = None
end_time = monotonic() + timeout
while not ret and monotonic() < end_time:
try:
response = requests.get("http://localhost:9222/json", timeout=timeout, proxies=proxies)
if response.text:
tabs = response.json()
logging.debug("Dev Tools tabs: %s", json.dumps(tabs))
if tabs:
websocket_url = None
for index in range(len(tabs)):
if 'webSocketDebuggerUrl' in tabs[index]:
websocket_url = tabs[index]['webSocketDebuggerUrl']
break
if websocket_url is not None:
try:
self.websocket = DevToolsClient(websocket_url)
self.websocket.messages = self.messages
self.websocket.connect()
ret = True
except Exception as err:
logging.exception("Connect to dev tools websocket Error: %s", err.__str__())
if not ret:
# try connecting to 127.0.0.1 instead of localhost
try:
websocket_url = websocket_url.replace('localhost', '127.0.0.1')
self.websocket = DevToolsClient(websocket_url)
self.websocket.messages = self.messages
self.websocket.connect()
ret = True
except Exception as err:
logging.exception("Connect to dev tools websocket Error: %s", err.__str__())
else:
time.sleep(0.5)
else:
time.sleep(0.5)
except Exception as err:
logging.exception("Connect to dev tools Error: %s", err.__str__())
time.sleep(0.5)
return ret
def stop(self, job, task):
"""Kill the browser"""
if self.websocket:
try:
self.websocket.close()
except Exception:
logging.exception('Error closing websocket')
self.websocket = None
if self.webinspector_proxy:
self.webinspector_proxy.terminate()
self.webinspector_proxy.communicate()
self.webinspector_proxy = None
self.ios.stop_browser()
def run_lighthouse_test(self, task):
"""Stub for lighthouse test"""
pass
def run_task(self, task):
"""Run an individual test"""
if self.connected:
self.task = task
logging.debug("Running test")
end_time = monotonic() + task['test_time_limit']
task['current_step'] = 1
recording = False
while task['script'] and task['error'] is None and \
monotonic() < end_time:
self.prepare_task(task)
command = task['script'].pop(0)
if not recording and command['record']:
recording = True
self.on_start_recording(task)
try:
self.process_command(command)
except Exception:
logging.exception("Exception running task")
if command['record']:
self.wait_for_page_load()
if not task['combine_steps'] or not len(task['script']):
self.on_stop_capture(task)
self.on_stop_recording(task)
recording = False
self.on_start_processing(task)
self.wait_for_processing(task)
self.step_complete(task)
if task['log_data']:
# Move on to the next step
task['current_step'] += 1
self.event_name = None
task['navigated'] = True
self.task = None
def wait_for_page_load(self):
"""Wait for the onload event from the extension"""
if self.connected:
start_time = monotonic()
end_time = start_time + self.task['time_limit']
done = False
interval = 1
while not done:
if self.page_loaded is not None:
interval = 0.1
try:
message = self.messages.get(timeout=interval)
try:
self.process_message(message)
except Exception:
logging.exception('Error processing message')
except Exception:
pass
now = monotonic()
elapsed_test = now - start_time
if 'minimumTestSeconds' in self.task and \
elapsed_test < self.task['minimumTestSeconds'] and \
now < end_time:
continue
if self.nav_error is not None:
done = True
if self.page_loaded is None or 'minimumTestSeconds' in self.task:
self.task['error'] = self.nav_error
if self.nav_error_code is not None:
self.task['page_data']['result'] = self.nav_error_code
else:
self.task['page_data']['result'] = 12999
elif now >= end_time:
done = True
# only consider it an error if we didn't get a page load event
if self.page_loaded is None:
self.task['error'] = "Page Load Timeout"
else:
elapsed_activity = now - self.last_activity
elapsed_page_load = now - self.page_loaded if self.page_loaded else 0
if elapsed_page_load >= 1 and elapsed_activity >= self.task['activity_time']:
done = True
elif self.task['error'] is not None:
done = True
def execute_js(self, script):
"""Run javascipt (stub for overriding"""
ret = None
if self.connected:
result = self.send_command('Runtime.evaluate', {'expression': script, 'returnByValue': True}, timeout=30, wait=True)
if result is not None and 'result' in result and 'result' in result['result'] and 'value' in result['result']['result']:
ret = result['result']['result']['value']
return ret
def run_js_file(self, file_name):
"""Execute one of our js scripts"""
ret = None
if self.connected:
script = None
script_file_path = os.path.join(self.script_dir, file_name)
if os.path.isfile(script_file_path):
with open(script_file_path, 'r') as script_file:
script = script_file.read()
if script is not None:
ret = self.execute_js(script)
return ret
def set_header(self, header):
"""Add/modify a header on the outbound requests"""
if header is not None and len(header):
separator = header.find(':')
if separator > 0:
name = header[:separator].strip()
value = header[separator + 1:].strip()
self.headers[name] = value
self.send_command('Network.setExtraHTTPHeaders',
{'headers': self.headers}, wait=True)
if len(self.workers):
for target in self.workers:
self.send_command('Network.setExtraHTTPHeaders',
{'headers': self.headers}, target_id=target['targetId'])
def reset_headers(self):
"""Add/modify a header on the outbound requests"""
self.headers = {}
self.send_command('Network.setExtraHTTPHeaders',
{'headers': self.headers}, wait=True)
if len(self.workers):
for target in self.workers:
self.send_command('Network.setExtraHTTPHeaders',
{'headers': self.headers}, target_id=target['targetId'])
def get_sorted_requests_json(self, include_bodies):
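        """Stub used for the $WPT_REQUESTS/$WPT_BODIES substitution in custom metrics; returns the JSON literal 'null'."""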
return 'null'
def collect_browser_metrics(self, task):
"""Collect all of the in-page browser metrics that we need"""
logging.debug("Collecting user timing metrics")
user_timing = self.run_js_file('user_timing.js')
logging.debug(user_timing)
if user_timing is not None and self.path_base is not None:
path = self.path_base + '_timed_events.json.gz'
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(user_timing))
logging.debug("Collecting page-level metrics")
page_data = self.run_js_file('page_data.js')
logging.debug(page_data)
if page_data is not None:
task['page_data'].update(page_data)
if 'customMetrics' in self.job:
custom_metrics = {}
requests = None
bodies = None
for name in self.job['customMetrics']:
if name == 'jsLibsVulns':
continue
logging.debug("Collecting custom metric %s", name)
custom_script = unicode(self.job['customMetrics'][name])
if custom_script.find('$WPT_REQUESTS') >= 0:
if requests is None:
requests = self.get_sorted_requests_json(False)
try:
custom_script = custom_script.replace('$WPT_REQUESTS', requests)
except Exception:
logging.exception('Error substituting request data into custom script')
if custom_script.find('$WPT_BODIES') >= 0:
if bodies is None:
bodies = self.get_sorted_requests_json(True)
try:
custom_script = custom_script.replace('$WPT_BODIES', bodies)
except Exception:
logging.exception('Error substituting request data with bodies into custom script')
script = '(function() {' + custom_script + '})()'
try:
custom_metrics[name] = self.execute_js(script)
if custom_metrics[name] is not None:
logging.debug(custom_metrics[name])
except Exception:
logging.exception('Error collecting custom metric')
if self.path_base is not None:
path = self.path_base + '_metrics.json.gz'
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(custom_metrics))
if 'heroElementTimes' in self.job and self.job['heroElementTimes']:
hero_elements = None
custom_hero_selectors = {}
if 'heroElements' in self.job:
custom_hero_selectors = self.job['heroElements']
logging.debug('Collecting hero element positions')
with io.open(os.path.join(self.script_dir, 'hero_elements.js'), 'r', encoding='utf-8') as script_file:
hero_elements_script = script_file.read()
script = hero_elements_script + '(' + json.dumps(custom_hero_selectors) + ')'
hero_elements = self.execute_js(script)
if hero_elements is not None:
path = os.path.join(task['dir'], task['prefix'] + '_hero_elements.json.gz')
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(hero_elements))
def process_message(self, msg):
"""Process a message from the browser
https://trac.webkit.org/browser/webkit/trunk/Source/JavaScriptCore/inspector/protocol"""
try:
if 'method' in msg:
parts = msg['method'].split('.')
if len(parts) >= 2:
category = parts[0]
event = parts[1]
if category == 'Page' and self.recording:
self.process_page_event(event, msg)
elif category == 'Network' and self.recording:
self.process_network_event(event, msg)
elif category == 'Inspector':
self.process_inspector_event(event)
elif category == 'Timeline' and self.recording:
self.process_timeline_event(event, msg)
elif category == 'Console' and self.recording:
self.process_console_event(event, msg)
elif category == 'Target':
self.process_target_event(event, msg)
except Exception:
logging.exception('Error processing browser message')
if self.timeline and 'method' in msg and not msg['method'].startswith('Target.') and self.recording:
json.dump(msg, self.timeline)
self.timeline.write(",\n")
if 'id' in msg:
response_id = int(re.search(r'\d+', str(msg['id'])).group())
if response_id in self.pending_commands:
self.pending_commands.remove(response_id)
self.command_responses[response_id] = msg
def process_page_event(self, event, msg):
"""Process Page.* dev tools events"""
if 'start' not in self.page and 'params' in msg and 'timestamp' in msg['params']:
self.page['start'] = msg['params']['timestamp']
if event == 'loadEventFired':
self.page_loaded = monotonic()
self.page['loaded'] = msg['params']['timestamp']
elif event == 'domContentEventFired':
self.page['DOMContentLoaded'] = msg['params']['timestamp']
elif event == 'frameStartedLoading':
if self.is_navigating and self.main_frame is None:
self.is_navigating = False
self.main_frame = msg['params']['frameId']
if self.main_frame == msg['params']['frameId']:
logging.debug("Navigating main frame")
self.last_activity = monotonic()
self.page_loaded = None
elif event == 'frameStoppedLoading':
if self.main_frame is not None and \
not self.page_loaded and \
self.main_frame == msg['params']['frameId']:
if self.nav_error is not None:
self.task['error'] = self.nav_error
logging.debug("Page load failed: %s", self.nav_error)
if self.nav_error_code is not None:
self.task['page_data']['result'] = self.nav_error_code
self.page_loaded = monotonic()
def process_network_event(self, event, msg):
"""Process Network.* dev tools events"""
if 'requestId' in msg['params']:
timestamp = None
if 'params' in msg and 'timestamp' in msg['params']:
timestamp = msg['params']['timestamp']
request_id = msg['params']['requestId']
original_request_id = request_id
if original_request_id in self.id_map:
request_id = str(original_request_id) + '.' + str(self.id_map[original_request_id])
if request_id not in self.requests:
self.requests[request_id] = {'id': request_id,
'original_id': original_request_id,
'bytesIn': 0,
'objectSize': 0,
'objectSizeUncompressed': 0,
'transfer_size': 0,
'fromNet': False,
'is_redirect': False}
if timestamp:
self.requests[request_id]['created'] = timestamp
request = self.requests[request_id]
if 'targetId' in msg['params']:
request['targetId'] = msg['params']['targetId']
ignore_activity = request['is_video'] if 'is_video' in request else False
if event == 'requestWillBeSent':
if 'start' not in self.page and timestamp:
self.page['start'] = timestamp
# For a redirect, close out the existing request and start a new one
if 'redirectResponse' in msg['params']:
if timestamp and 'start' in request and timestamp > request['start']:
if 'firstByte' not in request or timestamp < request['firstByte']:
request['firstByte'] = timestamp
if 'end' not in request or timestamp > request['end']:
request['end'] = timestamp
request['is_redirect'] = True
response = msg['params']['redirectResponse']
request['status'] = response['status']
request['statusText'] = response['statusText']
request['response_headers'] = response['headers']
if 'fromDiskCache' in response and response['fromDiskCache']:
request['fromNet'] = False
if 'source' in response and response['source'] not in ['network', 'unknown']:
request['fromNet'] = False
if 'timing' in response:
request['timing'] = response['timing']
if original_request_id in self.id_map:
self.id_map[original_request_id] += 1
else:
self.id_map[original_request_id] = 1
request_id = str(original_request_id) + '.' + \
str(self.id_map[original_request_id])
self.requests[request_id] = {'id': request_id,
'original_id': original_request_id,
'bytesIn': 0,
'objectSize': 0,
'objectSizeUncompressed': 0,
'transfer_size': 0,
'fromNet': False,
'is_redirect': True}
if timestamp:
self.requests[request_id]['created'] = timestamp
request = self.requests[request_id]
if timestamp:
request['start'] = timestamp
request['initiator'] = msg['params']['initiator']
request['url'] = msg['params']['request']['url']
request['method'] = msg['params']['request']['method']
request['request_headers'] = msg['params']['request']['headers']
if 'type' in msg['params']:
request['type'] = msg['params']['type']
if request['url'].endswith('.mp4'):
request['is_video'] = True
request['fromNet'] = True
if msg['params']['frameId'] != self.main_frame:
request['frame'] = msg['params']['frameId']
if self.main_frame is not None and \
self.main_request is None and \
msg['params']['frameId'] == self.main_frame:
logging.debug('Main request detected')
self.main_request = request_id
if timestamp:
self.page['start'] = float(msg['params']['timestamp'])
elif event == 'responseReceived':
response = msg['params']['response']
request['status'] = response['status']
request['statusText'] = response['statusText']
request['response_headers'] = response['headers']
if 'fromDiskCache' in response and response['fromDiskCache']:
request['fromNet'] = False
if 'source' in response and response['source'] not in ['network', 'unknown']:
request['fromNet'] = False
if 'timing' in response:
request['timing'] = response['timing']
if 'mimeType' in response and response['mimeType'].startswith('video/'):
request['is_video'] = True
if timestamp and 'start' in request and timestamp > request['start']:
if 'firstByte' not in request or timestamp < request['firstByte']:
request['firstByte'] = timestamp
if 'end' not in request or timestamp > request['end']:
request['end'] = timestamp
elif event == 'dataReceived':
bytesIn = 0
if 'encodedDataLength' in msg['params'] and \
msg['params']['encodedDataLength'] >= 0:
bytesIn = msg['params']['encodedDataLength']
request['objectSize'] += bytesIn
request['bytesIn'] += bytesIn
request['transfer_size'] += bytesIn
elif 'dataLength' in msg['params'] and msg['params']['dataLength'] >= 0:
bytesIn = msg['params']['dataLength']
request['objectSize'] += bytesIn
                    request['bytesIn'] += bytesIn
request['transfer_size'] += bytesIn
if 'dataLength' in msg['params'] and msg['params']['dataLength'] >= 0:
request['objectSizeUncompressed'] += msg['params']['dataLength']
if timestamp and 'start' in request and timestamp > request['start']:
if 'chunks' not in request:
request['chunks'] = []
request['chunks'].append({'ts': timestamp, 'bytes': bytesIn})
if 'firstByte' not in request or timestamp < request['firstByte']:
request['firstByte'] = timestamp
if 'end' not in request or timestamp > request['end']:
request['end'] = timestamp
elif event == 'loadingFinished':
if timestamp and 'start' in request and timestamp > request['start']:
if 'firstByte' not in request or timestamp < request['firstByte']:
request['firstByte'] = timestamp
if 'end' not in request or timestamp > request['end']:
request['end'] = timestamp
if 'metrics' in msg['params']:
metrics = msg['params']['metrics']
if 'priority' in metrics:
request['priority'] = metrics['priority']
if 'protocol' in metrics:
request['protocol'] = metrics['protocol']
if 'remoteAddress' in metrics:
separator = metrics['remoteAddress'].rfind(':')
if separator >= 0:
request['ip'] = metrics['remoteAddress'][:separator]
else:
request['ip'] = metrics['remoteAddress']
if 'connectionIdentifier' in metrics:
identifier = metrics['connectionIdentifier']
if identifier in self.connections:
request['connection'] = self.connections[identifier]
else:
self.last_connection_id += 1
self.connections[identifier] = self.last_connection_id
request['connection'] = self.last_connection_id
if 'requestHeaderBytesSent' in metrics:
request['bytesOut'] = metrics['requestHeaderBytesSent']
if 'requestBodyBytesSent' in metrics:
request['bytesOut'] += metrics['requestBodyBytesSent']
if 'responseBodyBytesReceived' in metrics:
request['bytesIn'] = metrics['responseBodyBytesReceived']
request['objectSize'] = metrics['responseBodyBytesReceived']
request['transfer_size'] = metrics['responseBodyBytesReceived']
if 'responseHeaderBytesReceived' in metrics and \
metrics['responseHeaderBytesReceived'] >= 0:
request['bytesIn'] += metrics['responseHeaderBytesReceived']
if 'responseBodyDecodedSize' in metrics and \
metrics['responseBodyDecodedSize'] >= 0:
request['objectSizeUncompressed'] = \
metrics['responseBodyDecodedSize']
if request['fromNet']:
self.get_response_body(request_id, original_request_id)
elif event == 'loadingFailed':
if timestamp and 'start' in request and timestamp > request['start']:
if 'firstByte' not in request or timestamp < request['firstByte']:
request['firstByte'] = timestamp
if 'end' not in request or timestamp > request['end']:
request['end'] = timestamp
request['statusText'] = msg['params']['errorText']
if self.main_request is not None and request_id == self.main_request:
if 'canceled' not in msg['params'] or not msg['params']['canceled']:
self.task['error'] = msg['params']['errorText']
self.nav_error = msg['params']['errorText']
self.nav_error_code = 12999
logging.debug('Navigation error: %s', self.nav_error)
elif event == 'requestServedFromMemoryCache':
request['fromNet'] = False
else:
ignore_activity = True
if not self.task['stop_at_onload'] and not ignore_activity:
self.last_activity = monotonic()
def process_inspector_event(self, event):
"""Process Inspector.* dev tools events"""
if event == 'detached':
self.task['error'] = 'Inspector detached, possibly crashed.'
elif event == 'targetCrashed':
self.task['error'] = 'Browser crashed.'
def process_timeline_event(self, event, msg):
"""Handle Timeline.* events"""
if self.trace_parser is not None and 'params' in msg and 'record' in msg['params']:
if 'start' not in self.page:
return
if self.trace_parser.start_time is None:
self.trace_parser.start_time = self.page['start'] * 1000000.0
self.trace_parser.end_time = self.page['start'] * 1000000.0
if 'timestamp' in msg['params']:
timestamp = msg['params']['timestamp'] * 1000000.0
if timestamp > self.trace_parser.end_time:
self.trace_parser.end_time = timestamp
processed = self.trace_parser.ProcessOldTimelineEvent(msg['params']['record'], None)
if processed is not None:
self.trace_parser.timeline_events.append(processed)
def process_console_event(self, event, msg):
"""Handle Console.* events"""
if event == 'messageAdded' and 'message' in msg['params']:
self.console_log.append(msg['params']['message'])
def process_target_event(self, event, msg):
"""Process Target.* dev tools events"""
if event == 'dispatchMessageFromTarget':
if 'message' in msg['params']:
logging.debug(msg['params']['message'][:200])
target_message = json.loads(msg['params']['message'])
self.process_message(target_message)
if event == 'targetCreated':
if 'targetInfo' in msg['params'] and 'targetId' in msg['params']['targetInfo']:
target = msg['params']['targetInfo']
target_id = target['targetId']
if 'type' in target and target['type'] == 'page':
self.default_target = target_id
if self.recording:
self.enable_safari_events()
else:
self.workers.append(target)
if self.recording:
self.enable_target(target_id)
self.send_command('Target.resume', {'targetId': target_id})
def get_response_body(self, request_id, original_id):
"""Retrieve and store the given response body (if necessary)"""
if original_id not in self.response_bodies and self.body_fail_count < 3:
request = self.requests[request_id]
if 'status' in request and request['status'] == 200 and 'response_headers' in request:
logging.debug('Getting body for %s (%d) - %s', request_id,
request['bytesIn'], request['url'])
path = os.path.join(self.task['dir'], 'bodies')
if not os.path.isdir(path):
os.makedirs(path)
body_file_path = os.path.join(path, original_id)
if not os.path.exists(body_file_path):
# Only grab bodies needed for optimization checks
# or if we are saving full bodies
need_body = True
content_type = self.get_header_value(request['response_headers'],
'Content-Type')
is_text = False
if content_type is not None:
content_type = content_type.lower()
if content_type.startswith('text/') or \
content_type.find('javascript') >= 0 or \
content_type.find('json') >= 0:
is_text = True
# Ignore video files over 10MB
if content_type[:6] == 'video/' and request['bytesIn'] > 10000000:
need_body = False
optimization_checks_disabled = bool('noopt' in self.job and self.job['noopt'])
if optimization_checks_disabled and self.bodies_zip_file is None:
need_body = False
if need_body:
response = self.send_command("Network.getResponseBody",
{'requestId': original_id}, wait=True)
if response is None:
self.body_fail_count += 1
logging.warning('No response to body request for request %s',
request_id)
elif 'result' not in response or \
'body' not in response['result']:
self.body_fail_count = 0
logging.warning('Missing response body for request %s',
request_id)
elif len(response['result']['body']):
self.body_fail_count = 0
# Write the raw body to a file (all bodies)
if 'base64Encoded' in response['result'] and \
response['result']['base64Encoded']:
body = base64.b64decode(response['result']['body'])
else:
body = response['result']['body'].encode('utf-8')
is_text = True
# Add text bodies to the zip archive
if self.bodies_zip_file is not None and is_text:
self.body_index += 1
name = '{0:03d}-{1}-body.txt'.format(self.body_index, request_id)
self.bodies_zip_file.writestr(name, body)
logging.debug('%s: Stored body in zip', request_id)
logging.debug('%s: Body length: %d', request_id, len(body))
self.response_bodies[request_id] = body
with open(body_file_path, 'wb') as body_file:
body_file.write(body)
else:
self.body_fail_count = 0
self.response_bodies[request_id] = response['result']['body']
if os.path.exists(body_file_path):
request['body'] = body_file_path
def get_header_value(self, headers, name):
"""Get the value for the requested header"""
value = None
try:
if headers:
if name in headers:
value = headers[name]
else:
find = name.lower()
for header_name in headers:
check = header_name.lower()
if check == find or (check[0] == ':' and check[1:] == find):
value = headers[header_name]
break
except Exception:
logging.exception('Error getting header value for %s', name)
return value
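    # Illustrative example for get_header_value (hypothetical values): with
    #   headers = {'content-type': 'text/html', ':status': '200'}
    # get_header_value(headers, 'Content-Type') returns 'text/html' and
    # get_header_value(headers, 'status') returns '200', because the lookup is
    # case-insensitive and tolerates a leading ':' on pseudo-header names.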
def prepare_task(self, task):
"""Format the file prefixes for multi-step testing"""
if task['current_step'] == 1:
task['prefix'] = task['task_prefix']
task['video_subdirectory'] = task['task_video_prefix']
else:
task['prefix'] = '{0}_{1:d}'.format(task['task_prefix'], task['current_step'])
task['video_subdirectory'] = '{0}_{1:d}'.format(task['task_video_prefix'],
task['current_step'])
if task['video_subdirectory'] not in task['video_directories']:
task['video_directories'].append(task['video_subdirectory'])
if self.event_name is not None:
task['step_name'] = self.event_name
else:
task['step_name'] = 'Step_{0:d}'.format(task['current_step'])
self.path_base = os.path.join(self.task['dir'], self.task['prefix'])
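    # Illustrative example (the base prefixes are hypothetical): if task_prefix
    # were "1" and task_video_prefix were "video_1", step 2 would write files
    # using the prefix "1_2" and record video under "video_1_2".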
def enable_target(self, target_id):
"""Enable all of the targe-specific events"""
self.send_command('Network.enable', {}, target_id=target_id)
if self.headers:
self.send_command('Network.setExtraHTTPHeaders', {'headers': self.headers}, target_id=target_id)
def enable_safari_events(self):
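        """Enable the WebKit DevTools domains needed for recording (Inspector, Network and Runtime, plus Console, Timeline and Page when logging data) and configure headers/user agent"""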
self.send_command('Inspector.enable', {})
self.send_command('Network.enable', {})
self.send_command('Runtime.enable', {})
if self.headers:
self.send_command('Network.setExtraHTTPHeaders', {'headers': self.headers})
if len(self.workers):
for target in self.workers:
self.enable_target(target['targetId'])
if 'user_agent_string' in self.job:
self.ios.set_user_agent(self.job['user_agent_string'])
if self.task['log_data']:
self.send_command('Console.enable', {})
self.send_command('Timeline.start', {}, wait=True)
self.send_command('Page.enable', {}, wait=True)
def on_start_recording(self, task):
"""Notification that we are about to start an operation that needs to be recorded"""
self.page = {}
self.requests = {}
self.console_log = []
self.response_bodies = {}
if self.timeline is not None:
self.timeline.close()
self.timeline = None
self.wpt_result = None
task['page_data'] = {'date': time.time()}
task['page_result'] = None
task['run_start_time'] = monotonic()
self.flush_messages()
self.enable_safari_events()
if self.task['log_data']:
if not self.job['shaper'].configure(self.job, task):
self.task['error'] = "Error configuring traffic-shaping"
if self.bodies_zip_file is not None:
self.bodies_zip_file.close()
self.bodies_zip_file = None
if 'bodies' in self.job and self.job['bodies']:
self.bodies_zip_file = zipfile.ZipFile(self.path_base + '_bodies.zip', 'w',
zipfile.ZIP_DEFLATED)
if 'timeline' in self.job and self.job['timeline']:
if self.path_base is not None:
timeline_path = self.path_base + '_devtools.json.gz'
self.timeline = gzip.open(timeline_path, GZIP_TEXT, 7)
if self.timeline:
self.timeline.write('[\n')
from internal.support.trace_parser import Trace
self.trace_parser = Trace()
self.trace_parser.cpu['main_thread'] = '0'
self.trace_parser.threads['0'] = {}
self.ios.show_orange()
if self.path_base is not None and not self.job['disable_video']:
task['video_file'] = self.path_base + '_video.mp4'
self.ios.start_video()
if self.ios_version:
task['page_data']['osVersion'] = self.ios_version
task['page_data']['os_version'] = self.ios_version
task['page_data']['browserVersion'] = self.ios_version
task['page_data']['browser_version'] = self.ios_version
self.recording = True
now = monotonic()
if not self.task['stop_at_onload']:
self.last_activity = now
if self.page_loaded is not None:
self.page_loaded = now
logging.debug('Starting measurement')
task['start_time'] = datetime.utcnow()
def on_stop_capture(self, task):
"""Do any quick work to stop things that are capturing data"""
pass
def on_stop_recording(self, task):
"""Notification that we are done with recording"""
self.recording = False
self.send_command('Page.disable', {})
self.send_command('Inspector.disable', {})
self.send_command('Network.disable', {})
self.send_command('Runtime.disable', {})
if len(self.workers):
for target in self.workers:
self.send_command('Network.disable', {}, target_id=target['targetId'])
self.send_command('Inspector.disable', {})
if self.task['log_data']:
self.send_command('Console.disable', {})
if 'timeline' in self.job and self.job['timeline']:
self.send_command('Timeline.stop', {})
if self.job['pngScreenShot'] and self.path_base is not None:
screen_shot = self.path_base + '_screen.png'
self.grab_screenshot(screen_shot, png=True)
elif self.path_base is not None:
screen_shot = self.path_base + '_screen.jpg'
self.grab_screenshot(screen_shot, png=False, resize=600)
# Grab the video and kick off processing async
if 'video_file' in task:
self.ios.stop_video()
# Collect end of test data from the browser
self.collect_browser_metrics(task)
if self.bodies_zip_file is not None:
self.bodies_zip_file.close()
self.bodies_zip_file = None
self.job['shaper'].reset()
def on_start_processing(self, task):
"""Start any processing of the captured data"""
if task['log_data']:
# Attach response bodies to all of the appropriate requests
requests = {}
for request_id in self.requests:
request = self.requests[request_id]
if request['fromNet'] and 'url' in request and request['url'].startswith('http'):
if not request['is_redirect'] and \
request['original_id'] in self.response_bodies:
request['response_body'] = self.response_bodies[request['original_id']]
requests[request_id] = request
# Start the optimization checks in a background thread
self.optimization = OptimizationChecks(self.job, task, requests)
self.optimization.start()
support_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "support")
# Start processing the timeline
if self.timeline:
self.timeline.write("{}]")
self.timeline.close()
self.timeline = None
# Grab the video and kick off processing async
if 'video_file' in task and self.ios.get_video(task['video_file']):
video_path = os.path.join(task['dir'], task['video_subdirectory'])
if task['current_step'] == 1:
filename = '{0:d}.{1:d}.histograms.json.gz'.format(task['run'], task['cached'])
else:
filename = '{0:d}.{1:d}.{2:d}.histograms.json.gz'.format(task['run'],
task['cached'],
task['current_step'])
histograms = os.path.join(task['dir'], filename)
progress_file = os.path.join(task['dir'], task['prefix']) + \
'_visual_progress.json.gz'
visualmetrics = os.path.join(support_path, "visualmetrics.py")
args = [sys.executable, visualmetrics, '-i', task['video_file'],
'-d', video_path, '--force', '--quality',
'{0:d}'.format(self.job['imageQuality']),
'--viewport', '--orange', '--maxframes', '50', '--histogram', histograms,
'--progress', progress_file]
if 'debug' in self.job and self.job['debug']:
args.append('-vvvv')
if 'heroElementTimes' in self.job and self.job['heroElementTimes']:
hero_elements_file = os.path.join(task['dir'], task['prefix']) + '_hero_elements.json.gz'
args.extend(['--herodata', hero_elements_file])
if 'renderVideo' in self.job and self.job['renderVideo']:
video_out = self.path_base + '_rendered_video.mp4'
args.extend(['--render', video_out])
if 'fullSizeVideo' in self.job and self.job['fullSizeVideo']:
args.append('--full')
if 'thumbsize' in self.job:
try:
thumbsize = int(self.job['thumbsize'])
if thumbsize > 0 and thumbsize <= 2000:
args.extend(['--thumbsize', str(thumbsize)])
except Exception:
pass
logging.debug(' '.join(args))
self.video_processing = subprocess.Popen(args, close_fds=True)
# Save the console logs
if self.console_log and self.path_base is not None:
log_file = self.path_base + '_console_log.json.gz'
with gzip.open(log_file, GZIP_TEXT, 7) as f_out:
json.dump(self.console_log, f_out)
# Process the timeline data
if self.trace_parser is not None and self.path_base is not None:
start = monotonic()
logging.debug("Processing the trace timeline events")
self.trace_parser.ProcessTimelineEvents()
self.trace_parser.WriteCPUSlices(self.path_base + '_timeline_cpu.json.gz')
self.trace_parser.WriteScriptTimings(self.path_base + '_script_timing.json.gz')
self.trace_parser.WriteInteractive(self.path_base + '_interactive.json.gz')
self.trace_parser.WriteLongTasks(self.path_base + '_long_tasks.json.gz')
elapsed = monotonic() - start
logging.debug("Done processing the trace events: %0.3fs", elapsed)
self.trace_parser = None
# Calculate the request and page stats
self.wpt_result = {}
self.wpt_result['requests'] = self.process_requests(requests)
self.wpt_result['pageData'] = self.calculate_page_stats(self.wpt_result['requests'])
def wait_for_processing(self, task):
"""Wait for any background processing threads to finish"""
if self.video_processing is not None:
logging.debug('Waiting for video processing to finish')
self.video_processing.communicate()
self.video_processing = None
if not self.job['keepvideo']:
try:
os.remove(task['video_file'])
except Exception:
pass
opt = None
if self.optimization is not None:
opt = self.optimization.join()
if self.wpt_result is not None:
self.process_optimization_results(self.wpt_result['pageData'],
self.wpt_result['requests'], opt)
if self.path_base is not None:
devtools_file = self.path_base + '_devtools_requests.json.gz'
with gzip.open(devtools_file, GZIP_TEXT, 7) as f_out:
json.dump(self.wpt_result, f_out)
def step_complete(self, task):
"""Final step processing"""
logging.debug("Writing end-of-step data")
# Write out the accumulated page_data
if task['log_data'] and task['page_data']:
if 'browser' in self.job:
task['page_data']['browser_name'] = self.job['browser']
if 'step_name' in task:
task['page_data']['eventName'] = task['step_name']
if 'run_start_time' in task:
task['page_data']['test_run_time_ms'] = \
int(round((monotonic() - task['run_start_time']) * 1000.0))
if self.path_base is not None:
path = self.path_base + '_page_data.json.gz'
json_page_data = json.dumps(task['page_data'])
logging.debug('Page Data: %s', json_page_data)
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json_page_data)
def send_command(self, method, params, wait=False, timeout=10, target_id=None):
"""Send a raw dev tools message and optionally wait for the response"""
ret = None
if target_id is None and self.default_target is not None and \
not method.startswith('Target.') and \
not method.startswith('Tracing.'):
target_id = self.default_target
if target_id is not None:
self.command_id += 1
command_id = int(self.command_id)
msg = {'id': command_id, 'method': method, 'params': params}
if wait:
self.pending_commands.append(command_id)
end_time = monotonic() + timeout
self.send_command('Target.sendMessageToTarget',
{'targetId': target_id, 'message': json.dumps(msg)},
wait=True, timeout=timeout)
if wait:
if command_id in self.command_responses:
ret = self.command_responses[command_id]
del self.command_responses[command_id]
else:
while ret is None and monotonic() < end_time:
try:
msg = self.messages.get(timeout=1)
try:
if msg:
self.process_message(msg)
except Exception:
logging.exception('Error processing command response')
except Exception:
pass
if command_id in self.command_responses:
ret = self.command_responses[command_id]
del self.command_responses[command_id]
elif self.websocket:
self.command_id += 1
command_id = int(self.command_id)
if wait:
self.pending_commands.append(command_id)
msg = {'id': command_id, 'method': method, 'params': params}
try:
out = json.dumps(msg)
logging.debug("Sending: %s", out)
self.websocket.send(out)
if wait:
end_time = monotonic() + timeout
while ret is None and monotonic() < end_time:
try:
msg = self.messages.get(timeout=1)
try:
if msg:
self.process_message(msg)
except Exception:
logging.exception('Error processing response to command')
except Exception:
pass
if command_id in self.command_responses:
ret = self.command_responses[command_id]
del self.command_responses[command_id]
except Exception as err:
logging.exception("Websocket send error: %s", err.__str__())
return ret
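    # Illustrative example (ids and target name are hypothetical): a command for
    # a specific target is wrapped rather than sent directly, so Network.enable
    # for target "page-1" goes out roughly as
    #   {"id": 12, "method": "Target.sendMessageToTarget",
    #    "params": {"targetId": "page-1",
    #               "message": "{\"id\": 11, \"method\": \"Network.enable\", \"params\": {}}"}}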
def flush_pending_messages(self):
"""Clear out any pending websocket messages"""
if self.websocket:
try:
while True:
msg = self.messages.get(timeout=0)
try:
if msg:
self.process_message(msg)
except Exception:
logging.exception('Error processing message')
except Exception:
pass
def process_command(self, command):
"""Process an individual script command"""
logging.debug("Processing script command:")
logging.debug(command)
if command['command'] == 'navigate':
self.task['page_data']['URL'] = command['target']
self.main_frame = None
self.main_request = None
self.is_navigating = True
self.ios.navigate(command['target'])
elif command['command'] == 'logdata':
self.task['combine_steps'] = False
if int(re.search(r'\d+', str(command['target'])).group()):
logging.debug("Data logging enabled")
self.task['log_data'] = True
else:
logging.debug("Data logging disabled")
self.task['log_data'] = False
elif command['command'] == 'combinesteps':
self.task['log_data'] = True
self.task['combine_steps'] = True
elif command['command'] == 'seteventname':
self.event_name = command['target']
elif command['command'] == 'exec':
if command['record']:
self.main_frame = None
self.main_request = None
self.is_navigating = True
self.execute_js(command['target'], remove_orange=self.recording)
elif command['command'] == 'sleep':
delay = min(60, max(0, int(re.search(r'\d+', str(command['target'])).group())))
if delay > 0:
time.sleep(delay)
elif command['command'] == 'setabm':
self.task['stop_at_onload'] = \
bool('target' in command and int(re.search(r'\d+',
str(command['target'])).group()) == 0)
elif command['command'] == 'setactivitytimeout':
if 'target' in command:
milliseconds = int(re.search(r'\d+', str(command['target'])).group())
self.task['activity_time'] = max(0, min(30, float(milliseconds) / 1000.0))
elif command['command'] == 'setminimumstepseconds':
self.task['minimumTestSeconds'] = int(re.search(r'\d+', str(command['target'])).group())
elif command['command'] == 'setuseragent':
self.task['user_agent_string'] = command['target']
elif command['command'] == 'setcookie':
if 'target' in command and 'value' in command:
try:
url = command['target'].strip()
cookie = command['value']
pos = cookie.find(';')
if pos > 0:
cookie = cookie[:pos]
pos = cookie.find('=')
if pos > 0:
name = cookie[:pos].strip()
value = cookie[pos + 1:].strip()
if len(name) and len(value) and len(url):
self.ios.set_cookie(url, name, value)
except Exception:
logging.exception('Error setting cookie')
elif command['command'] == 'clearcache':
self.ios.clear_cache()
elif command['command'] == 'addheader':
self.set_header(command['target'])
elif command['command'] == 'setheader':
self.set_header(command['target'])
elif command['command'] == 'resetheaders':
self.reset_headers()
def navigate(self, url):
"""Navigate to the given URL"""
if self.connected:
self.ios.navigate(url)
def grab_screenshot(self, path, png=True, resize=0):
"""Save the screen shot (png or jpeg)"""
if self.connected:
data = self.ios.screenshot()
if data:
resize_string = '' if not resize else '-resize {0:d}x{0:d} '.format(resize)
if png:
with open(path, 'wb') as image_file:
image_file.write(data)
if resize_string:
cmd = '{0} -format png -define png:color-type=2 '\
'-depth 8 {1}"{2}"'.format(self.job['image_magick']['mogrify'],
resize_string, path)
logging.debug(cmd)
subprocess.call(cmd, shell=True)
else:
tmp_file = path + '.png'
with open(tmp_file, 'wb') as image_file:
image_file.write(data)
command = '{0} "{1}" {2}-quality {3:d} "{4}"'.format(
self.job['image_magick']['convert'],
tmp_file, resize_string, self.job['imageQuality'], path)
logging.debug(command)
subprocess.call(command, shell=True)
if os.path.isfile(tmp_file):
try:
os.remove(tmp_file)
except Exception:
pass
def get_empty_request(self, request_id, url):
"""Return and empty, initialized request"""
parts = urlsplit(url)
request = {'type': 3,
'id': request_id,
'request_id': request_id,
'ip_addr': '',
'full_url': url,
'is_secure': 1 if parts.scheme == 'https' else 0,
'method': '',
'host': parts.netloc,
'url': parts.path,
'responseCode': -1,
'load_start': -1,
'load_ms': -1,
'ttfb_ms': -1,
'dns_start': -1,
'dns_end': -1,
'dns_ms': -1,
'connect_start': -1,
'connect_end': -1,
'connect_ms': -1,
'ssl_start': -1,
'ssl_end': -1,
'ssl_ms': -1,
'bytesIn': 0,
'bytesOut': 0,
'objectSize': 0,
'initiator': '',
'initiator_line': '',
'initiator_column': '',
'server_rtt': None,
'headers': {'request': [], 'response': []},
'score_cache': -1,
'score_cdn': -1,
'score_gzip': -1,
'score_cookies': -1,
'score_keep-alive': -1,
'score_minify': -1,
'score_combine': -1,
'score_compress': -1,
'score_etags': -1,
'gzip_total': None,
'gzip_save': None,
'minify_total': None,
'minify_save': None,
'image_total': None,
'image_save': None,
'cache_time': None,
'cdn_provider': None,
'server_count': None,
'socket': -1
}
if parts.query:
request['url'] += '?' + parts.query
return request
def process_requests(self, raw_requests):
"""Convert all of the request events into the format needed for WPT"""
requests = []
if 'start' in self.page:
start = self.page['start']
for request_id in raw_requests:
r = raw_requests[request_id]
request = self.get_empty_request(request_id, r['url'])
if 'ip' in r:
request['ip_addr'] = r['ip']
if 'connection' in r:
request['socket'] = r['connection']
if 'priority' in r:
request['priority'] = r['priority']
if 'protocol' in r:
request['protocol'] = r['protocol']
if 'method' in r:
request['method'] = r['method']
if 'status' in r:
request['responseCode'] = r['status']
if 'type' in r:
request['requestType'] = r['type']
if 'created' in r:
request['created'] = int(round((r['created'] - start) * 1000.0))
request['load_start'] = int(round((r['start'] - start) * 1000.0))
if 'end' in r:
request['load_ms'] = int(round((r['end'] - r['start']) * 1000.0))
if 'firstByte' in r:
request['ttfb_ms'] = int(round((r['firstByte'] - r['start']) * 1000.0))
if 'timing' in r and not r['is_redirect']:
start_ms = int(request['load_start'])
timing = r['timing']
if timing['domainLookupStart'] > 0 or timing['domainLookupEnd'] > 0:
request['dns_start'] = int(round(start_ms + timing['domainLookupStart']))
request['dns_end'] = int(round(start_ms + timing['domainLookupEnd']))
if timing['connectStart'] > 0 or timing['connectEnd'] > 0:
request['connect_start'] = int(round(start_ms + timing['connectStart']))
request['connect_end'] = int(round(start_ms + timing['connectEnd']))
if timing['secureConnectionStart'] >= 0:
request['ssl_start'] = int(round(start_ms +
timing['secureConnectionStart']))
request['ssl_end'] = request['connect_end']
request['connect_end'] = request['ssl_start']
if timing['requestStart'] >= 0:
request['load_start'] = int(round(start_ms + timing['requestStart']))
request['load_ms'] -= int(round(timing['requestStart']))
request['ttfb_ms'] -= int(round(timing['requestStart']))
if timing['responseStart'] >= 0:
request['ttfb_ms'] = int(round(timing['responseStart'] -
timing['requestStart']))
if 'chunks' in r:
request['chunks'] = []
for chunk in r['chunks']:
ts = (chunk['ts'] - start) * 1000.0
request['chunks'].append({'ts': ts, 'bytes': chunk['bytes']})
request['bytesIn'] = r['bytesIn']
if 'bytesOut' in r:
request['bytesOut'] = r['bytesOut']
if 'objectSize' in r:
request['objectSize'] = r['objectSize']
if 'objectSizeUncompressed' in r:
request['objectSizeUncompressed'] = r['objectSizeUncompressed']
if 'initiator' in r:
if 'url' in r['initiator']:
request['initiator'] = r['initiator']['url']
if 'lineNumber' in r['initiator']:
request['initiator_line'] = r['initiator']['lineNumber']
elif 'stackTrace' in r['initiator'] and r['initiator']['stackTrace']:
for entry in r['initiator']['stackTrace']:
if 'url' in entry and entry['url'].startswith('http'):
request['initiator'] = entry['url']
if 'lineNumber' in entry:
request['initiator_line'] = entry['lineNumber']
if 'columnNumber' in entry:
request['initiator_column'] = entry['columnNumber']
break
if 'request_headers' in r:
for name in r['request_headers']:
for value in r['request_headers'][name].splitlines():
request['headers']['request'].append(u'{0}: {1}'.format(name, value))
if 'response_headers' in r:
for name in r['response_headers']:
for value in r['response_headers'][name].splitlines():
request['headers']['response'].append(u'{0}: {1}'.format(name, value))
value = self.get_header_value(r['response_headers'], 'Expires')
if value:
request['expires'] = value
value = self.get_header_value(r['response_headers'], 'Cache-Control')
if value:
request['cacheControl'] = value
value = self.get_header_value(r['response_headers'], 'Content-Type')
if value:
request['contentType'] = value
value = self.get_header_value(r['response_headers'], 'Content-Encoding')
if value:
request['contentEncoding'] = value
# If a content-length header is available, use that instead of the values
# reported by Safari which only show the unencoded size (even though it
# claims otherwise).
try:
value = self.get_header_value(r['response_headers'], 'Content-Length')
if value:
content_length = int(value)
if content_length >= 0:
request['objectSize'] = content_length
request['bytesIn'] = content_length + \
sum(len(s) for s in request['headers']['response'])
except Exception:
logging.exception('Error processing response length')
requests.append(request)
requests.sort(key=lambda x: x['load_start'])
return requests
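    # Illustrative example (timestamps are hypothetical): DevTools timestamps are
    # seconds from an arbitrary origin, so with a page start of 100.0, a request
    # that starts at 100.25 and ends at 100.75 is reported with load_start = 250
    # and load_ms = 500 (milliseconds relative to page start).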
def calculate_page_stats(self, requests):
"""Calculate the page-level stats"""
page = {'loadTime': 0,
'docTime': 0,
'fullyLoaded': 0,
'bytesOut': 0,
'bytesOutDoc': 0,
'bytesIn': 0,
'bytesInDoc': 0,
'requests': len(requests),
'requestsDoc': 0,
'responses_200': 0,
'responses_404': 0,
'responses_other': 0,
'result': 0,
'testStartOffset': 0,
'cached': 1 if self.task['cached'] else 0,
'optimization_checked': 0,
'start_epoch': int((self.task['start_time'] - \
datetime.utcfromtimestamp(0)).total_seconds())
}
if 'loadEventStart' in self.task['page_data']:
page['loadTime'] = self.task['page_data']['loadEventStart']
page['docTime'] = page['loadTime']
page['loadEventStart'] = page['loadTime']
page['loadEventEnd'] = page['loadTime']
if 'loaded' in self.page:
page['loadTime'] = int(round((self.page['loaded'] - self.page['start']) * 1000.0))
page['docTime'] = page['loadTime']
page['loadEventStart'] = page['loadTime']
page['loadEventEnd'] = page['loadTime']
if 'DOMContentLoaded' in self.page:
page['domContentLoadedEventStart'] = int(round((self.page['DOMContentLoaded'] -
self.page['start']) * 1000.0))
page['domContentLoadedEventEnd'] = page['domContentLoadedEventStart']
main_request = None
index = 0
for request in requests:
if request['load_ms'] >= 0:
end_time = request['load_start'] + request['load_ms']
if end_time > page['fullyLoaded']:
page['fullyLoaded'] = end_time
if end_time <= page['loadTime']:
page['requestsDoc'] += 1
page['bytesInDoc'] += request['bytesIn']
page['bytesOutDoc'] += request['bytesOut']
page['bytesIn'] += request['bytesIn']
page['bytesOut'] += request['bytesOut']
if request['responseCode'] == 200:
page['responses_200'] += 1
elif request['responseCode'] == 404:
page['responses_404'] += 1
page['result'] = 99999
elif request['responseCode'] > -1:
page['responses_other'] += 1
if main_request is None and \
(request['responseCode'] == 200 or request['responseCode'] == 304):
main_request = request['id']
request['is_base_page'] = True
page['final_base_page_request'] = index
page['final_base_page_request_id'] = main_request
page['final_url'] = request['full_url']
if 'URL' not in self.task['page_data']:
self.task['page_data']['URL'] = page['final_url']
if request['ttfb_ms'] >= 0:
page['TTFB'] = request['load_start'] + request['ttfb_ms']
if request['ssl_end'] >= request['ssl_start'] and \
request['ssl_start'] >= 0:
page['basePageSSLTime'] = int(round(request['ssl_end'] - \
request['ssl_start']))
if self.nav_error_code is not None:
page['result'] = self.nav_error_code
elif page['responses_200'] == 0 and len(requests):
if 'responseCode' in requests[0]:
page['result'] = requests[0]['responseCode']
else:
page['result'] = 12999
self.task['page_result'] = page['result']
return page
def process_optimization_results(self, page_data, requests, optimization_results):
"""Merge the data from the optimization checks file"""
if optimization_results:
page_data['score_cache'] = -1
page_data['score_cdn'] = -1
page_data['score_gzip'] = -1
page_data['score_cookies'] = -1
page_data['score_keep-alive'] = -1
page_data['score_minify'] = -1
page_data['score_combine'] = -1
page_data['score_compress'] = -1
page_data['score_etags'] = -1
page_data['score_progressive_jpeg'] = -1
page_data['gzip_total'] = 0
page_data['gzip_savings'] = 0
page_data['minify_total'] = -1
page_data['minify_savings'] = -1
page_data['image_total'] = 0
page_data['image_savings'] = 0
page_data['optimization_checked'] = 1
page_data['base_page_cdn'] = ''
cache_count = 0
cache_total = 0
cdn_count = 0
cdn_total = 0
keep_alive_count = 0
keep_alive_total = 0
progressive_total_bytes = 0
progressive_bytes = 0
for request in requests:
if request['responseCode'] == 200:
request_id = str(request['id'])
pos = request_id.find('-')
if pos > 0:
request_id = request_id[:pos]
if request_id in optimization_results:
opt = optimization_results[request_id]
if 'cache' in opt:
request['score_cache'] = opt['cache']['score']
request['cache_time'] = opt['cache']['time']
cache_count += 1
cache_total += request['score_cache']
if 'cdn' in opt:
request['score_cdn'] = opt['cdn']['score']
request['cdn_provider'] = opt['cdn']['provider']
cdn_count += 1
cdn_total += request['score_cdn']
if 'is_base_page' in request and request['is_base_page'] and \
request['cdn_provider'] is not None:
page_data['base_page_cdn'] = request['cdn_provider']
if 'keep_alive' in opt:
request['score_keep-alive'] = opt['keep_alive']['score']
keep_alive_count += 1
keep_alive_total += request['score_keep-alive']
if 'gzip' in opt:
savings = opt['gzip']['size'] - opt['gzip']['target_size']
request['score_gzip'] = opt['gzip']['score']
request['gzip_total'] = opt['gzip']['size']
request['gzip_save'] = savings
page_data['gzip_total'] += opt['gzip']['size']
page_data['gzip_savings'] += savings
if 'image' in opt:
savings = opt['image']['size'] - opt['image']['target_size']
request['score_compress'] = opt['image']['score']
request['image_total'] = opt['image']['size']
request['image_save'] = savings
page_data['image_total'] += opt['image']['size']
page_data['image_savings'] += savings
if 'progressive' in opt:
size = opt['progressive']['size']
request['jpeg_scan_count'] = opt['progressive']['scan_count']
progressive_total_bytes += size
if request['jpeg_scan_count'] > 1:
request['score_progressive_jpeg'] = 100
progressive_bytes += size
elif size < 10240:
request['score_progressive_jpeg'] = 50
else:
request['score_progressive_jpeg'] = 0
if cache_count > 0:
page_data['score_cache'] = int(round(cache_total / cache_count))
if cdn_count > 0:
page_data['score_cdn'] = int(round(cdn_total / cdn_count))
if keep_alive_count > 0:
page_data['score_keep-alive'] = int(round(keep_alive_total / keep_alive_count))
if page_data['gzip_total'] > 0:
page_data['score_gzip'] = 100 - int(page_data['gzip_savings'] * 100 /
page_data['gzip_total'])
if page_data['image_total'] > 0:
page_data['score_compress'] = 100 - int(page_data['image_savings'] * 100 /
page_data['image_total'])
if progressive_total_bytes > 0:
page_data['score_progressive_jpeg'] = int(round(progressive_bytes * 100 /
progressive_total_bytes))
class DevToolsClient(WebSocketClient):
"""DevTools Websocket client"""
def __init__(self, url, protocols=None, extensions=None, heartbeat_freq=None,
ssl_options=None, headers=None):
WebSocketClient.__init__(self, url, protocols, extensions, heartbeat_freq,
ssl_options, headers)
self.connected = False
self.messages = None
self.trace_file = None
def opened(self):
"""Websocket interface - connection opened"""
logging.debug("DevTools websocket connected")
self.connected = True
def closed(self, code, reason=None):
"""Websocket interface - connection closed"""
logging.debug("DevTools websocket disconnected")
self.connected = False
def received_message(self, raw):
"""Websocket interface - message received"""
try:
if raw.is_text:
message = raw.data.decode(raw.encoding) if raw.encoding is not None else raw.data
if message.find("Timeline.eventRecorded") == -1:
logging.debug(message[:200])
if message:
message = json.loads(message)
if message:
self.messages.put(message)
except Exception:
logging.exception('Error processing received message')
|
the-stack_106_27723 | #!/usr/bin/env python
from future import standard_library
standard_library.install_aliases()
import json
commands = []
with open("sites.conf") as sites:
for line in sites.readlines():
line = line.strip()
names, args = line.split(" ", 1)
names = names.split(",")
command = {"args": args.split(" ")}
if len(names) == 1:
command["name"] = names[0]
else:
command["name"] = names
commands.append(command)
print(json.dumps(commands, sort_keys=True, indent=2))
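# Illustrative example (the sites.conf line below is hypothetical): the input line
#   news,blog --depth 2
# produces the entry {"args": ["--depth", "2"], "name": ["news", "blog"]}.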
|
the-stack_106_27728 | from __future__ import unicode_literals
from django.shortcuts import redirect
from django.template import RequestContext
from mezzanine.conf import settings
from mezzanine.forms.forms import FormForForm
from mezzanine.forms.models import Form
from mezzanine.forms.signals import form_invalid, form_valid
from mezzanine.pages.page_processors import processor_for
from mezzanine.utils.email import split_addresses, send_mail_template
from mezzanine.utils.views import is_spam
def format_value(value):
"""
Convert a list into a comma separated string, for displaying
select multiple values in emails.
"""
if isinstance(value, list):
value = ", ".join([v.strip() for v in value])
return value
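# Illustrative example: format_value(["a ", "b"]) returns "a, b", while a
# non-list value such as "hello" is returned unchanged.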
@processor_for(Form)
def form_processor(request, page):
"""
Display a built form and handle submission.
"""
form = FormForForm(page.form, RequestContext(request),
request.POST or None, request.FILES or None)
if form.is_valid():
url = page.get_absolute_url() + "?sent=1"
if is_spam(request, form, url):
return redirect(url)
attachments = []
for f in form.files.values():
f.seek(0)
attachments.append((f.name, f.read()))
entry = form.save()
subject = page.form.email_subject
if not subject:
subject = "%s - %s" % (page.form.title, entry.entry_time)
fields = [(v.label, format_value(form.cleaned_data[k]))
for (k, v) in form.fields.items()]
context = {
"fields": fields,
"message": page.form.email_message,
"request": request,
}
email_from = page.form.email_from or settings.DEFAULT_FROM_EMAIL
email_to = form.email_to()
if email_to and page.form.send_email:
send_mail_template(subject, "email/form_response", email_from,
email_to, context)
headers = None
if email_to:
# Add the email entered as a Reply-To header
headers = {'Reply-To': email_to}
email_copies = split_addresses(page.form.email_copies)
if email_copies:
send_mail_template(subject, "email/form_response_copies",
email_from, email_copies, context,
attachments=attachments, headers=headers)
form_valid.send(sender=request, form=form, entry=entry)
return redirect(url)
form_invalid.send(sender=request, form=form)
return {"form": form}
|
the-stack_106_27729 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Construct and visualize phylogenetic trees from:
1. MCSCAN output
2. CDS sequences in FASTA format
Options are provided for each step:
1. sequence alignment:
ClustalW2 or MUSCLE (wrapped on Biopython)
2. alignment editting:
GBlocks (optional)
3. build trees:
NJ: PHYLIP
ML: RAxML or PHYML
Optional steps:
- reroot tree
- alternative topology test (SH test)
- TreeFix
The external software needs be installed first.
"""
from __future__ import print_function
import sys
import os
import os.path as op
import logging
import re
import warnings
from math import ceil
from itertools import chain
from functools import partial
import numpy as np
from ete3 import Tree
from Bio import SeqIO, AlignIO
from Bio.Data import CodonTable
from Bio.Emboss.Applications import FSeqBootCommandline, FDNADistCommandline, \
FNeighborCommandline, FConsenseCommandline
from Bio.Phylo.Applications import PhymlCommandline, RaxmlCommandline
from jcvi.apps.ks import AbstractCommandline, find_first_isoform, \
run_mrtrans, clustal_align_protein, muscle_align_protein
from jcvi.formats.base import must_open, DictFile, LineFile
from jcvi.formats.fasta import Fasta
from jcvi.utils.orderedcollections import OrderedDict
from jcvi.graphics.base import plt, savefig
from jcvi.apps.base import OptionParser, ActionDispatcher, mkdir, sh, getpath
GBLOCKS_BIN = partial(getpath, name="GBLOCKS", warn="warn")
PHYML_BIN = partial(getpath, name="PHYML", warn="warn")
RAXML_BIN = partial(getpath, name="RAXML", warn="warn")
FPHYLIP_BIN = partial(getpath, name="FPHYLIP", warn="warn")
TREEFIX_BIN = partial(getpath, name="TREEFIX", warn="warn")
class GblocksCommandline(AbstractCommandline):
"""Little commandline for Gblocks
(http://molevol.cmima.csic.es/castresana/Gblocks.html).
Accepts alignment in FASTA or NBRF/PIR format.
"""
def __init__(self, aln_file, aln_type="c", \
command=GBLOCKS_BIN("Gblocks"), **kwargs):
self.aln_file = aln_file
self.aln_type = aln_type
self.command = command
params = {"b4":5, "b5":"h", "p":"n"}
params.update(kwargs)
self.parameters = ["-{0}={1}".format(k,v) for k,v in params.items()]
def __str__(self):
return self.command + " %s -t=%s " % (self.aln_file, self.aln_type) \
+ " ".join(self.parameters)
class FfitchCommandline(AbstractCommandline):
"""Little commandline for ffitch in EMBOSS
(http://www.molgen.mpg.de/~beck/embassy/phylipnew/ffitch.html).
Infer branch lengths of tree.
"""
def __init__(self, datafile, outtreefile, command=FPHYLIP_BIN("ffitch"), \
intreefile=None, **kwargs):
self.datafile = datafile
self.outtreefile = outtreefile
self.outfile = datafile.rsplit(".",1)[0] + ".ffitch"
self.command = command
self.intreefile = intreefile if intreefile else '""'
self.parameters = ["-{0} {1}".format(k,v) for k,v in kwargs.items()]
def __str__(self):
return self.command + " -datafile %s -intreefile %s -outfile %s " \
"-outtreefile %s " % (self.datafile, self.intreefile, \
self.outfile, self.outtreefile) + " ".join(self.parameters)
class TreeFixCommandline(AbstractCommandline):
"""Little commandline for TreeFix
(http://compbio.mit.edu/treefix/).
"""
def __init__(self, input, stree_file, smap_file, a_ext, \
command=TREEFIX_BIN("treefix"), r=False, **kwargs):
self.input = input
self.s = stree_file
self.S = smap_file
self.A = a_ext
self.command = command
params = {"V":1, \
"l":input.rsplit(".", 1)[0] + ".treefix.log"}
params.update(kwargs)
self.parameters = ["-{0} {1}".format(k,v) for k,v in params.items()]
if r:
self.parameters.append("-r")
def __str__(self):
return self.command + " -s %s -S %s -A %s " % (self.s, self.S, self.A) \
+ " ".join(self.parameters) + " %s" % self.input
def run_treefix(input, stree_file, smap_file, a_ext=".fasta", \
o_ext=".dnd", n_ext = ".treefix.dnd", **kwargs):
"""
get the ML tree closest to the species tree
"""
cl = TreeFixCommandline(input=input, \
stree_file=stree_file, smap_file=smap_file, a_ext=a_ext, \
o=o_ext, n=n_ext, **kwargs)
outtreefile = input.rsplit(o_ext, 1)[0] + n_ext
print("TreeFix:", cl, file=sys.stderr)
r, e = cl.run()
if e:
print("***TreeFix could not run", file=sys.stderr)
return None
else:
logging.debug("new tree written to {0}".format(outtreefile))
return outtreefile
def run_gblocks(align_fasta_file, **kwargs):
"""
remove poorly aligned positions and divergent regions with Gblocks
"""
cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs)
r, e = cl.run()
print("Gblocks:", cl, file=sys.stderr)
if e:
print("***Gblocks could not run", file=sys.stderr)
return None
else:
print(r, file=sys.stderr)
alignp = re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*', \
r'\1', r, flags=re.DOTALL)
alignp = int(alignp)
if alignp <= 10:
print("** WARNING ** Only %s %% positions retained by Gblocks. " \
"Results aborted. Using original alignment instead.\n" % alignp, file=sys.stderr)
return None
else:
return align_fasta_file+"-gb"
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs):
"""
Infer tree branch lengths using ffitch in EMBOSS PHYLIP
"""
cl = FfitchCommandline(datafile=distfile, outtreefile=outtreefile, \
intreefile=intreefile, **kwargs)
r, e = cl.run()
if e:
print("***ffitch could not run", file=sys.stderr)
return None
else:
print("ffitch:", cl, file=sys.stderr)
return outtreefile
def smart_reroot(treefile, outgroupfile, outfile, format=0):
"""
    simple function to reroot a Newick-format tree using ete3
    Tree reading format options are described here:
http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
"""
tree = Tree(treefile, format=format)
leaves = [t.name for t in tree.get_leaves()][::-1]
outgroup = []
for o in must_open(outgroupfile):
o = o.strip()
for leaf in leaves:
if leaf[:len(o)] == o:
outgroup.append(leaf)
if outgroup:
break
if not outgroup:
print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile), file=sys.stderr)
return treefile
try:
tree.set_outgroup(tree.get_common_ancestor(*outgroup))
except ValueError:
assert type(outgroup) == list
outgroup = outgroup[0]
tree.set_outgroup(outgroup)
tree.write(outfile=outfile, format=format)
logging.debug("Rerooted tree printed to {0}".format(outfile))
return outfile
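# Illustrative usage (file names are hypothetical):
#   smart_reroot("gene.nj.dnd", "outgroup.txt", "gene.nj.rooted.dnd")
# where outgroup.txt lists one gene-name prefix per line; all leaves matching the
# first prefix that hits anything are collected, and their common ancestor is
# used as the outgroup for rerooting.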
def build_nj_phylip(alignment, outfile, outgroup, work_dir="."):
"""
build neighbor joining tree of DNA seqs with PHYLIP in EMBOSS
PHYLIP manual
http://evolution.genetics.washington.edu/phylip/doc/
"""
phy_file = op.join(work_dir, "work", "aln.phy")
try:
AlignIO.write(alignment, file(phy_file, "w"), "phylip")
except ValueError:
print("Repeated seq name, possibly due to truncation. NJ tree not built.", file=sys.stderr)
return None
seqboot_out = phy_file.rsplit(".",1)[0] + ".fseqboot"
seqboot_cl = FSeqBootCommandline(FPHYLIP_BIN("fseqboot"), \
sequence=phy_file, outfile=seqboot_out, \
seqtype="d", reps=100, seed=12345)
stdout, stderr = seqboot_cl()
logging.debug("Resampling alignment: %s" % seqboot_cl)
dnadist_out = phy_file.rsplit(".",1)[0] + ".fdnadist"
dnadist_cl = FDNADistCommandline(FPHYLIP_BIN("fdnadist"), \
sequence=seqboot_out, outfile=dnadist_out, method="f")
stdout, stderr = dnadist_cl()
logging.debug\
("Calculating distance for bootstrapped alignments: %s" % dnadist_cl)
neighbor_out = phy_file.rsplit(".",1)[0] + ".njtree"
e = phy_file.rsplit(".",1)[0] + ".fneighbor"
neighbor_cl = FNeighborCommandline(FPHYLIP_BIN("fneighbor"), \
datafile=dnadist_out, outfile=e, outtreefile=neighbor_out)
stdout, stderr = neighbor_cl()
logging.debug("Building Neighbor Joining tree: %s" % neighbor_cl)
consense_out = phy_file.rsplit(".",1)[0] + ".consensustree.nodesupport"
e = phy_file.rsplit(".",1)[0] + ".fconsense"
consense_cl = FConsenseCommandline(FPHYLIP_BIN("fconsense"), \
intreefile=neighbor_out, outfile=e, outtreefile=consense_out)
stdout, stderr = consense_cl()
logging.debug("Building consensus tree: %s" % consense_cl)
# distance without bootstrapping
dnadist_out0 = phy_file.rsplit(".",1)[0] + ".fdnadist0"
dnadist_cl0 = FDNADistCommandline(FPHYLIP_BIN("fdnadist"), \
sequence=phy_file, outfile=dnadist_out0, method="f")
stdout, stderr = dnadist_cl0()
logging.debug\
("Calculating distance for original alignment: %s" % dnadist_cl0)
# infer branch length on consensus tree
consensustree1 = phy_file.rsplit(".",1)[0] + ".consensustree.branchlength"
run_ffitch(distfile=dnadist_out0, outtreefile=consensustree1, \
intreefile=consense_out)
# write final tree
ct_s = Tree(consense_out)
if outgroup:
t1 = consensustree1 + ".rooted"
t2 = smart_reroot(consensustree1, outgroup, t1)
if t2 == t1:
outfile = outfile.replace(".unrooted", "")
ct_b = Tree(t2)
else:
ct_b = Tree(consensustree1)
nodesupport = {}
for node in ct_s.traverse("postorder"):
node_children = tuple(sorted([f.name for f in node]))
if len(node_children) > 1:
nodesupport[node_children] = node.dist/100.
for k,v in nodesupport.items():
ct_b.get_common_ancestor(*k).support = v
print(ct_b)
ct_b.write(format=0, outfile=outfile)
try:
s = op.getsize(outfile)
except OSError:
s = 0
if s:
logging.debug("NJ tree printed to %s" % outfile)
return outfile, phy_file
else:
logging.debug("Something was wrong. NJ tree was not built.")
return None
def build_ml_phyml(alignment, outfile, work_dir=".", **kwargs):
"""
build maximum likelihood tree of DNA seqs with PhyML
"""
phy_file = op.join(work_dir, "work", "aln.phy")
AlignIO.write(alignment, file(phy_file, "w"), "phylip-relaxed")
phyml_cl = PhymlCommandline(cmd=PHYML_BIN("phyml"), input=phy_file, **kwargs)
logging.debug("Building ML tree using PhyML: %s" % phyml_cl)
stdout, stderr = phyml_cl()
tree_file = phy_file + "_phyml_tree.txt"
if not op.exists(tree_file):
print("***PhyML failed.", file=sys.stderr)
return None
sh("cp {0} {1}".format(tree_file, outfile), log=False)
logging.debug("ML tree printed to %s" % outfile)
return outfile, phy_file
def build_ml_raxml(alignment, outfile, work_dir=".", **kwargs):
"""
build maximum likelihood tree of DNA seqs with RAxML
"""
work_dir = op.join(work_dir, "work")
mkdir(work_dir)
phy_file = op.join(work_dir, "aln.phy")
AlignIO.write(alignment, file(phy_file, "w"), "phylip-relaxed")
raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work"))
mkdir(raxml_work)
raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \
sequences=phy_file, algorithm="a", model="GTRGAMMA", \
parsimony_seed=12345, rapid_bootstrap_seed=12345, \
num_replicates=100, name="aln", \
working_dir=raxml_work, **kwargs)
logging.debug("Building ML tree using RAxML: %s" % raxml_cl)
stdout, stderr = raxml_cl()
tree_file = "{0}/RAxML_bipartitions.aln".format(raxml_work)
if not op.exists(tree_file):
print("***RAxML failed.", file=sys.stderr)
sh("rm -rf %s" % raxml_work, log=False)
return None
sh("cp {0} {1}".format(tree_file, outfile), log=False)
logging.debug("ML tree printed to %s" % outfile)
sh("rm -rf %s" % raxml_work)
return outfile, phy_file
def SH_raxml(reftree, querytree, phy_file, shout="SH_out.txt"):
"""
SH test using RAxML
querytree can be a single tree or a bunch of trees (eg. from bootstrapping)
"""
assert op.isfile(reftree)
shout = must_open(shout, "a")
raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work"))
mkdir(raxml_work)
raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \
sequences=phy_file, algorithm="h", model="GTRGAMMA", \
name="SH", starting_tree=reftree, bipartition_filename=querytree, \
working_dir=raxml_work)
logging.debug("Running SH test in RAxML: %s" % raxml_cl)
o, stderr = raxml_cl()
# hard coded
try:
pval = re.search('(Significantly.*:.*)', o).group(0)
except:
print("SH test failed.", file=sys.stderr)
else:
pval = pval.strip().replace("\t"," ").replace("%","\%")
print("{0}\t{1}".format(op.basename(querytree), pval), file=shout)
logging.debug("SH p-value appended to %s" % shout.name)
shout.close()
return shout.name
CODON_TRANSLATION = CodonTable.standard_dna_table.forward_table
FOURFOLD = {"CTT": "L", "ACA": "T", "ACG": "T", "CCT": "P", "CTG": "L",
"CTA": "L", "ACT": "T", "CCG": "P", "CCA": "P", "CCC": "P",
"GGT": "G", "CGA": "R", "CGC": "R", "CGG": "R", "GGG": "G",
"GGA": "G", "GGC": "G", "CGT": "R", "GTA": "V", "GTC": "V",
"GTG": "V", "GTT": "V", "CTC": "L", "TCT": "S", "TCG": "S",
"TCC": "S", "ACC": "T", "TCA": "S", "GCA": "A", "GCC": "A",
"GCG": "A", "GCT": "A"}
def subalignment(alnfle, subtype, alntype="fasta"):
"""
Subset synonymous or fourfold degenerate sites from an alignment
input should be a codon alignment
"""
aln = AlignIO.read(alnfle, alntype)
alnlen = aln.get_alignment_length()
nseq = len(aln)
subaln = None
subalnfile = alnfle.rsplit(".", 1)[0] + "_{0}.{1}".format(subtype, alntype)
if subtype == "synonymous":
for j in range( 0, alnlen, 3 ):
aa = None
for i in range(nseq):
codon = str(aln[i, j: j + 3].seq)
if codon not in CODON_TRANSLATION:
break
if aa and CODON_TRANSLATION[codon] != aa:
break
else:
aa = CODON_TRANSLATION[codon]
else:
if subaln is None:
subaln = aln[:, j: j + 3]
else:
subaln += aln[:, j: j + 3]
if subtype == "fourfold":
for j in range( 0, alnlen, 3 ):
for i in range(nseq):
codon = str(aln[i, j: j + 3].seq)
if codon not in FOURFOLD:
break
else:
if subaln is None:
subaln = aln[:, j: j + 3]
else:
subaln += aln[:, j: j + 3]
if subaln:
AlignIO.write(subaln, subalnfile, alntype)
return subalnfile
else:
print("No sites {0} selected.".format(subtype), file=sys.stderr)
return None
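# Illustrative usage of subalignment() above -- a hedged sketch, not part of the
# original module; the input file name is hypothetical and must be an in-frame
# codon alignment (e.g. the pal2nal output produced elsewhere in this module).
def _example_subalignment():
    fourfold_aln = subalignment("cluster1.mrtrans.fasta", "fourfold")
    if fourfold_aln:
        print("fourfold-degenerate sites written to", fourfold_aln)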
def merge_rows_local(filename, ignore=".", colsep="\t", local=10, \
fieldcheck=True, fsep=","):
"""
merge overlapping rows within given row count distance
"""
fw = must_open(filename+".merged", "w")
    rows = open(filename).readlines()
rows = [row.strip().split(colsep) for row in rows]
l = len(rows[0])
for rowi, row in enumerate(rows):
        i = rowi + 1
        # use the live length of `rows`, since merged rows are removed in place
        while i <= min(rowi + local, len(rows) - 1):
merge = 1
row2 = rows[i]
for j in range(l):
a = row[j]
b = row2[j]
if fieldcheck:
a = set(a.split(fsep))
a = fsep.join(sorted(list(a)))
b = set(b.split(fsep))
b = fsep.join(sorted(list(b)))
if all([a!=ignore, b!=ignore, a not in b, b not in a]):
merge = 0
i += 1
break
if merge:
for x in range(l):
if row[x] == ignore:
rows[rowi][x] = row2[x]
elif row[x] in row2[x]:
rows[rowi][x] = row2[x]
else:
rows[rowi][x] = row[x]
row = rows[rowi]
rows.remove(row2)
print(colsep.join(row), file=fw)
fw.close()
return fw.name
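# Illustrative usage of merge_rows_local() above -- a hedged sketch, not part of
# the original module.  Two rows that only differ in empty ('.') fields are
# collapsed into one; the temporary file exists purely for the demonstration.
def _example_merge_rows_local():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
        tmp.write("geneA\t.\n.\tgeneB\n")
    merged = merge_rows_local(tmp.name, ignore=".", local=10)
    print(open(merged).read())  # expected single row: geneA<TAB>geneB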
def add_tandems(mcscanfile, tandemfile):
"""
add tandem genes to anchor genes in mcscan file
"""
    tandems = [f.strip().split(",") for f in open(tandemfile)]
fw = must_open(mcscanfile+".withtandems", "w")
fp = must_open(mcscanfile)
    seen = set()
for i, row in enumerate(fp):
if row[0] == '#':
continue
anchorslist = row.strip().split("\t")
anchors = set([a.split(",")[0] for a in anchorslist])
anchors.remove(".")
if anchors & seen == anchors:
continue
newanchors = []
for a in anchorslist:
if a == ".":
newanchors.append(a)
continue
for t in tandems:
if a in t:
newanchors.append(",".join(t))
seen.update(t)
break
else:
newanchors.append(a)
seen.add(a)
print("\t".join(newanchors), file=fw)
fw.close()
newmcscanfile = merge_rows_local(fw.name)
logging.debug("Tandems added to `{0}`. Results in `{1}`".\
format(mcscanfile, newmcscanfile))
fp.seek(0)
logging.debug("{0} rows merged to {1} rows".\
        format(len(fp.readlines()), len(open(newmcscanfile).readlines())))
sh("rm %s" % fw.name)
return newmcscanfile
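# Illustrative usage of add_tandems() above -- a hedged sketch, not part of the
# original module; both file names are hypothetical.  The tandem file lists one
# comma-separated tandem gene family per row, covering all genomes involved.
def _example_add_tandems():
    merged_mcscan = add_tandems("anchors.mcscan", "tandems.txt")
    print("tandem-aware mcscan file:", merged_mcscan)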
def main():
actions = (
('prepare', 'prepare cds sequences from .mcscan'),
('build', 'build NJ and ML trees from cds'),
('draw', 'draw Newick formatted trees'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def prepare(args):
"""
%prog prepare mcscanfile cdsfile [options]
Pick sequences from cdsfile to form fasta files, according to multiple
alignment in the mcscanfile.
The fasta sequences can then be used to construct phylogenetic tree.
Use --addtandem=tandemfile to collapse tandems of anchors into single row.
The tandemfile must be provided with *ALL* genomes involved, otherwise
result will be incomplete and redundant.
"""
from jcvi.graphics.base import discrete_rainbow
p = OptionParser(prepare.__doc__)
p.add_option("--addtandem", help="path to tandemfile [default: %default]")
p.add_option("--writecolors", default=False, action="store_true", \
help="generate a gene_name to color mapping file which will be taken " \
"by jcvi.apps.phylo.draw [default: %default]")
p.set_outdir(outdir="sequences")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
mcscanfile, cdsfile = args
if opts.addtandem:
tandemfile = opts.addtandem
mcscanfile_with_tandems = add_tandems(mcscanfile, tandemfile)
mcscanfile = mcscanfile_with_tandems
seqdir = opts.outdir
mkdir(seqdir)
f = Fasta(cdsfile)
fp = must_open(mcscanfile)
if opts.writecolors:
fc = must_open("leafcolors.txt", "w")
n = 0
for i, row in enumerate(fp):
row = row.strip().split("\t")
if i == 0:
l = len(row)
if l <= 20:
colors = discrete_rainbow(l, shuffle=False)[1]
else:
colors = discrete_rainbow(l, usepreset=False, shuffle=False)[1]
warnings.warn("*** WARNING ***\n" \
"Too many columns. Colors may not be all distinctive.")
assert len(row)==l, "All rows should have same number of fields."
anchors = set()
for j, atom in enumerate(row):
color = "%s,%s,%s" % colors[j]
if atom == ".":
continue
elif "," in atom:
atom = atom.split(",")
for a in atom:
fc.write("{0}\t{1}\n".format(a, color))
anchors.add(a)
else:
fc.write("{0}\t{1}\n".format(atom, color))
anchors.add(atom)
if len(anchors) <= 3:
print("Not enough seqs to build trees for {0}".format(anchors), file=sys.stderr)
continue
pivot = row[0]
fw = must_open("%s/%s.cds" % (seqdir, pivot), "w")
for a in anchors:
if a not in f:
print(a)
a = find_first_isoform(a, f)
assert a, a
arec = f[a]
SeqIO.write((arec), fw, "fasta")
fw.close()
n+=1
if opts.writecolors:
fc.close()
logging.debug("leaf colors written to `{0}`".format(fc.name))
logging.debug("cds of {0} syntelog groups written to {1}/".format(n, seqdir))
return seqdir
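# Example invocation of the `prepare` action above (illustrative only; the
# entry point and file names are assumptions and may differ per installation):
#   python -m jcvi.apps.phylo prepare blocks.mcscan cds.fasta \
#       --outdir=sequences --writecolors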
def build(args):
"""
%prog build [prot.fasta] cds.fasta [options] --outdir=outdir
This function wraps on the following steps:
1. msa using ClustalW2 or MUSCLE(default)
2. (optional) alignment editing using Gblocks
3. build NJ tree using PHYLIP in EMBOSS package
seq names should be unique by first 10 chars (restriction of PHYLIP)
4. build ML tree using RAxML(default) or PHYML, use keywords raxml or phyml,
*WARNING* maybe slow with large dataset
If an outgroup file is provided, the result tree will be rooted on the
outgroup according to order in the file, i.e. the name in row1 will be
tried first. If not found, row2 will be used, etc.
Tail truncated names can be provided so long as it is unique among the seqs.
If not uniq, the first occurrence will be used. For example, if you have
two moss sequences in your input, then the tree will be rooted on the
first moss sequence encountered by the program, unless they are monophylic,
in which case the root will be their common ancestor.
--stree and --smap are required if --treefix is set.
Trees can be edited again using an editor such as Dendroscope. This
is the recommended way to get highly customized trees.
Newick format trees will be deposited into outdir (. by default).
"""
from jcvi.formats.fasta import translate
p = OptionParser(build.__doc__)
p.add_option("--longest", action="store_true",
help="Get longest ORF, only works if no pep file, "\
"e.g. ESTs [default: %default]")
p.add_option("--nogblocks", action="store_true",
help="don't use Gblocks to edit alignment [default: %default]")
p.add_option("--synonymous", action="store_true",
help="extract synonymous sites of the alignment [default: %default]")
p.add_option("--fourfold", action="store_true",
help="extract fourfold degenerate sites of the alignment [default: %default]")
p.add_option("--msa", default="muscle", choices=("clustalw", "muscle"),
help="software used to align the proteins [default: %default]")
p.add_option("--noneighbor", action="store_true",
help="don't build NJ tree [default: %default]")
p.add_option("--ml", default=None, choices=("raxml", "phyml"),
help="software used to build ML tree [default: %default]")
p.add_option("--outgroup",
help="path to file containing outgroup orders [default: %default]")
p.add_option("--SH", help="path to reference Newick tree [default: %default]")
p.add_option("--shout", default="SH_out.txt", \
help="SH output file name [default: %default]")
p.add_option("--treefix", action="store_true",
help="use TreeFix to rearrange ML tree [default: %default]")
p.add_option("--stree", help="path to species Newick tree [default: %default]")
p.add_option("--smap", help="path to smap file: " \
"gene_name_pattern<tab>species_name [default: %default]")
p.set_outdir()
opts, args = p.parse_args(args)
gblocks = not opts.nogblocks
synonymous = opts.synonymous
fourfold = opts.fourfold
neighbor = not opts.noneighbor
outgroup = opts.outgroup
outdir = opts.outdir
if len(args) == 1:
protein_file, dna_file = None, args[0]
elif len(args) == 2:
protein_file, dna_file = args
else:
print("Incorrect arguments", file=sys.stderr)
sys.exit(not p.print_help())
if opts.treefix:
stree = opts.stree
smap = opts.smap
assert stree and smap, "TreeFix requires stree and smap files."
opts.ml = "raxml"
treedir = op.join(outdir, "tree")
mkdir(treedir)
if not protein_file:
protein_file = dna_file + ".pep"
translate_args = [dna_file, "--outfile=" + protein_file]
if opts.longest:
translate_args += ["--longest"]
dna_file, protein_file = translate(translate_args)
work_dir = op.join(outdir, "alignment")
mkdir(work_dir)
p_recs = list(SeqIO.parse(open(protein_file), "fasta"))
if opts.msa == "clustalw":
align_fasta = clustal_align_protein(p_recs, work_dir)
elif opts.msa == "muscle":
align_fasta = muscle_align_protein(p_recs, work_dir)
n_recs = list(SeqIO.parse(open(dna_file), "fasta"))
mrtrans_fasta = run_mrtrans(align_fasta, n_recs, work_dir, outfmt="fasta")
if not mrtrans_fasta:
logging.debug("pal2nal aborted. " \
"Cannot reliably build tree for {0}".format(dna_file))
return
codon_aln_fasta = mrtrans_fasta
if gblocks:
gb_fasta = run_gblocks(mrtrans_fasta)
codon_aln_fasta = gb_fasta if gb_fasta else codon_aln_fasta
else:
if synonymous:
codon_aln_fasta = subalignment(mrtrans_fasta, "synonymous")
if fourfold:
codon_aln_fasta = subalignment(mrtrans_fasta, "fourfold")
if not neighbor and not opts.ml:
return codon_aln_fasta
alignment = AlignIO.read(codon_aln_fasta, "fasta")
if len(alignment) <= 3:
raise ValueError("Too few seqs to build tree.")
mkdir(op.join(treedir, "work"))
if neighbor:
out_file = op.join(treedir, op.basename(dna_file).rsplit(".", 1)[0] + \
".NJ.unrooted.dnd")
try:
outfile, phy_file = build_nj_phylip(alignment, \
outfile=out_file, outgroup=outgroup, work_dir=treedir)
except:
print("NJ tree cannot be built for {0}".format(dna_file))
if opts.SH:
reftree = opts.SH
querytree = outfile
SH_raxml(reftree, querytree, phy_file, shout=opts.shout)
if opts.ml:
out_file = op.join(treedir, op.basename(dna_file).rsplit(".", 1)[0] + \
".ML.unrooted.dnd")
if opts.ml == "phyml":
try:
outfile, phy_file = build_ml_phyml\
(alignment, outfile=out_file, work_dir=treedir)
except:
print("ML tree cannot be built for {0}".format(dna_file))
elif opts.ml == "raxml":
try:
outfile, phy_file = build_ml_raxml\
(alignment, outfile=out_file, work_dir=treedir)
except:
print("ML tree cannot be built for {0}".format(dna_file))
if outgroup:
new_out_file = out_file.replace(".unrooted", "")
t = smart_reroot(treefile=out_file, outgroupfile=outgroup, \
outfile=new_out_file)
if t == new_out_file:
sh("rm %s" % out_file)
outfile = new_out_file
if opts.SH:
reftree = opts.SH
querytree = outfile
SH_raxml(reftree, querytree, phy_file, shout=opts.shout)
if opts.treefix:
treefix_dir = op.join(treedir, "treefix")
assert mkdir(treefix_dir, overwrite=True)
sh("cp {0} {1}/".format(outfile, treefix_dir))
input = op.join(treefix_dir, op.basename(outfile))
aln_file = input.rsplit(".", 1)[0] + ".fasta"
SeqIO.write(alignment, aln_file, "fasta")
outfile = run_treefix(input=input, stree_file=stree, smap_file=smap, \
a_ext=".fasta", o_ext=".dnd", n_ext = ".treefix.dnd")
return outfile
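# Example invocation of the `build` action above (illustrative only; the entry
# point and file names are assumptions and may differ per installation):
#   python -m jcvi.apps.phylo build cluster1.cds --ml=raxml \
#       --outgroup=outgroups.txt --outdir=cluster1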
def _draw_trees(trees, nrow=1, ncol=1, rmargin=.3, iopts=None, outdir=".",
shfile=None, **kwargs):
"""
Draw one or multiple trees on one plot.
"""
from jcvi.graphics.tree import draw_tree
if shfile:
SHs = DictFile(shfile, delimiter="\t")
ntrees = len(trees)
n = nrow * ncol
    for x in range(int(ceil(ntrees / n))):
fig = plt.figure(1, (iopts.w, iopts.h)) if iopts \
else plt.figure(1, (5, 5))
root = fig.add_axes([0, 0, 1, 1])
xiv = 1. / ncol
yiv = 1. / nrow
xstart = list(np.arange(0, 1, xiv)) * nrow
ystart = list(chain(*zip(*[list(np.arange(0, 1, yiv))[::-1]] * ncol)))
        for i in range(n * x, n * (x + 1)):
if i == ntrees:
break
ax = fig.add_axes([xstart[i%n], ystart[i%n], xiv, yiv])
            f = list(trees.keys())[i]
            tree = trees[f]
            SH = SHs.get(f) if shfile else None
draw_tree(ax, tree, rmargin=rmargin, reroot=False, \
supportcolor="r", SH=SH, **kwargs)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
format = iopts.format if iopts else "pdf"
dpi = iopts.dpi if iopts else 300
if n == 1:
image_name = f.rsplit(".", 1)[0] + "." + format
else:
image_name = "trees{0}.{1}".format(x, format)
image_name = op.join(outdir, image_name)
savefig(image_name, dpi=dpi, iopts=iopts)
plt.clf()
def draw(args):
"""
%prog draw --input newicktrees [options]
Draw phylogenetic trees into single or combined plots.
Input trees should be one of the following:
1. single Newick format tree file
2. a dir containing *ONLY* the tree files to be drawn
Newick format:
http://evolution.genetics.washington.edu/phylip/newicktree.html
This function wraps on jcvi.graphics.tree
This function is better used for trees generated by jcvi.apps.phylo (rooted
if possible). For drawing general Newick trees from external sources invoke
jcvi.graphics.tree directly, which also gives more drawing options.
"""
trunc_name_options = ['headn', 'oheadn', 'tailn', 'otailn']
p = OptionParser(draw.__doc__)
p.add_option("--input", help="path to single input tree file or a dir "\
"containing ONLY the input tree files")
p.add_option("--combine", type="string", default="1x1", \
help="combine multiple trees into one plot in nrowxncol")
p.add_option("--trunc_name", default=None, help="Options are: {0}. " \
"truncate first n chars, retains only first n chars, " \
"truncate last n chars, retain only last chars. " \
"n=1~99. [default: %default]".format(trunc_name_options))
p.add_option("--SH", default=None,
help="path to a file containing SH test p-values in format:" \
"tree_file_name<tab>p-values " \
"This file can be generated with jcvi.apps.phylo build [default: %default]")
p.add_option("--scutoff", default=50, type="int",
help="cutoff for displaying node support, 0-100 [default: %default]")
p.add_option("--barcode", default=None,
help="path to seq/taxon name barcode mapping file: " \
"barcode<tab>new_name " \
"This option is downstream of `--trunc_name` [default: %default]")
p.add_option("--leafcolorfile", default=None,
help="path to a mapping file containing font colors " \
"for the OTUs: leafname<tab>color [default: %default]")
p.set_outdir()
opts, args, iopts = p.set_image_options(figsize="8x6")
input = opts.input
outdir = opts.outdir
combine = opts.combine.split("x")
trunc_name = opts.trunc_name
SH = opts.SH
mkdir(outdir)
if not input:
sys.exit(not p.print_help())
elif op.isfile(input):
trees_file = input
treenames = [op.basename(input)]
elif op.isdir(input):
trees_file = op.join(outdir, "alltrees.dnd")
treenames = []
for f in sorted(os.listdir(input)):
sh("cat {0}/{1} >> {2}".format(input, f, trees_file), log=False)
treenames.append(f)
else:
sys.exit(not p.print_help())
trees = OrderedDict()
tree = ""
i = 0
for row in LineFile(trees_file, comment="#", load=True).lines:
if i == len(treenames):
break
if not len(row):
continue
if ";" in row:
# sanity check
if row.index(";") != len(row)-1:
ts = row.split(";")
                for ii in range(len(ts) - 1):
ts[ii] += ";"
else:
ts = [row]
for t in ts:
if ";" in t:
tree += t
if tree:
trees[treenames[i]] = tree
tree = ""
i+=1
else:
tree += t
else:
tree += row
logging.debug("A total of {0} trees imported.".format(len(trees)))
sh("rm {0}".format(op.join(outdir, "alltrees.dnd")))
_draw_trees(trees, nrow=int(combine[0]), ncol=int(combine[1]), rmargin=.3,\
iopts=iopts, outdir=outdir, shfile=SH, trunc_name=trunc_name, \
scutoff=opts.scutoff, barcodefile = opts.barcode,
leafcolorfile=opts.leafcolorfile)
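# Example invocation of the `draw` action above (illustrative only; the entry
# point and paths are assumptions):
#   python -m jcvi.apps.phylo draw --input=trees/ --combine=2x3 \
#       --SH=SH_out.txt --outdir=plots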
if __name__ == '__main__':
main()
|
the-stack_106_27732 | # pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# pylint:disable=no-value-for-parameter
# pylint:disable=protected-access
# pylint:disable=too-many-arguments
import pytest
from dask_task_models_library.container_tasks.events import (
BaseTaskEvent,
TaskCancelEvent,
TaskLogEvent,
TaskProgressEvent,
TaskStateEvent,
)
from models_library.projects_state import RunningState
from pytest_mock.plugin import MockerFixture
def test_task_event_abstract():
with pytest.raises(TypeError):
# pylint: disable=abstract-class-instantiated
BaseTaskEvent(job_id="some_fake") # type: ignore
@pytest.mark.parametrize(
"model_cls", [TaskStateEvent, TaskProgressEvent, TaskLogEvent, TaskCancelEvent]
)
def test_events_models_examples(model_cls):
examples = model_cls.Config.schema_extra["examples"]
for index, example in enumerate(examples):
print(f"{index:-^10}:\n", example)
model_instance = model_cls(**example)
assert model_instance
assert model_instance.topic_name()
@pytest.fixture()
def mocked_dask_worker_job_id(mocker: MockerFixture) -> str:
mock_get_worker = mocker.patch(
"dask_task_models_library.container_tasks.events.get_worker", autospec=True
)
fake_job_id = "some_fake_job_id"
mock_get_worker.return_value.get_current_task.return_value = fake_job_id
return fake_job_id
def test_task_state_from_worker(mocked_dask_worker_job_id: str):
event = TaskStateEvent.from_dask_worker(
RunningState.FAILED, msg="some test message"
)
assert event.job_id == mocked_dask_worker_job_id
assert event.state == RunningState.FAILED
assert event.msg == "some test message"
def test_task_progress_from_worker(mocked_dask_worker_job_id: str):
event = TaskProgressEvent.from_dask_worker(0.7)
assert event.job_id == mocked_dask_worker_job_id
assert event.progress == 0.7
def test_task_log_from_worker(mocked_dask_worker_job_id: str):
event = TaskLogEvent.from_dask_worker(log="here is the amazing logs")
assert event.job_id == mocked_dask_worker_job_id
assert event.log == "here is the amazing logs"
|
the-stack_106_27734 | import os
import sys
import edx_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'edx_theme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# Substitutions for all pages
rst_epilog = """
.. _Bok Choy: https://github.com/edx/bok-choy
.. _Selenium: http://www.seleniumhq.org
.. _Python: http://python.org
"""
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bok-choy'
copyright = edx_theme.COPYRIGHT
author = edx_theme.AUTHOR
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'edx_theme'
html_theme_path = [edx_theme.get_html_theme_path()]
html_favicon = os.path.join(html_theme_path[0], 'edx_theme', 'static', 'css', 'favicon.ico')
# Output file base name for HTML help builder.
htmlhelp_basename = 'bok-choydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bok-choy.tex', 'bok-choy Documentation',
author, 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bok-choy', 'bok-choy Documentation',
[author], 1)
]
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bok-choy', 'bok-choy Documentation',
author, 'bok-choy', 'One line description of project.',
'Miscellaneous'),
]
# -- Autodoc options -----------------------------------------------------------
autoclass_content = 'both'
|
the-stack_106_27735 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from datetime import datetime
import re
from concurrent.futures import as_completed
from dateutil.tz import tzutc
from dateutil.parser import parse
from c7n.actions import (
ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction)
from c7n.filters import FilterRegistry, AgeFilter
import c7n.filters.vpc as net_filters
from c7n.filters.kms import KmsRelatedFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager, TypeInfo
from c7n.tags import universal_augment
from c7n.utils import (
local_session, chunks, snapshot_identifier, type_schema)
filters = FilterRegistry('elasticache.filters')
actions = ActionRegistry('elasticache.actions')
TTYPE = re.compile('cache.t1')
@resources.register('cache-cluster')
class ElastiCacheCluster(QueryResourceManager):
class resource_type(TypeInfo):
service = 'elasticache'
arn_type = 'cluster'
arn_separator = ":"
enum_spec = ('describe_cache_clusters',
'CacheClusters[]', None)
name = id = 'CacheClusterId'
filter_name = 'CacheClusterId'
filter_type = 'scalar'
date = 'CacheClusterCreateTime'
dimension = 'CacheClusterId'
universal_taggable = True
cfn_type = 'AWS::ElastiCache::CacheCluster'
filter_registry = filters
action_registry = actions
permissions = ('elasticache:ListTagsForResource',)
augment = universal_augment
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
RelatedIdsExpression = "SecurityGroups[].SecurityGroupId"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
"""Filters elasticache clusters based on their associated subnet
:example:
.. code-block:: yaml
policies:
- name: elasticache-in-subnet-x
resource: cache-cluster
filters:
- type: subnet
key: SubnetId
value: subnet-12ab34cd
"""
RelatedIdsExpression = ""
def get_related_ids(self, resources):
group_ids = set()
for r in resources:
group_ids.update(
[s['SubnetIdentifier'] for s in
self.groups[r['CacheSubnetGroupName']]['Subnets']])
return group_ids
def process(self, resources, event=None):
self.groups = {
r['CacheSubnetGroupName']: r for r in
self.manager.get_resource_manager(
'cache-subnet-group').resources()}
return super(SubnetFilter, self).process(resources, event)
filters.register('network-location', net_filters.NetworkLocation)
@actions.register('delete')
class DeleteElastiCacheCluster(BaseAction):
"""Action to delete an elasticache cluster
To prevent unwanted deletion of elasticache clusters, it is recommended
to include a filter
:example:
.. code-block:: yaml
policies:
- name: elasticache-delete-stale-clusters
resource: cache-cluster
filters:
- type: value
value_type: age
key: CacheClusterCreateTime
op: ge
value: 90
actions:
- type: delete
skip-snapshot: false
"""
schema = type_schema(
'delete', **{'skip-snapshot': {'type': 'boolean'}})
permissions = ('elasticache:DeleteCacheCluster',
'elasticache:DeleteReplicationGroup')
def process(self, clusters):
skip = self.data.get('skip-snapshot', False)
client = local_session(
self.manager.session_factory).client('elasticache')
clusters_to_delete = []
replication_groups_to_delete = set()
for cluster in clusters:
if cluster.get('ReplicationGroupId', ''):
replication_groups_to_delete.add(cluster['ReplicationGroupId'])
else:
clusters_to_delete.append(cluster)
        # build per-cluster delete parameters; request a final snapshot unless
        # it is skipped or the cluster type does not support snapshots
for cluster in clusters_to_delete:
params = {'CacheClusterId': cluster['CacheClusterId']}
if _cluster_eligible_for_snapshot(cluster) and not skip:
params['FinalSnapshotIdentifier'] = snapshot_identifier(
'Final', cluster['CacheClusterId'])
self.log.debug(
"Taking final snapshot of %s", cluster['CacheClusterId'])
else:
self.log.debug(
"Skipping final snapshot of %s", cluster['CacheClusterId'])
client.delete_cache_cluster(**params)
self.log.info(
'Deleted ElastiCache cluster: %s',
cluster['CacheClusterId'])
for replication_group in replication_groups_to_delete:
# NOTE don't delete the group if it's not empty
rg = client.describe_replication_groups(
ReplicationGroupId=replication_group
)["ReplicationGroups"][0]
if not all(cluster in clusters_to_delete for cluster in rg["MemberClusters"]):
self.log.info('ElastiCache replication group is not empty: %s', replication_group)
continue
params = {'ReplicationGroupId': replication_group,
'RetainPrimaryCluster': False}
if not skip:
params['FinalSnapshotIdentifier'] = snapshot_identifier(
'Final', replication_group)
client.delete_replication_group(**params)
self.log.info(
'Deleted ElastiCache replication group: %s',
replication_group)
@actions.register('snapshot')
class SnapshotElastiCacheCluster(BaseAction):
"""Action to snapshot an elasticache cluster
:example:
.. code-block:: yaml
policies:
- name: elasticache-cluster-snapshot
resource: cache-cluster
filters:
- type: value
key: CacheClusterStatus
op: not-in
value: ["deleted","deleting","creating"]
actions:
- snapshot
"""
schema = type_schema('snapshot')
permissions = ('elasticache:CreateSnapshot',)
def process(self, clusters):
set_size = len(clusters)
clusters = [c for c in clusters if _cluster_eligible_for_snapshot(c)]
if set_size != len(clusters):
self.log.info(
"action:snapshot implicitly filtered from %d to %d clusters for snapshot support",
set_size, len(clusters))
with self.executor_factory(max_workers=2) as w:
futures = []
client = local_session(self.manager.session_factory).client('elasticache')
for cluster in clusters:
futures.append(
w.submit(self.process_cluster_snapshot, client, cluster))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception creating cache cluster snapshot \n %s",
f.exception())
return clusters
def process_cluster_snapshot(self, client, cluster):
client.create_snapshot(
SnapshotName=snapshot_identifier(
'Backup',
cluster['CacheClusterId']),
CacheClusterId=cluster['CacheClusterId'])
@actions.register('modify-security-groups')
class ElasticacheClusterModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
"""Modify security groups on an Elasticache cluster.
Looks at the individual clusters and modifies the Replication
Group's configuration for Security groups so all nodes get
affected equally
"""
permissions = ('elasticache:ModifyReplicationGroup',)
def process(self, clusters):
replication_group_map = {}
client = local_session(
self.manager.session_factory).client('elasticache')
groups = super(
ElasticacheClusterModifyVpcSecurityGroups, self).get_groups(
clusters)
for idx, c in enumerate(clusters):
# build map of Replication Groups to Security Groups
replication_group_map[c['ReplicationGroupId']] = groups[idx]
for idx, r in enumerate(replication_group_map.keys()):
client.modify_replication_group(
ReplicationGroupId=r,
SecurityGroupIds=replication_group_map[r])
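# Illustrative policy for the modify-security-groups action above (a hedged
# sketch; the security group IDs are hypothetical):
#
#   policies:
#     - name: elasticache-remediate-security-groups
#       resource: cache-cluster
#       actions:
#         - type: modify-security-groups
#           add: sg-0123456789abcdef0
#           remove: matched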
@resources.register('cache-subnet-group')
class ElastiCacheSubnetGroup(QueryResourceManager):
class resource_type(TypeInfo):
service = 'elasticache'
arn_type = 'subnet-group'
enum_spec = ('describe_cache_subnet_groups',
'CacheSubnetGroups', None)
name = id = 'CacheSubnetGroupName'
filter_name = 'CacheSubnetGroupName'
filter_type = 'scalar'
cfn_type = 'AWS::ElastiCache::SubnetGroup'
@resources.register('cache-snapshot')
class ElastiCacheSnapshot(QueryResourceManager):
class resource_type(TypeInfo):
service = 'elasticache'
arn_type = 'snapshot'
arn_separator = ":"
enum_spec = ('describe_snapshots', 'Snapshots', None)
name = id = 'SnapshotName'
filter_name = 'SnapshotName'
filter_type = 'scalar'
date = 'StartTime'
universal_taggable = True
permissions = ('elasticache:ListTagsForResource',)
def augment(self, resources):
return universal_augment(self, resources)
@ElastiCacheSnapshot.filter_registry.register('age')
class ElastiCacheSnapshotAge(AgeFilter):
"""Filters elasticache snapshots based on their age (in days)
:example:
.. code-block:: yaml
policies:
- name: elasticache-stale-snapshots
resource: cache-snapshot
filters:
- type: age
days: 30
op: ge
"""
schema = type_schema(
'age', days={'type': 'number'},
op={'$ref': '#/definitions/filters_common/comparison_operators'})
date_attribute = 'dummy'
def get_resource_date(self, snapshot):
""" Override superclass method as there is no single snapshot date attribute.
"""
def to_datetime(v):
if not isinstance(v, datetime):
v = parse(v)
if not v.tzinfo:
v = v.replace(tzinfo=tzutc())
return v
# Return the earliest of the node snaphot creation times.
return min([to_datetime(ns['SnapshotCreateTime'])
for ns in snapshot['NodeSnapshots']])
@ElastiCacheSnapshot.action_registry.register('delete')
class DeleteElastiCacheSnapshot(BaseAction):
"""Action to delete elasticache snapshots
To prevent unwanted deletion of elasticache snapshots, it is recommended to
apply a filter
:example:
.. code-block:: yaml
policies:
- name: delete-elasticache-stale-snapshots
resource: cache-snapshot
filters:
- type: age
days: 30
op: ge
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('elasticache:DeleteSnapshot',)
def process(self, snapshots):
self.log.info("Deleting %d ElastiCache snapshots", len(snapshots))
with self.executor_factory(max_workers=3) as w:
futures = []
client = local_session(self.manager.session_factory).client('elasticache')
for snapshot_set in chunks(reversed(snapshots), size=50):
futures.append(
w.submit(self.process_snapshot_set, client, snapshot_set))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception deleting snapshot set \n %s",
f.exception())
return snapshots
def process_snapshot_set(self, client, snapshots_set):
for s in snapshots_set:
client.delete_snapshot(SnapshotName=s['SnapshotName'])
@ElastiCacheSnapshot.action_registry.register('copy-cluster-tags')
class CopyClusterTags(BaseAction):
"""
Copy specified tags from Elasticache cluster to Snapshot
:example:
.. code-block:: yaml
- name: elasticache-test
resource: cache-snapshot
filters:
- type: value
key: SnapshotName
op: in
value:
- test-tags-backup
actions:
- type: copy-cluster-tags
tags:
- tag1
- tag2
"""
schema = type_schema(
'copy-cluster-tags',
tags={'type': 'array', 'items': {'type': 'string'}, 'minItems': 1},
required=('tags',))
def get_permissions(self):
perms = self.manager.get_resource_manager('cache-cluster').get_permissions()
perms.append('elasticache:AddTagsToResource')
return perms
def process(self, snapshots):
client = local_session(self.manager.session_factory).client('elasticache')
clusters = {r['CacheClusterId']: r for r in
self.manager.get_resource_manager('cache-cluster').resources()}
copyable_tags = self.data.get('tags')
for s in snapshots:
# For replicated/sharded clusters it is possible for each
# shard to have separate tags, we go ahead and tag the
# snap with the union of tags with overlaps getting the
# last value (arbitrary if mismatched).
if 'CacheClusterId' not in s:
cluster_ids = [ns['CacheClusterId'] for ns in s['NodeSnapshots']]
else:
cluster_ids = [s['CacheClusterId']]
copy_tags = {}
for cid in sorted(cluster_ids):
if cid not in clusters:
continue
cluster_tags = {t['Key']: t['Value'] for t in clusters[cid]['Tags']}
snap_tags = {t['Key']: t['Value'] for t in s.get('Tags', ())}
for k, v in cluster_tags.items():
if copyable_tags and k not in copyable_tags:
continue
if k.startswith('aws:'):
continue
if snap_tags.get(k, '') == v:
continue
copy_tags[k] = v
if not copy_tags:
continue
if len(set(copy_tags).union(set(snap_tags))) > 50:
self.log.error(
"Cant copy tags, max tag limit hit on snapshot:%s",
s['SnapshotName'])
continue
arn = self.manager.generate_arn(s['SnapshotName'])
self.manager.retry(
client.add_tags_to_resource,
ResourceName=arn,
Tags=[{'Key': k, 'Value': v} for k, v in copy_tags.items()])
def _cluster_eligible_for_snapshot(cluster):
    # memcached clusters and cache.t1 node types do not support snapshots
return (
cluster['Engine'] != 'memcached' and not
TTYPE.match(cluster['CacheNodeType'])
)
@resources.register('elasticache-group')
class ElastiCacheReplicationGroup(QueryResourceManager):
class resource_type(TypeInfo):
service = "elasticache"
enum_spec = ('describe_replication_groups',
'ReplicationGroups[]', None)
arn_type = 'replicationgroup'
id = name = dimension = 'ReplicationGroupId'
cfn_type = 'AWS::ElastiCache::ReplicationGroup'
arn_separator = ":"
universal_taggable = object()
augment = universal_augment
permissions = ('elasticache:DescribeReplicationGroups',)
@ElastiCacheReplicationGroup.filter_registry.register('kms-key')
class KmsFilter(KmsRelatedFilter):
RelatedIdsExpression = 'KmsKeyId'
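# Illustrative policy for the kms-key filter above (a hedged sketch; the alias
# value is an assumption and should be adapted to the account's keys):
#
#   policies:
#     - name: elasticache-group-using-default-kms-key
#       resource: aws.elasticache-group
#       filters:
#         - type: kms-key
#           key: c7n:AliasName
#           value: alias/aws/elasticache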
@ElastiCacheReplicationGroup.action_registry.register('delete')
class DeleteReplicationGroup(BaseAction):
"""Action to delete a cache replication group
:example:
.. code-block:: yaml
policies:
- name: elasticache-delete-replication-group
resource: aws.elasticache-group
filters:
- type: value
key: AtRestEncryptionEnabled
value: False
actions:
- type: delete
snapshot: False
"""
schema = type_schema(
'delete', **{'snapshot': {'type': 'boolean'}})
valid_origin_states = ('available',)
permissions = ('elasticache:DeleteReplicationGroup',)
def process(self, resources):
resources = self.filter_resources(resources, 'Status', self.valid_origin_states)
client = local_session(self.manager.session_factory).client('elasticache')
for r in resources:
params = {'ReplicationGroupId': r['ReplicationGroupId']}
if self.data.get('snapshot', False):
params.update({'FinalSnapshotIdentifier': r['ReplicationGroupId'] + '-snapshot'})
self.manager.retry(client.delete_replication_group, **params, ignore_err_codes=(
'ReplicationGroupNotFoundFault',))
|
the-stack_106_27736 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""bootstrap - Prepare a VOLTTRON virtual environment.
Bootstrapping is broken into two stages. The first stage should only be
invoked once per virtual environment. It downloads virtualenv and
creates a virtual Python environment in the virtual environment
directory (defaults to a subdirectory named env in the same directory as
this script). It then executes stage two using the newly installed
virtual environment. Stage two uses the new virtual Python environment
to install VOLTTRON and its dependencies.
If a new dependency is added, this script may be run again using the
Python executable in the virtual environment to re-run stage two:
env/bin/python bootstrap.py
To speed up bootstrapping in a test environment, use the --wheel
feature, which might look something like this:
$ export PIP_WHEEL_DIR=/path/to/cache/wheelhouse
$ export PIP_FIND_LINKS=file://$PIP_WHEEL_DIR
$ mkdir -p $PIP_WHEEL_DIR
$ python2.7 bootstrap.py -o
$ env/bin/python bootstrap.py --wheel
$ env/bin/python bootstrap.py
Instead of setting the environment variables, a pip configuration file
may be used. Look here for more information on configuring pip:
https://pip.pypa.io/en/latest/user_guide.html#configuration
"""
import argparse
import errno
import logging
import subprocess
import sys
from urllib.request import urlopen
import os
import traceback
from requirements import extras_require, option_requirements
_log = logging.getLogger(__name__)
_WINDOWS = sys.platform.startswith('win')
default_rmq_dir = os.path.join(os.path.expanduser("~"), "rabbitmq_server")
rabbitmq_server = 'rabbitmq_server-3.7.7'
def shescape(args):
'''Return a sh shell escaped string composed of args.'''
return ' '.join('{1}{0}{1}'.format(arg.replace('"', '\\"'),
'"' if ' ' in arg else '') for arg in args)
def bootstrap(dest, prompt='(volttron)', version=None, verbose=None):
import shutil
args = [sys.executable, "-m", "venv", dest, "--prompt", prompt]
complete = subprocess.run(args, stdout=subprocess.PIPE)
if complete.returncode != 0:
sys.stdout.write(complete.stdout.decode('utf-8'))
shutil.rmtree(dest, ignore_errors=True)
sys.exit(1)
return os.path.join(dest, "bin/python")
def pip(operation, args, verbose=None, upgrade=False, offline=False):
"""Call pip in the virtual environment to perform operation."""
cmd = ['pip', operation]
if verbose is not None:
cmd.append('--verbose' if verbose else '--quiet')
if upgrade and operation == 'install':
cmd.append('--upgrade')
if offline:
cmd.extend(['--retries', '0', '--timeout', '1'])
cmd.extend(args)
_log.info('+ %s', shescape(cmd))
cmd[:0] = [sys.executable, '-m']
subprocess.check_call(cmd)
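# Illustrative call of the pip() helper above (not part of the original script;
# the package name is hypothetical).  Inside the virtual environment,
#   pip('install', ['gevent'], verbose=True, upgrade=True)
# shells out to `<python> -m pip install --verbose --upgrade gevent`.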
def update(operation, verbose=None, upgrade=False, offline=False, optional_requirements=[]):
"""Install dependencies in setup.py and requirements.txt."""
assert operation in ['install', 'wheel']
wheeling = operation == 'wheel'
path = os.path.dirname(__file__) or '.'
_log.info('%sing required packages', 'Build' if wheeling else 'Install')
# We must install wheel first to eliminate a bunch of scary looking
# errors at first install.
# TODO Look towards fixing the packaging so that it works with 0.31
pip('install', ['wheel==0.30'], verbose, True, offline=offline)
# Build option_requirements separately to pass install options
build_option = '--build-option' if wheeling else '--install-option'
for requirement, options in option_requirements:
args = []
for opt in options:
args.extend([build_option, opt])
args.extend(['--no-deps', requirement])
pip(operation, args, verbose, upgrade, offline)
# Install local packages and remaining dependencies
args = []
target = path
if optional_requirements:
target += '[' + ','.join(optional_requirements) + ']'
args.extend(['--editable', target])
pip(operation, args, verbose, upgrade, offline)
try:
# Install rmq server if needed
if 'rabbitmq' in optional_requirements:
install_rabbit(default_rmq_dir)
except Exception as exc:
_log.error("Error installing RabbitMQ package {}".format(traceback.format_exc()))
def install_rabbit(rmq_install_dir):
process = subprocess.Popen(["which", "erl"], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
(output, error) = process.communicate()
if process.returncode != 0:
sys.stderr.write("ERROR:\n Unable to find erlang in path. Please install necessary pre-requisites. "
"Reference: https://volttron.readthedocs.io/en/latest/setup/index.html#steps-for-rabbitmq")
sys.exit(60)
if rmq_install_dir == default_rmq_dir and not os.path.exists(
default_rmq_dir):
os.makedirs(default_rmq_dir)
_log.info("\n\nInstalling Rabbitmq Server in default directory: " +
default_rmq_dir)
else:
_log.info(
"\n\nInstalling Rabbitmq Server at {}".format(rmq_install_dir))
valid_dir = os.access(rmq_install_dir, os.W_OK)
if not valid_dir:
raise ValueError("Invalid install directory. Directory should "
"exist and should have write access to user")
rmq_home = os.path.join(rmq_install_dir, rabbitmq_server)
if os.path.exists(rmq_home) and \
os.path.exists(os.path.join(rmq_home, 'sbin/rabbitmq-server')):
_log.info("{} already contains {}. "
"Skipping rabbitmq server install".format(
rmq_install_dir, rabbitmq_server))
else:
url = "https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.7.7/rabbitmq-server-generic-unix-3.7.7.tar.xz"
f = urlopen(url)
data = f.read()
filename = "rabbitmq-server.download.tar.xz"
with open(filename, "wb") as imgfile:
imgfile.write(data)
_log.info("\nDownloaded rabbitmq server")
cmd = ["tar",
"-xf",
filename,
"--directory=" + rmq_install_dir]
subprocess.check_call(cmd)
_log.info("Installed Rabbitmq server at " + rmq_home)
# enable plugins
cmd = [os.path.join(rmq_home, "sbin/rabbitmq-plugins"),
"enable", "rabbitmq_management",
"rabbitmq_federation",
"rabbitmq_federation_management",
"rabbitmq_shovel",
"rabbitmq_shovel_management",
"rabbitmq_auth_mechanism_ssl",
"rabbitmq_trust_store"]
subprocess.check_call(cmd)
with open(os.path.expanduser("~/.volttron_rmq_home"), 'w+') as f:
f.write(rmq_home)
def main(argv=sys.argv):
"""Script entry point."""
# Refuse to run as root
if not getattr(os, 'getuid', lambda: -1)():
sys.stderr.write('%s: error: refusing to run as root to prevent '
'potential damage.\n' % os.path.basename(argv[0]))
sys.exit(77)
# Python3 for life!
    if sys.version_info < (3, 6):
sys.stderr.write('error: Python >= 3.6 is required\n')
sys.exit(1)
# Build the parser
python = os.path.join('$VIRTUAL_ENV',
'Scripts' if _WINDOWS else 'bin', 'python')
if _WINDOWS:
python += '.exe'
parser = argparse.ArgumentParser(
description='Bootstrap and update a virtual Python environment '
'for VOLTTRON development.',
usage='\n bootstrap: python3.6 %(prog)s [options]'
'\n update: {} %(prog)s [options]'.format(python),
prog=os.path.basename(argv[0]),
epilog="""
The first invocation of this script, which should be made
using the system Python, will create a virtual Python
environment in the 'env' subdirectory in the same directory as
this script or in the directory given by the --envdir option.
Subsequent invocations of this script should use the Python
executable installed in the virtual environment."""
)
verbose = parser.add_mutually_exclusive_group()
verbose.add_argument(
'-q', '--quiet', dest='verbose', action='store_const', const=False,
help='produce less output')
verbose.add_argument(
'-v', '--verbose', action='store_const', const=True,
help='produce extra output')
bs = parser.add_argument_group('bootstrap options')
bs.add_argument(
'--envdir', default=None, metavar='VIRTUAL_ENV',
help='alternate location for virtual environment')
bs.add_argument(
'--force', action='store_true', default=False,
help='force installing in non-empty directory')
bs.add_argument(
'-o', '--only-virtenv', action='store_true', default=False,
help='create virtual environment and exit (skip install)')
bs.add_argument(
'--prompt', default='volttron', help='provide alternate prompt '
'in activated environment (default: %(default)s)')
bs.add_argument('--force-version', help=argparse.SUPPRESS)
# allows us to look and see if any of the dynamic optional arguments
# are on the command line. We check this during the processing of the args
# variable at the end of the block. If the option is set then it needs
# to be passed on.
po = parser.add_argument_group('Extra packaging options')
for arg in extras_require:
po.add_argument('--'+arg, action='append_const', const=arg, dest="optional_args")
# Add rmq download actions.
#optional_args = []
# if os.path.exists('optional_requirements.json'):
# po = parser.add_argument_group('Extra packaging options')
# with open('optional_requirements.json', 'r') as optional_arguments:
# data = jsonapi.load(optional_arguments)
# for arg, vals in data.items():
# if arg == '--rabbitmq':
# po.add_argument(
# '--rabbitmq', action='store', const=default_rmq_dir,
# nargs='?',
# help='install rabbitmq server and its dependencies. '
# 'optional argument: Install directory '
# 'that exists and is writeable. RabbitMQ server '
# 'will be installed in a subdirectory.'
# 'Defaults to ' + default_rmq_dir)
# else:
# optional_args.append(arg)
# if 'help' in vals.keys():
# po.add_argument(arg, action='store_true', default=False,
# help=vals['help'])
# else:
# po.add_argument(arg, action='store_true', default=False)
# Update options
up = parser.add_argument_group('update options')
up.add_argument(
'--offline', action='store_true', default=False,
help='install from cache without downloading')
ex = up.add_mutually_exclusive_group()
ex.add_argument(
'-u', '--upgrade', action='store_true', default=False,
help='upgrade installed packages')
ex.add_argument(
'-w', '--wheel', action='store_const', const='wheel', dest='operation',
help='build wheels in the pip wheelhouse')
path = os.path.dirname(__file__) or os.getcwd()
parser.set_defaults(envdir=os.path.join(path, 'env'), operation='install', optional_args=[])
options = parser.parse_args(argv[1:])
# Route errors to stderr, info and debug to stdout
error_handler = logging.StreamHandler(sys.stderr)
error_handler.setLevel(logging.WARNING)
error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
info_handler = logging.StreamHandler(sys.stdout)
info_handler.setLevel(logging.DEBUG)
info_handler.setFormatter(logging.Formatter('%(message)s'))
root = logging.getLogger()
root.setLevel(logging.DEBUG if options.verbose else logging.INFO)
root.addHandler(error_handler)
root.addHandler(info_handler)
# Main script logic to perform bootstrapping or updating
if sys.base_prefix != sys.prefix:
# The script was called from a virtual environment Python, so update
update(options.operation, options.verbose,
options.upgrade, options.offline, options.optional_args)
else:
# The script was called from the system Python, so bootstrap
try:
# Refuse to create environment in existing, non-empty
# directory without the --force flag.
if os.path.exists(options.envdir):
if not options.force:
parser.print_usage(sys.stderr)
print('{}: error: directory exists and is not empty: {}'
.format(parser.prog, options.envdir), file=sys.stderr)
print('Use the virtual Python to update or use '
'the --force option to overwrite.', file=sys.stderr)
parser.exit(1)
_log.warning('using non-empty environment directory: %s',
options.envdir)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
env_exe = bootstrap(options.envdir, options.prompt)
if options.only_virtenv:
return
# Run this script within the virtual environment for stage2
args = [env_exe, __file__]
if options.verbose is not None:
args.append('--verbose' if options.verbose else '--quiet')
# Transfer dynamic properties to the subprocess call 'update'.
# Clip off the first two characters expecting long parameter form.
for arg in options.optional_args:
args.append('--'+arg)
subprocess.check_call(args)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
except subprocess.CalledProcessError as exc:
sys.exit(exc.returncode)
|
the-stack_106_27737 | # -*- coding: utf-8 -*-
'''
Template render systems
'''
from __future__ import absolute_import
# Import python libs
import codecs
import os
import imp
import logging
import tempfile
import traceback
import sys
# Import third party libs
import jinja2
import jinja2.ext
# Import salt libs
import salt.utils
import salt.utils.yamlencoding
from salt.exceptions import (
SaltRenderError, CommandExecutionError, SaltInvocationError
)
from salt.utils.jinja import ensure_sequence_filter, show_full_context
from salt.utils.jinja import SaltCacheLoader as JinjaSaltCacheLoader
from salt.utils.jinja import SerializerExtension as JinjaSerializerExtension
from salt.utils.odict import OrderedDict
from salt import __path__ as saltpath
from salt.ext.six import string_types
import salt.ext.six as six
log = logging.getLogger(__name__)
TEMPLATE_DIRNAME = os.path.join(saltpath[0], 'templates')
# FIXME: also in salt/template.py
SLS_ENCODING = 'utf-8' # this one has no BOM.
SLS_ENCODER = codecs.getencoder(SLS_ENCODING)
def wrap_tmpl_func(render_str):
def render_tmpl(tmplsrc,
from_str=False,
to_str=False,
context=None,
tmplpath=None,
**kws):
if context is None:
context = {}
# Alias cmd.run to cmd.shell to make python_shell=True the default for
# templated calls
if 'salt' in kws:
if 'cmd.run' in kws['salt'] and 'cmd.shell' in kws['salt']:
kws['salt']['cmd.run'] = kws['salt']['cmd.shell']
if 'run' in kws['salt'].get('cmd', {}) \
and 'shell' in kws['salt'].get('cmd', {}):
kws['salt']['cmd']['run'] = kws['salt']['cmd']['shell']
# We want explicit context to overwrite the **kws
kws.update(context)
context = kws
assert 'opts' in context
assert 'saltenv' in context
if 'sls' in context:
slspath = context['sls'].replace('.', '/')
if tmplpath is not None:
context['tplpath'] = tmplpath
if not tmplpath.lower().replace('\\', '/').endswith('/init.sls'):
slspath = os.path.dirname(slspath)
template = tmplpath.replace('\\', '/')
i = template.rfind(slspath.replace('.', '/'))
if i != -1:
template = template[i:]
tpldir = os.path.dirname(template).replace('\\', '/')
tpldata = {
'tplfile': template,
'tpldir': tpldir,
'tpldot': tpldir.replace('/', '.'),
}
context.update(tpldata)
context['slsdotpath'] = slspath.replace('/', '.')
context['slscolonpath'] = slspath.replace('/', ':')
context['sls_path'] = slspath.replace('/', '_')
context['slspath'] = slspath
if isinstance(tmplsrc, string_types):
if from_str:
tmplstr = tmplsrc
else:
try:
if tmplpath is not None:
tmplsrc = os.path.join(tmplpath, tmplsrc)
with codecs.open(tmplsrc, 'r', SLS_ENCODING) as _tmplsrc:
tmplstr = _tmplsrc.read()
except (UnicodeDecodeError,
ValueError,
OSError,
IOError) as exc:
if salt.utils.is_bin_file(tmplsrc):
# Template is a bin file, return the raw file
return dict(result=True, data=tmplsrc)
log.error(
'Exception occurred while reading file '
'{0}: {1}'.format(tmplsrc, exc),
# Show full traceback if debug logging is enabled
exc_info_on_loglevel=logging.DEBUG
)
raise exc
else: # assume tmplsrc is file-like.
tmplstr = tmplsrc.read()
tmplsrc.close()
try:
output = render_str(tmplstr, context, tmplpath)
if salt.utils.is_windows():
# Write out with Windows newlines
output = os.linesep.join(output.splitlines())
except SaltRenderError as exc:
#return dict(result=False, data=str(exc))
raise
except Exception:
return dict(result=False, data=traceback.format_exc())
else:
if to_str: # then render as string
return dict(result=True, data=output)
with tempfile.NamedTemporaryFile('wb', delete=False) as outf:
outf.write(SLS_ENCODER(output)[0])
# Note: If nothing is replaced or added by the rendering
# function, then the contents of the output file will
# be exactly the same as the input.
return dict(result=True, data=outf.name)
render_tmpl.render_str = render_str
return render_tmpl
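# Illustrative sketch (not part of the original module): the wrapped renderers
# defined at the bottom of this file (JINJA, MAKO, ...) take a template source
# plus an `opts`/`saltenv` context.  The opts dict below is a minimal
# assumption; with an empty saltenv no fileserver loader is constructed.
def _example_jinja_from_string():
    ret = JINJA("Hello {{ name }}", from_str=True, to_str=True,
                name="world", opts={'cachedir': '/tmp'}, saltenv='')
    return ret['data']  # -> u'Hello world'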
def _get_jinja_error_slug(tb_data):
'''
Return the line number where the template error was found
'''
try:
return [
x
for x in tb_data if x[2] in ('top-level template code',
'template')
][-1]
except IndexError:
pass
def _get_jinja_error_message(tb_data):
'''
Return an understandable message from jinja error output
'''
try:
line = _get_jinja_error_slug(tb_data)
return u'{0}({1}):\n{3}'.format(*line)
except IndexError:
pass
return None
def _get_jinja_error_line(tb_data):
'''
Return the line number where the template error was found
'''
try:
return _get_jinja_error_slug(tb_data)[1]
except IndexError:
pass
return None
def _get_jinja_error(trace, context=None):
'''
Return the error line and error message output from
a stacktrace.
If we are in a macro, also output inside the message the
exact location of the error in the macro
'''
if not context:
context = {}
out = ''
error = _get_jinja_error_slug(trace)
line = _get_jinja_error_line(trace)
msg = _get_jinja_error_message(trace)
# if we failed on a nested macro, output a little more info
# to help debugging
# if sls is not found in context, add output only if we can
# resolve the filename
add_log = False
template_path = None
if 'sls' not in context:
if (
(error[0] != '<unknown>')
and os.path.exists(error[0])
):
template_path = error[0]
add_log = True
else:
# the offender error is not from the called sls
filen = context['sls'].replace('.', '/')
if (
not error[0].endswith(filen)
and os.path.exists(error[0])
):
add_log = True
template_path = error[0]
# if we add a log, format explicitly the exeception here
# by telling to output the macro context after the macro
# error log place at the beginning
if add_log:
if template_path:
out = '\n{0}\n'.format(msg.splitlines()[0])
out += salt.utils.get_context(
salt.utils.fopen(template_path).read(),
line,
marker=' <======================')
else:
out = '\n{0}\n'.format(msg)
line = 0
return line, out
def render_jinja_tmpl(tmplstr, context, tmplpath=None):
opts = context['opts']
saltenv = context['saltenv']
loader = None
newline = False
if tmplstr and not isinstance(tmplstr, six.text_type):
# http://jinja.pocoo.org/docs/api/#unicode
tmplstr = tmplstr.decode(SLS_ENCODING)
if tmplstr.endswith('\n'):
newline = True
if not saltenv:
if tmplpath:
# i.e., the template is from a file outside the state tree
#
# XXX: FileSystemLoader is not being properly instantiated here is
# it? At least it ain't according to:
#
# http://jinja.pocoo.org/docs/api/#jinja2.FileSystemLoader
loader = jinja2.FileSystemLoader(
context, os.path.dirname(tmplpath))
else:
loader = JinjaSaltCacheLoader(opts, saltenv, pillar_rend=context.get('_pillar_rend', False))
env_args = {'extensions': [], 'loader': loader}
if hasattr(jinja2.ext, 'with_'):
env_args['extensions'].append('jinja2.ext.with_')
if hasattr(jinja2.ext, 'do'):
env_args['extensions'].append('jinja2.ext.do')
if hasattr(jinja2.ext, 'loopcontrols'):
env_args['extensions'].append('jinja2.ext.loopcontrols')
env_args['extensions'].append(JinjaSerializerExtension)
# Pass through trim_blocks and lstrip_blocks Jinja parameters
# trim_blocks removes newlines around Jinja blocks
# lstrip_blocks strips tabs and spaces from the beginning of
# line to the start of a block.
if opts.get('jinja_trim_blocks', False):
log.debug('Jinja2 trim_blocks is enabled')
env_args['trim_blocks'] = True
if opts.get('jinja_lstrip_blocks', False):
log.debug('Jinja2 lstrip_blocks is enabled')
env_args['lstrip_blocks'] = True
if opts.get('allow_undefined', False):
jinja_env = jinja2.Environment(**env_args)
else:
jinja_env = jinja2.Environment(undefined=jinja2.StrictUndefined,
**env_args)
jinja_env.filters['strftime'] = salt.utils.date_format
jinja_env.filters['sequence'] = ensure_sequence_filter
jinja_env.filters['yaml_dquote'] = salt.utils.yamlencoding.yaml_dquote
jinja_env.filters['yaml_squote'] = salt.utils.yamlencoding.yaml_squote
jinja_env.filters['yaml_encode'] = salt.utils.yamlencoding.yaml_encode
jinja_env.globals['odict'] = OrderedDict
jinja_env.globals['show_full_context'] = show_full_context
decoded_context = {}
for key, value in six.iteritems(context):
if not isinstance(value, string_types):
decoded_context[key] = value
continue
decoded_context[key] = salt.utils.sdecode(value)
try:
template = jinja_env.from_string(tmplstr)
template.globals.update(decoded_context)
output = template.render(**decoded_context)
except jinja2.exceptions.TemplateSyntaxError as exc:
trace = traceback.extract_tb(sys.exc_info()[2])
line, out = _get_jinja_error(trace, context=decoded_context)
if not line:
tmplstr = ''
raise SaltRenderError('Jinja syntax error: {0}{1}'.format(exc, out),
line,
tmplstr)
except jinja2.exceptions.UndefinedError as exc:
trace = traceback.extract_tb(sys.exc_info()[2])
out = _get_jinja_error(trace, context=decoded_context)[1]
tmplstr = ''
# Don't include the line number, since it is misreported
# https://github.com/mitsuhiko/jinja2/issues/276
raise SaltRenderError(
'Jinja variable {0}{1}'.format(
exc, out),
buf=tmplstr)
except (SaltInvocationError, CommandExecutionError) as exc:
trace = traceback.extract_tb(sys.exc_info()[2])
line, out = _get_jinja_error(trace, context=decoded_context)
if not line:
tmplstr = ''
raise SaltRenderError(
'Problem running salt function in Jinja template: {0}{1}'.format(
exc, out),
line,
tmplstr)
except Exception as exc:
tracestr = traceback.format_exc()
trace = traceback.extract_tb(sys.exc_info()[2])
line, out = _get_jinja_error(trace, context=decoded_context)
if not line:
tmplstr = ''
else:
tmplstr += '\n{0}'.format(tracestr)
raise SaltRenderError('Jinja error: {0}{1}'.format(exc, out),
line,
tmplstr,
trace=tracestr)
# Workaround a bug in Jinja that removes the final newline
# (https://github.com/mitsuhiko/jinja2/issues/75)
if newline:
output += '\n'
return output
def render_mako_tmpl(tmplstr, context, tmplpath=None):
import mako.exceptions
from mako.template import Template
from salt.utils.mako import SaltMakoTemplateLookup
saltenv = context['saltenv']
lookup = None
if not saltenv:
if tmplpath:
# i.e., the template is from a file outside the state tree
from mako.lookup import TemplateLookup
lookup = TemplateLookup(directories=[os.path.dirname(tmplpath)])
else:
lookup = SaltMakoTemplateLookup(context['opts'], saltenv)
try:
return Template(
tmplstr,
strict_undefined=True,
uri=context['sls'].replace('.', '/') if 'sls' in context else None,
lookup=lookup
).render(**context)
    except Exception:
raise SaltRenderError(mako.exceptions.text_error_template().render())
def render_wempy_tmpl(tmplstr, context, tmplpath=None):
from wemplate.wemplate import TemplateParser as Template
return Template(tmplstr).render(**context)
def render_genshi_tmpl(tmplstr, context, tmplpath=None):
'''
Render a Genshi template. A method should be passed in as part of the
context. If no method is passed in, xml is assumed. Valid methods are:
    .. code-block:: text
- xml
- xhtml
- html
- text
- newtext
- oldtext
Note that the ``text`` method will call ``NewTextTemplate``. If ``oldtext``
is desired, it must be called explicitly
'''
method = context.get('method', 'xml')
if method == 'text' or method == 'newtext':
from genshi.template import NewTextTemplate
tmpl = NewTextTemplate(tmplstr)
elif method == 'oldtext':
from genshi.template import OldTextTemplate
tmpl = OldTextTemplate(tmplstr)
else:
from genshi.template import MarkupTemplate
tmpl = MarkupTemplate(tmplstr)
return tmpl.generate(**context).render(method)
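# Illustrative sketch, not part of the original module: the renderer above picks its
# Genshi template class from ``context['method']``, so a plain-text template can be
# rendered like this (the context keys are made up for the example):
#
#     render_genshi_tmpl('Hello $name', {'name': 'world', 'method': 'text'})
#     # -> 'Hello world'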
def render_cheetah_tmpl(tmplstr, context, tmplpath=None):
'''
Render a Cheetah template.
'''
from Cheetah.Template import Template
return str(Template(tmplstr, searchList=[context]))
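# Illustrative sketch, not part of the original module: Cheetah substitutes ``$name``
# placeholders from the search list, so the wrapper above behaves roughly like:
#
#     str(Template('Hello $name', searchList=[{'name': 'world'}]))  # -> 'Hello world'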
def py(sfn, string=False, **kwargs): # pylint: disable=C0103
'''
Render a template from a python source file
Returns::
{'result': bool,
'data': <Error data or rendered file path>}
'''
if not os.path.isfile(sfn):
return {}
mod = imp.load_source(
os.path.basename(sfn).split('.')[0],
sfn
)
# File templates need these set as __var__
if '__env__' not in kwargs and 'saltenv' in kwargs:
setattr(mod, '__env__', kwargs['saltenv'])
builtins = ['salt', 'grains', 'pillar', 'opts']
for builtin in builtins:
arg = '__{0}__'.format(builtin)
setattr(mod, arg, kwargs[builtin])
for kwarg in kwargs:
setattr(mod, kwarg, kwargs[kwarg])
try:
data = mod.run()
if string:
return {'result': True,
'data': data}
tgt = salt.utils.mkstemp()
with salt.utils.fopen(tgt, 'w+') as target:
target.write(data)
return {'result': True,
'data': tgt}
except Exception:
trb = traceback.format_exc()
return {'result': False,
'data': trb}
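# Illustrative sketch, not part of the original module: ``py()`` expects the template
# file to define a ``run()`` function and requires ``salt``, ``grains``, ``pillar`` and
# ``opts`` keyword arguments; the path and values below are placeholders.
#
#     result = py('/srv/salt/mytemplate.py', string=True,
#                 salt={}, grains={}, pillar={}, opts={}, saltenv='base')
#     # result == {'result': True, 'data': <whatever run() returned>}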
JINJA = wrap_tmpl_func(render_jinja_tmpl)
MAKO = wrap_tmpl_func(render_mako_tmpl)
WEMPY = wrap_tmpl_func(render_wempy_tmpl)
GENSHI = wrap_tmpl_func(render_genshi_tmpl)
CHEETAH = wrap_tmpl_func(render_cheetah_tmpl)
TEMPLATE_REGISTRY = {
'jinja': JINJA,
'mako': MAKO,
'py': py,
'wempy': WEMPY,
'genshi': GENSHI,
'cheetah': CHEETAH,
}
|
the-stack_106_27739 | from cgmbrush.cgmbrush import *
from cgmbrush.plots.plots import *
import matplotlib.pyplot as plt
import numpy as np
provider = BolshoiProvider()
date = '2022-06-03'
date2 = '2022-06-03'
resolutions = [4,8,16,32]
precipProfile = MassDependentProfile(PrecipitationProfile(), NFWProfile(), 10**13.3)
profiles = [(SphericalTophatProfile(), date), (SphericalTophatProfile(rvir_factor=2), date), (NFWProfile(), date), (FireProfile(), date2), (precipProfile, date2)]
configs = []
for p in profiles:
for r in resolutions:
configs.append(Configuration(p[0], provider=provider, resolution=r, datestamp=p[1]))
summary = ''
configs[0]
for c in configs:
c.run(load_from_files=True)
std = DM_statistics(c.get_final_field()[0])
summary = summary + "{} ({}):\t{}\n".format(c.addition_profile.pretty_name, c.resolution, std)
c.clear_results()
print(summary) |
the-stack_106_27740 | import os
import pickle
import uuid
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
from datasets.imdb import imdb
from model.utils.config import cfg
class kittivoc(imdb):
def __init__(self, image_set, devkit_path=None):
imdb.__init__(self, 'kittivoc_' + image_set)
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = self._devkit_path
self._classes = ('__background__', # always index 0
'car')
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
self._remove_empty_samples()
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
self._year = ''
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': True, # using difficult samples
'matlab_eval': False,
'rpn_file': None,
'min_size': 2}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_id_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return i
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier
        :param index: filename stem, e.g. 000000
        :return: absolute path to the image file
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
        Return the default path where the KITTIVOC data is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'KITTIVOC')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest, aka, the annotations.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _remove_empty_samples(self):
"""
        Remove images that have no valid annotations.
"""
print('Remove empty annotations: ',)
for i in range(len(self._image_index) - 1, -1, -1):
index = self._image_index[i]
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
non_diff_objs = [
obj for obj in objs if
int(obj.find('difficult').text) == 0 and obj.find('name').text.lower().strip() != 'dontcare']
num_objs = len(non_diff_objs)
if num_objs == 0:
print(index,)
self._image_index.pop(i)
print('Done. ')
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
if len(non_diff_objs) != len(objs):
print('Removed {} difficult objects'.format(
len(objs) - len(non_diff_objs)))
objs = non_diff_objs
# only need car, pedestrian, cyclist classes.
need_objs = [obj for obj in objs if obj.find('name').text.lower().strip()
in ['car', 'dontcare']]
objs = need_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.int16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
# just the same as gt_classes
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# --------------------------------------------------
care_inds = np.empty((0), dtype=np.int32)
dontcare_inds = np.empty((0), dtype=np.int32)
# --------------------------------------------------
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = max(float(bbox.find('xmin').text) - 1, 0)
y1 = max(float(bbox.find('ymin').text) - 1, 0)
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
# --------------------------------------------
diffc = obj.find('difficult')
difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
# --------------------------------------------
class_name = obj.find('name').text.lower().strip()
if class_name != 'dontcare':
care_inds = np.append(care_inds, np.asarray([ix], dtype=np.int32))
if class_name == 'dontcare':
dontcare_inds = np.append(dontcare_inds, np.asarray([ix], dtype=np.int32))
boxes[ix, :] = [x1, y1, x2, y2]
continue
cls = self._class_to_ind[class_name]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
# deal with dontcare areas
dontcare_areas = boxes[dontcare_inds, :]
boxes = boxes[care_inds, :]
gt_classes = gt_classes[care_inds]
overlaps = overlaps[care_inds, :]
seg_areas = seg_areas[care_inds]
ishards = ishards[care_inds]
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'dontcare_areas': dontcare_areas,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
from datasets.pascal_voc import pascal_voc
import pprint
d = pascal_voc('trainval', '2007')
pprint.pprint(d)
res = d.roidb
from IPython import embed;
embed()
|
the-stack_106_27742 | """
If a model is provided using a second argument, the classification is run using the pretrained autoembedder.
- python scripts/supervised_classification_wembedding.py ./data/training_input/ [./data/model/autoembedder]
"""
import sys
import pandas as pd
import tensorflow as tf
from utils.params import with_params
from utils.utils import get_sorted_input_files, load_model
from utils.engine import compile_model
from utils.data import prepare_data
from models.base_classification_network import BaseClassificationNetwork
from models.autoembedder_classification_model import AutoEmbedderClassificationModel
OUTPUT_DIRECTORY = ""
def train_model(
train_data_num: pd.DataFrame,
train_data_cat: pd.DataFrame,
train_data_target: pd.DataFrame,
model: tf.keras.Model,
config: dict,
) -> None:
model.fit(
[train_data_cat, train_data_num],
train_data_target,
batch_size=config["batch_size"],
epochs=config["n_epochs"],
verbose=config["verbosity_level"],
)
def test_model(
test_data_num: pd.DataFrame,
test_data_cat: pd.DataFrame,
test_data_target: pd.DataFrame,
model: tf.keras.Model,
config: dict,
) -> None:
loss, accuracy = model.evaluate(
[test_data_cat, test_data_num],
test_data_target,
batch_size=config["batch_size"],
verbose=config["verbosity_level"],
)
print(f" Model loss on the test set: {loss}")
print(f" Model accuracy on the test set: {100*accuracy}%")
@with_params("params.yaml", "train_classification_models")
def main(params: dict):
input_files = get_sorted_input_files(
input_dir=sys.argv[1],
input_patterns="",
input_extension="feather",
)
df = pd.read_feather(input_files[0])
(
train_df_num,
train_df_cat,
test_df_num,
test_df_cat,
train_df_target,
test_df_target,
encoding_reference_values,
_,
) = prepare_data(df, params)
n_target_classes = train_df_target.shape[1]
if len(sys.argv) > 2:
print(f"Pretrained Autoembedder {sys.argv[2]} will be used in classification.")
autoembedder = load_model(sys.argv[2])
model = AutoEmbedderClassificationModel(
n_numerical_inputs=len(train_df_num.columns),
n_target_classes=n_target_classes,
autoembedder=autoembedder,
encoding_reference_values=encoding_reference_values,
config=params,
)
else:
print(f"No pretrained model defined, using basic model.")
model = BaseClassificationNetwork(
n_numerical_inputs=len(train_df_num.columns),
n_target_classes=n_target_classes,
encoding_reference_values=encoding_reference_values,
config=params,
)
compile_model(model=model, config=params)
train_model(
train_data_num=train_df_num,
train_data_cat=train_df_cat,
train_data_target=train_df_target,
model=model,
config=params,
)
test_model(
test_data_num=test_df_num,
test_data_cat=test_df_cat,
test_data_target=test_df_target,
model=model,
config=params,
)
if __name__ == "__main__":
main()
|
the-stack_106_27745 | import asyncio
from contextlib import suppress
from sanic import Blueprint, response
from sanic.exceptions import abort
from sanic_openapi import doc
from .. import helpers, settings, utils
from ..models import Template
blueprint = Blueprint("Templates", url_prefix="/templates")
@blueprint.get("/")
@doc.summary("List all templates")
# TODO: https://github.com/jacebrowning/memegen/issues/580
# @doc.consumes(
# doc.String(name="filter", description="Part of the name or example to match"),
# location="query",
# )
@doc.produces(
# Can't use doc.List(Template) because the jsonify method is slightly different
doc.List(
{
"id": str,
"name": str,
"styles": doc.List(str),
"blank": str,
"example": str,
"source": str,
"_self": str,
}
),
description="Successfully returned a list of all templates",
content_type="application/json",
)
async def index(request):
query = request.args.get("filter", "").lower()
data = await asyncio.to_thread(helpers.get_valid_templates, request, query)
return response.json(data)
@blueprint.get("/<id>")
@doc.summary("View a specific template")
@doc.produces(
{
"id": str,
"name": str,
"styles": doc.List(str),
"blank": str,
"example": str,
"source": str,
"_self": str,
},
description="Successfully returned a specific templates",
content_type="application/json",
)
@doc.response(404, str, description="Template not found")
async def detail(request, id):
template = Template.objects.get_or_none(id)
if template:
return response.json(template.jsonify(request.app))
abort(404)
@blueprint.post("/<id>")
@doc.tag("Memes")
@doc.operation("Memes.create_from_template")
@doc.exclude(settings.DEPLOYED)
@doc.summary(settings.PREFIX + "Create a meme from a template")
@doc.consumes(
doc.JsonBody({"text_lines": [str], "extension": str, "redirect": bool}),
content_type="application/json",
location="body",
)
@doc.response(
201, {"url": str}, description="Successfully created a meme from a template"
)
async def build(request, id):
if request.form:
payload = dict(request.form)
with suppress(KeyError):
payload["image_url"] = payload.pop("image_url")[0]
with suppress(KeyError):
payload["extension"] = payload.pop("extension")[0]
with suppress(KeyError):
payload["redirect"] = payload.pop("redirect")[0]
else:
payload = request.json or {}
with suppress(KeyError):
payload["text_lines"] = payload.pop("text_lines[]")
template = Template.objects.get_or_create(id)
url = template.build_custom_url(
request,
payload.get("text_lines") or [],
extension=payload.get("extension"),
)
url, _updated = await utils.meta.tokenize(request, url)
if payload.get("redirect", False):
return response.redirect(url)
if template.valid:
status = 201
else:
status = 404
template.delete()
return response.json({"url": url}, status=status)
@blueprint.post("/custom")
@doc.tag("Memes")
@doc.exclude(settings.DEPLOYED)
@doc.summary(settings.PREFIX + "Create a meme from any image")
@doc.consumes(
doc.JsonBody(
{"image_url": str, "text_lines": [str], "extension": str, "redirect": bool}
),
content_type="application/json",
location="body",
)
@doc.response(
201, {"url": str}, description="Successfully created a meme from a custom image"
)
async def custom(request):
if request.form:
payload = dict(request.form)
with suppress(KeyError):
payload["image_url"] = payload.pop("image_url")[0]
with suppress(KeyError):
payload["extension"] = payload.pop("extension")[0]
with suppress(KeyError):
payload["redirect"] = payload.pop("redirect")[0]
else:
payload = request.json or {}
with suppress(KeyError):
payload["text_lines"] = payload.pop("text_lines[]")
url = Template("_custom").build_custom_url(
request,
payload.get("text_lines") or [],
background=payload.get("image_url", ""),
extension=payload.get("extension", ""),
)
url, _updated = await utils.meta.tokenize(request, url)
if payload.get("redirect", False):
return response.redirect(url)
return response.json({"url": url}, status=201)
|
the-stack_106_27746 | from flask import Flask, jsonify, request
from math import sqrt
app = Flask(__name__)
@app.route("/name", methods=["GET"])
def name():
name = {
"name": "Matthew"
}
return jsonify(name)
@app.route("/hello/<name>", methods=["GET"])
def hello(name):
message = {
"message": "Hello there, {}".format(name)
}
return jsonify(message)
@app.route("/distance", methods=["POST"])
def distance():
dict_r = request.get_json()
print(dict_r["a"])
print(dict_r["b"])
xdiff_squared = (dict_r["a"][0]-dict_r["b"][0])**2
ydiff_squared = (dict_r["a"][1]-dict_r["b"][1])**2
distance = sqrt(xdiff_squared + ydiff_squared)
json_return = {
"distance": distance,
"a": dict_r["a"],
"b": dict_r["b"]
}
return jsonify(json_return)
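# Illustrative request against the /distance endpoint above (the port assumes Flask's
# default development server):
#
#     import requests
#     resp = requests.post('http://127.0.0.1:5000/distance',
#                          json={'a': [0, 0], 'b': [3, 4]})
#     resp.json()  # -> {'a': [0, 0], 'b': [3, 4], 'distance': 5.0}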
if __name__ == "__main__":
app.run(host="127.0.0.1")
|
the-stack_106_27747 | from distutils.core import setup
with open('requirements.txt') as fobj:
install_requires = [line.strip() for line in fobj]
setup(
name='instantnews',
version='1.2.4',
description='Get live news instantaneously',
author='shivam singh',
author_email='[email protected]',
url='https://github.com/shivam043/instantnews',
license='MIT',
py_modules=['instantnews'],
install_requires=install_requires,
entry_points='''
[console_scripts]
instantnews=instantnews:parser
''',
)
|
the-stack_106_27749 | #synthDrivers/mssp.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2011 NV Access Inc
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from .sapi5 import SynthDriver
class SynthDriver(SynthDriver):
COM_CLASS = "speech.SPVoice"
name="mssp"
description="Microsoft Speech Platform"
|
the-stack_106_27750 | import h5py
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3
import file_utils
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.preprocessing import image
import cv2
import math
from PIL import Image, ImageStat
def load_labels(file_path, prefix = None):
labels = list()
with open(file_path, "r") as file:
for l in file.readlines():
label = l.replace("\n", "").strip()
if prefix is not None:
label = prefix + "_"+label
labels.append(label)
return labels
def is_gray_scale(img_path):
im = Image.open(img_path).convert("RGB")
stat = ImageStat.Stat(im)
if sum(stat.sum) / 3 == stat.sum[0]:
return True
else:
return False
def load_image_pixels_cv2(filename, shape):
input_w, input_h = shape
image = Image.open(filename)
image = np.array(image, dtype=np.uint8)
if len(image.shape) != 3:
if is_gray_scale(filename):
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
image = cv2.resize(image, (input_w, input_h))
image = np.expand_dims(image, 0)
return image
def load_image_pixels(filename, shape):
# load the image to get its shape
image = load_img(filename)
width, height = image.size
# load the image with the required size
image = load_img(filename, target_size=shape)
# convert to numpy array
image = img_to_array(image)
# scale pixel values to [0, 1]
image = image.astype('float32')
image /= 255.0
# add a dimension so that we have one sample
image = np.expand_dims(image, 0)
return image
def distance(a, b):
    """Euclidean distance between two (x, y) points."""
    x1, y1 = a[0], a[1]
    x2, y2 = b[0], b[1]
    return math.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))
def detect_face(img, face_detector, eye_detector, target_size=(224, 224), grayscale=False):
# -----------------------
exact_image = False
if type(img).__module__ == np.__name__:
exact_image = True
# -----------------------
if exact_image != True: # image path passed as input
img = cv2.imread(img)
img_raw = img.copy()
# --------------------------------
faces = face_detector.detectMultiScale(img, 1.3, 5)
# print("found faces in ",image_path," is ",len(faces))
extracted_faces = []
if len(faces) > 0:
for x, y, w, h in faces:
try:
detected_face = img[int(y):int(y + h), int(x):int(x + w)]
detected_face_gray = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)
# ---------------------------
# face alignment
eyes = eye_detector.detectMultiScale(detected_face_gray)
if len(eyes) >= 2:
# find the largest 2 eye
base_eyes = eyes[:, 2]
items = []
for i in range(0, len(base_eyes)):
item = (base_eyes[i], i)
items.append(item)
df = pd.DataFrame(items, columns=["length", "idx"]).sort_values(by=['length'], ascending=False)
eyes = eyes[df.idx.values[0:2]]
# -----------------------
# decide left and right eye
eye_1 = eyes[0];
eye_2 = eyes[1]
if eye_1[0] < eye_2[0]:
left_eye = eye_1
right_eye = eye_2
else:
left_eye = eye_2
right_eye = eye_1
# -----------------------
# find center of eyes
left_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
left_eye_x = left_eye_center[0];
left_eye_y = left_eye_center[1]
right_eye_center = (int(right_eye[0] + (right_eye[2] / 2)), int(right_eye[1] + (right_eye[3] / 2)))
right_eye_x = right_eye_center[0];
right_eye_y = right_eye_center[1]
# -----------------------
# find rotation direction
if left_eye_y > right_eye_y:
point_3rd = (right_eye_x, left_eye_y)
direction = -1 # rotate same direction to clock
else:
point_3rd = (left_eye_x, right_eye_y)
direction = 1 # rotate inverse direction of clock
# -----------------------
# find length of triangle edges
a = distance(left_eye_center, point_3rd)
b = distance(right_eye_center, point_3rd)
c = distance(right_eye_center, left_eye_center)
# -----------------------
# apply cosine rule
cos_a = (b * b + c * c - a * a) / (2 * b * c)
angle = np.arccos(cos_a) # angle in radian
angle = (angle * 180) / math.pi # radian to degree
# -----------------------
# rotate base image
if direction == -1:
angle = 90 - angle
img = Image.fromarray(img_raw)
img = np.array(img.rotate(direction * angle))
# you recover the base image and face detection disappeared. apply again.
faces = face_detector.detectMultiScale(img, 1.3, 5)
if len(faces) > 0:
x, y, w, h = faces[0]
detected_face = img[int(y):int(y + h), int(x):int(x + w)]
# -----------------------
# face alignment block end
# ---------------------------
# face alignment block needs colorful images. that's why, converting to gray scale logic moved to here.
if grayscale == True:
detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)
detected_face = cv2.resize(detected_face, target_size)
img_pixels = image.img_to_array(detected_face)
img_pixels = np.expand_dims(img_pixels, axis=0)
# normalize input in [0, 1]
img_pixels /= 255
extracted_faces.append(img_pixels)
except:
pass
else:
if exact_image == True:
if grayscale == True:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, target_size)
img_pixels = image.img_to_array(img)
img_pixels = np.expand_dims(img_pixels, axis=0)
img_pixels /= 255
extracted_faces.append(img_pixels)
return extracted_faces
def extract_predictions(image_path, shape, model, labels: list, second_image_loader = False, model_name=''):
probabilities = None
input_w, input_h = shape
# load and prepare image
image = None
if second_image_loader == True:
try:
image = load_image_pixels_cv2(image_path, (input_w, input_h))
except:
pass
if image is None or second_image_loader == False:
try:
image = load_image_pixels(image_path, (input_w, input_h))
except:
pass
try:
# do prediction
prediction = model.predict(image)
max_predicted_index = np.argmax(prediction[0])
probability = prediction[0][max_predicted_index]
max_label = labels[max_predicted_index]
probabilities = np.zeros(len(labels))
for l in labels:
index = labels.index(l)
prob = prediction[0][index]
probabilities[labels.index(l)] = prob
except Exception as e:
pass
return probabilities
def extract_face_emotion_predictions(image_path, face_detector, eye_detector, face_emotion_model, labels):
emotion_labels = ['face_emotion_angry', 'face_emotion_afraid', 'face_emotion_happy', 'face_emotion_sad', 'face_emotion_surprised', 'face_emotion_neutral']
probabilities = np.zeros(len(labels), dtype=float)
try:
faces = detect_face(image_path, face_detector, eye_detector, (48, 48), True)
except:
faces = list()
if len(faces) == 0:
return probabilities
max_prob = 0
max_predictions = None
max_label = ''
for face in faces:
# run prediction on image
predictions = face_emotion_model.predict(face)[0]
max_predicted_index = np.argmax(predictions)
probability = predictions[max_predicted_index]
if max_prob < probability:
max_prob = probability
max_predictions = predictions
max_label = emotion_labels[max_predicted_index]
if max_prob >=0.7:
probabilities = max_predictions
return probabilities
def extract_all_predictions(ids, base_dir):
## Face emotion detector
all_labels = load_labels(base_dir + 'resources/image_predictions/feature_labels.txt')
face_emotion_labels = all_labels[4:10]
imagenet_labels = all_labels[375:1375]
nudity_labels = all_labels[0:2]
places365_labels = all_labels[10:375]
binary_hateword_labels = all_labels[1377:1379]
binary_finetuned_inception_labels = all_labels[1375:1377]
multiclass_hateword_labels = all_labels[1379:1805]
feature_size = 1805
all_features = np.zeros((len(ids), feature_size))
face_detector = cv2.CascadeClassifier(base_dir + 'resources/model_weights/haarcascade_frontalface_default.xml')
eye_detector = cv2.CascadeClassifier(base_dir + 'resources/model_weights/haarcascade_eye.xml')
face_emotion_model = tf.keras.models.load_model(base_dir + 'resources/model_weights/face_emotions.h5')
## ImageNet object detector
imagenet_model = InceptionV3(include_top=True, weights='imagenet', pooling='avg',
input_tensor=tf.keras.layers.Input(shape=(299, 299, 3)))
imagenet_shape = 299, 299
## Nudity Detector
nudity_full_model = tf.keras.models.load_model(base_dir + 'resources/model_weights/nudenet.h5')
nudity_shape = 256, 256
## Places365 detector
places365_full_model = tf.keras.models.load_model(base_dir + 'resources/model_weights/places365.h5')
places365_shape = 224, 224
finetuned_inception_model = tf.keras.models.load_model(base_dir + 'finetune_results/11-05-2020_11:44:07/model.h5')
binary_hateword_model = tf.keras.models.load_model(
base_dir + 'finetune_hate_image_results/11-05-2020_22:04:21/model.h5')
multiclass_hateword_model = tf.keras.models.load_model(
base_dir + 'finetune_hate_image_results/11-05-2020_22:06:21/model.h5')
c = 0
for id in ids:
image_path = base_dir+'resources/Memotion7k/images/' + str(id)
features = np.zeros(feature_size, dtype=float)
nudity_predictions = extract_predictions(image_path, nudity_shape, nudity_full_model, nudity_labels, 'nudity')
face_emotion_predictions = extract_face_emotion_predictions(image_path, face_detector, eye_detector, face_emotion_model, face_emotion_labels)
places365_predictions = extract_predictions(image_path, places365_shape, places365_full_model, places365_labels, second_image_loader=True, model_name='places')
imagenet_predictions = extract_predictions(image_path, imagenet_shape, imagenet_model, imagenet_labels, 'imagenet')
hateword_predictions = extract_predictions(image_path, (299, 299), binary_hateword_model,
binary_hateword_labels, 'binary hate')
multiclass_hateword_predictions = extract_predictions(image_path, (299, 299), multiclass_hateword_model,
multiclass_hateword_labels, 'multi-hate')
finetuned_inception_predictions = extract_predictions(image_path, (299, 299), finetuned_inception_model,
binary_finetuned_inception_labels, 'finetuned-inception')
start_index = 0
end_index = len(nudity_labels)
features[start_index:end_index] = nudity_predictions
start_index = end_index
end_index = end_index + len(binary_hateword_labels)
features[start_index:end_index] = hateword_predictions
start_index = end_index
end_index = end_index + len(face_emotion_labels)
features[start_index:end_index] = face_emotion_predictions
start_index = end_index
end_index = end_index + len(places365_labels)
features[start_index:end_index] = places365_predictions
start_index = end_index
end_index = end_index + len(imagenet_labels)
features[start_index:end_index] = imagenet_predictions
start_index = end_index
end_index = end_index + len(binary_finetuned_inception_labels)
features[start_index:end_index] = finetuned_inception_predictions
start_index = end_index
end_index = end_index + len(binary_hateword_labels)
features[start_index:end_index] = hateword_predictions
start_index = end_index
end_index = end_index + len(multiclass_hateword_labels)
features[start_index:end_index] = multiclass_hateword_predictions
all_features[c, :] = features
c += 1
# if c % 1 == 0:
print(str(c) + '/'+str(len(ids)))
return all_features
print('Loading pre-trained model weights')
base_dir = "/home/hakimovs/PycharmProjects/hate-speech-detection/"
print("Loading Memotion7k dataset")
splits = ['train']
for split in splits:
df = pd.read_json(base_dir + 'resources/Memotion7k/'+split+'.txt', lines=True, orient='string')
ids = df["image_name"].tolist()
print('Extracting '+split+' predictions')
valid_features = extract_all_predictions(ids, base_dir)
np.save(base_dir + 'resources/Memotion7k/image_predictions/'+split+'_image_predictions.npy', valid_features)
print("finished!") |
the-stack_106_27751 | from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.subaru import subarucan
from selfdrive.car.subaru.values import DBC, PREGLOBAL_CARS
from opendbc.can.packer import CANPacker
from common.dp_common import common_controller_ctrl
class CarControllerParams():
def __init__(self):
self.STEER_MAX = 2047 # max_steer 4095
self.STEER_STEP = 2 # how often we update the steer cmd
self.STEER_DELTA_UP = 50 # torque increase per refresh, 0.8s to max
self.STEER_DELTA_DOWN = 70 # torque decrease per refresh
self.STEER_DRIVER_ALLOWANCE = 60 # allowed driver torque before start limiting
self.STEER_DRIVER_MULTIPLIER = 10 # weight driver torque heavily
self.STEER_DRIVER_FACTOR = 1 # from dbc
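  # Rough arithmetic behind the "0.8s to max" note above (illustrative, assuming the
  # control loop runs at 100 Hz): steering updates every STEER_STEP (2) frames, i.e.
  # 50 times per second, so ramping 0 -> STEER_MAX takes about 2047 / 50 = ~41 updates,
  # or roughly 0.8 s.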
class CarController():
def __init__(self, dbc_name, CP, VM):
self.apply_steer_last = 0
self.es_distance_cnt = -1
self.es_accel_cnt = -1
self.es_lkas_cnt = -1
self.fake_button_prev = 0
self.steer_rate_limited = False
self.params = CarControllerParams()
self.packer = CANPacker(DBC[CP.carFingerprint]['pt'])
# dp
self.last_blinker_on = False
self.blinker_end_frame = 0.
def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert, left_line, right_line, dragonconf):
can_sends = []
# *** steering ***
if (frame % self.params.STEER_STEP) == 0:
apply_steer = int(round(actuators.steer * self.params.STEER_MAX))
# limits due to driver torque
new_steer = int(round(apply_steer))
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, self.params)
self.steer_rate_limited = new_steer != apply_steer
if not enabled:
apply_steer = 0
# dp
blinker_on = CS.out.leftBlinker or CS.out.rightBlinker
if not enabled:
self.blinker_end_frame = 0
if self.last_blinker_on and not blinker_on:
self.blinker_end_frame = frame + dragonconf.dpSignalOffDelay
apply_steer = common_controller_ctrl(enabled,
dragonconf.dpLatCtrl,
dragonconf.dpSteeringOnSignal,
blinker_on or frame < self.blinker_end_frame,
apply_steer)
self.last_blinker_on = blinker_on
if CS.CP.carFingerprint in PREGLOBAL_CARS:
can_sends.append(subarucan.create_preglobal_steering_control(self.packer, apply_steer, frame, self.params.STEER_STEP))
else:
can_sends.append(subarucan.create_steering_control(self.packer, apply_steer, frame, self.params.STEER_STEP))
self.apply_steer_last = apply_steer
# *** alerts and pcm cancel ***
if CS.CP.carFingerprint in PREGLOBAL_CARS:
if self.es_accel_cnt != CS.es_accel_msg["Counter"]:
# 1 = main, 2 = set shallow, 3 = set deep, 4 = resume shallow, 5 = resume deep
# disengage ACC when OP is disengaged
if pcm_cancel_cmd:
fake_button = 1
# turn main on if off and past start-up state
elif not CS.out.cruiseState.available and CS.ready:
fake_button = 1
else:
fake_button = CS.button
# unstick previous mocked button press
if fake_button == 1 and self.fake_button_prev == 1:
fake_button = 0
self.fake_button_prev = fake_button
can_sends.append(subarucan.create_es_throttle_control(self.packer, fake_button, CS.es_accel_msg))
self.es_accel_cnt = CS.es_accel_msg["Counter"]
else:
if self.es_distance_cnt != CS.es_distance_msg["Counter"]:
can_sends.append(subarucan.create_es_distance(self.packer, CS.es_distance_msg, pcm_cancel_cmd))
self.es_distance_cnt = CS.es_distance_msg["Counter"]
if self.es_lkas_cnt != CS.es_lkas_msg["Counter"]:
can_sends.append(subarucan.create_es_lkas(self.packer, CS.es_lkas_msg, visual_alert, left_line, right_line))
self.es_lkas_cnt = CS.es_lkas_msg["Counter"]
return can_sends
|
the-stack_106_27752 | #!/usr/lib/python2.7
#####!/usr/bin/env python
import subprocess
import os
import glob
import re
import sys
if sys.version_info[0] > 2:
print('Python Detected: ', sys.version_info[0])
else:
try:
import commands # py2
    except ImportError:
pass
# Global variables
WEAR_WARN_860_PRO_1TB = 5500
WEAR_CRIT_860_PRO_1TB = 5750
WEAR_WARN = 5
WEAR_CRIT = 2
SSDREMAP = 400
disk_list = []
# function to grab output from a shell command
def fetch(cmd, arg):
command = [cmd, arg]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
text = p.stdout.read()
retcode = p.wait()
return str(text)
# Simpler function to fetch output of shell command, but may not work on Python3+
def shell(cmd):
(retval, output) = commands.getstatusoutput(cmd)
return output
# self-contained function to check the disk
# mode = sata or nvme
def disk_check(smart, file_name_pattern, mode):
for sdx in glob.iglob(file_name_pattern):
device = sdx.split("/")[3]
if mode == 'sata':
model = str(shell(smart + ' -i /dev/' + device + ' | grep "Device Model" | cut -c 19-'))
elif mode == 'nvme':
model = str(shell(smart + ' -i /dev/' + device + ' | grep "Model Number" | awk "{print $4}"'))
else:
print("2 check_smart_ " + device + " ERROR: Unrecognized device mode: " + mode + ".")
return
# Spinning disks, which key on pending sectors and remap count.
if (model.startswith('WDC') or model.startswith('TOSHIBA') or model.startswith('ST1') or model.startswith(
'ST3') or model.startswith('ST5') or model.startswith('ST9') or model.startswith('GB1000')):
remap = int(shell(smart + ' -a /dev/' + device + " | grep Reallocated_Sector_Ct | awk \'{print $10}\'"))
pend = int(shell(smart + ' -a /dev/' + device + " | grep Current_Pending_Sector | awk \'{print $10}\'"))
hours = int(shell(smart + ' -a /dev/' + device + " | grep Power_On_Hours | awk \'{print $10}\'"))
if (remap > 50) or (pend > 0):
print("2 Check_Smart_" + device + " - CRITICAL - " + device + " SMART failure Hours=" + str(
hours) + " Remap=" + str(remap) + " Pending=" + str(pend))
else:
print("0 Check_Smart_" + device + " - OK - " + device + " clean Hours=" + str(hours))
# Fetch NVMe data
elif mode == 'nvme':
# Normalize wear to mean life remaining, like is true for SATA
wear = 100 - int(
shell(smart + ' -a /dev/' + device + " | grep 'Percentage Used' | awk '{print $3}' | cut -d '%' -f1"))
# No rsvd block count exposed for NVMe, so put a 0 which is always less than the threshold for SATA disks
entry = {'device': device, 'wear': wear, 'rsvd': 0}
disk_list.append(entry)
# SSD relying on raw data due to normalized smartctl output data being too conservative. Tests wear level and
# thus cares about raid locality
elif '860 PRO 1TB' in model:
wear = int(shell(smart + ' -a /dev/' + device + " | grep Wear_Level | awk '{print $10}'"))
rsvd = int(shell(smart + ' -a /dev/' + device + " | grep Used_Rsvd | awk '{print $10}'"))
# Normalize manually
wear = 100 - (wear / WEAR_CRIT_860_PRO_1TB)
entry = {'device': device, 'wear': wear, 'rsvd': rsvd}
disk_list.append(entry)
# Other SSD models that have acceptable SMART values
elif ('SSD' in model or model.startswith('Kingston SKC') or model.startswith('Micro_1100') or model.startswith(
'SAMSUNG MZ7LM') or model.startswith('SAMSUNG MZ7LH')):
wear = int(shell(smart + ' -a /dev/' + device + " | grep Wear_Level | awk '{print $4}'"))
rsvd = int(shell(smart + ' -a /dev/' + device + " | grep Used_Rsvd | awk '{print $10}'"))
entry = {'device': device, 'wear': wear, 'rsvd': rsvd}
disk_list.append(entry)
# end of for looping over the disks
# Fetch RAID info from mdadm about these devices and integrate with the smartctl data
populate_raid_info(disk_list)
# Iterate over each disk and mark it good or bad based on thresholds
for disk in disk_list:
# Fail if too many remaps. The good/ok gets overwritten by wear leveling checks if needed
if disk['rsvd'] > SSDREMAP:
disk['status'] = "prefail"
disk['warn_type'] = "WARNING"
else:
disk['status'] = "good"
disk['warn_type'] = "OK"
# Fail independently if too much wear: permits a crit here to override a simple warn from remaps
# Wear values are 99 (Best) down to 0 (no predicted write life left), so <= is the proper check
if disk['wear'] <= WEAR_CRIT:
disk['status'] = "prefail"
disk['warn_type'] = "CRITICAL"
elif disk['wear'] <= WEAR_WARN:
disk['status'] = "prefail"
disk['warn_type'] = "WARNING"
# Now that health data on all disks are populated, run through each disk again and determine
# whether to alert it as good or bad.
# As long as one of the disks in a set is OK, both shall report OK (policy reasons)
for disk in disk_list:
if disk['status'] == "good":
print("0 Check_Smart_" + disk['device'] + " wear_life_remaining=" + str(disk['wear']) + ";" + str(
WEAR_WARN) + ";" + str(WEAR_CRIT) + " remaps=" + str(disk['rsvd']) + " " + disk['device'] + " OK")
# If status is not good, find its pair and see if it is in prefail also
else:
# Report bad but with 0 code (meaning OK) if one disk in the pair has problems
# Report bad and NOT ok if both disks in the pair have problems
pair = find_pair(disk, disk_list)
if pair['status'] == "good":
print("0 Check_Smart_" + disk['device'] + " wear_life_remaining=" + str(disk['wear']) + ";" + str(
WEAR_WARN) + ";" + str(WEAR_CRIT) + " remaps=" + str(disk['rsvd']) + " " + disk['device'] + " " + \
disk['warn_type'])
else:
print("2 Check_Smart_" + disk['device'] + " wear_life_remaining=" + str(disk['wear']) + ";" + str(
WEAR_WARN) + ";" + str(WEAR_CRIT) + " remaps=" + str(disk['rsvd']) + " " + disk['device'] + " " + \
disk['warn_type'])
# Fetch the list of md arrays from the system and populates devices dictionary with them
# Finds the first raid10 device and uses it to determine which disks are in what sets.
# Area for future improvement: check all arrays instead of just the first, for sanity
# Also, it relies on adjacency to determine set info. In a 4x R10 there are two set-As
# and two set-Bs and it presumes that near=2 is the setting for deciding which to check.
def populate_raid_info(devices):
arrays = shell("mdadm --detail --scan")
for array in arrays.splitlines():
device = array.split(' ')[1]
raid_type = shell("mdadm --detail " + device + " | grep 'Raid Level' | awk '{print $4}'")
if raid_type != 'raid10':
continue
# Fetch detailed set information
for dev in devices:
raid_device = shell("mdadm --detail " + device + " | grep " + dev['device'] + " | awk '{print $4}'")
dev['RaidDevice'] = int(raid_device)
set_info = shell("mdadm --detail " + device + " | grep " + dev['device'] + " | awk '{print $7}'")
dev['set'] = set_info
# Finds the R10 pair in a set
# Presumes near=2
def find_pair(disk, devices):
set_name = disk['set']
raid_device = disk['RaidDevice']
# If even, pair is +1 id
if (raid_device % 2) == 0:
return fetch_disk_by_id(disk['RaidDevice'] + 1, devices)
else:
return fetch_disk_by_id(disk['RaidDevice'] - 1, devices)
def fetch_disk_by_id(id, devices):
for d in devices:
if d['RaidDevice'] == id:
return d
return []
## MAIN CODE
# determine which disk type the machine uses
sdx = os.path.isfile("/sys/block/sda/size")
nvme_x = os.path.isfile("/sys/block/nvme0n1/size")
# Fail silently and early out of devices that lack both. These would be VMs with
# xvda and such, which ought to neither have SMARTmontools nor physical disks to check
if not sdx and not nvme_x:
exit()
# check for smartmontools
smart = fetch("which", "smartctl").split("\n")[0]
if not smart:
print("2 check_smart_sda ERROR: Unable to detect smartmontools. Is it installed?")
exit()
# execute appropriate check
if sdx and nvme_x:
disk_check(smart, '/sys/block/sd?', 'sata')
disk_check(smart, '/sys/block/nvme?n1', 'nvme')
elif sdx:
disk_check(smart, '/sys/block/sd?', 'sata')
elif nvme_x:
disk_check(smart, '/sys/block/nvme?n1', 'nvme')
|
the-stack_106_27753 | # -*- coding:utf-8 -*-
"""
Library for generating XML as a stream without first building a tree in memory.
Basic usage::
import elementflow
file = open('text.xml', 'w') # can be any object with .write() method
with elementflow.xml(file, 'root') as xml:
xml.element('item', attrs={'key': 'value'}, text='text')
with xml.container('container', attrs={'key': 'value'}):
xml.text('text')
xml.element('subelement', text='subelement text')
Usage with namespaces::
with elementflow.xml(file, 'root', namespaces={'': 'urn:n', 'n1': 'urn:n1'}) as xml:
xml.element('item')
with xml.container('container', namespaces={'n2': 'urn:n2'):
xml.element('n1:subelement')
xml.element('n2:subelement')
Pretty-printing::
with elementflow.xml(file, 'root', indent=2):
# ...
"""
import itertools
import textwrap
import codecs
from typing import Callable, Dict, IO, List, Optional, Sequence, Set, Tuple, TypeVar, Union
MapType = TypeVar('MapType')
def escape(value: str) -> str:
if '&' not in value and '<' not in value:
return value
return value.replace('&', '&').replace('<', '<')
def quote_value(value: str) -> str:
if '&' in value or '<' in value or '"' in value:
value = value.replace('&', '&').replace('<', '<').replace('"', '"')
return f'"{value}"'
def convert_attrs_to_string(attrs: Optional[Dict[str, str]] = None) -> str:
if not attrs:
return ''
return ''.join(f' {k}={quote_value(v)}' for k, v in attrs.items())
class XMLGenerator:
"""
Basic generator without support for namespaces or pretty-printing.
Constructor accepts:
- file: an object receiving XML output, anything with .write()
- root: name of the root element
- attrs: attributes dict
Constructor will implicitly open a root container element, you don't need
to call .container() for it
"""
def __init__(self, file: IO, root: str, attrs: Optional[Dict[str, str]] = None, **kwargs) -> None:
self.file = codecs.getwriter('utf-8')(file)
self.file.write('<?xml version="1.0" encoding="utf-8"?>')
self.stack: List[str] = []
self.container(root, attrs, **kwargs)
def __enter__(self) -> 'XMLGenerator':
return self
def __exit__(self, exc_type, exc_value, exc_tb) -> None:
if exc_type:
return
self.file.write(f'</{self.stack.pop()}>')
def container(self, name: str, attrs: Optional[Dict[str, str]] = None, **kwargs) -> 'XMLGenerator':
"""
Opens a new element containing sub-elements and text nodes.
        Intended to be used with a ``with`` statement.
"""
self.file.write(f'<{name}{convert_attrs_to_string(attrs)}>')
self.stack.append(name)
return self
def element(self, name: str, attrs: Optional[Dict[str, str]] = None, text: str = '') -> None:
"""
        Generates a single element, either empty or with text content.
"""
if text:
self.file.write(f'<{name}{convert_attrs_to_string(attrs)}>{escape(text)}</{name}>')
else:
self.file.write(f'<{name}{convert_attrs_to_string(attrs)}/>')
def text(self, value: str) -> None:
"""
Generates a text in currently open container.
"""
self.file.write(escape(value))
def comment(self, value: str) -> None:
"""
Adds a comment to the xml
"""
value = value.replace('--', '')
self.file.write(f'<!--{value}-->')
def map(
self,
func: Callable[[MapType], Tuple[str, Optional[Dict[str, str]], str]],
sequence: Sequence[MapType],
) -> None:
"""
Convenience function for translating a sequence of objects into xml elements.
First parameter is a function that accepts an object from the sequence and
return a tuple of arguments for "element" method.
"""
for item in sequence:
self.element(*func(item))
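    # Illustrative use of ``map`` (not part of the original module); ``items`` and its
    # attributes are invented for the example:
    #
    #     xml.map(lambda item: ('book', {'id': str(item.id)}, item.title), items)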
class NamespacedGenerator(XMLGenerator):
"""
XML generator with support for namespaces.
"""
def __init__(
self,
file: IO,
root: str,
attrs: Optional[Dict[str, str]] = None,
namespaces: Optional[Dict[str, str]] = None,
) -> None:
self.namespaces: List[Set[str]] = [{'xml'}]
super().__init__(file, root, attrs=attrs, namespaces=namespaces)
def _process_namespaces(
self,
name: str,
attrs: Optional[Dict[str, str]] = None,
namespaces: Optional[Dict[str, str]] = None,
) -> Tuple[Dict[str, str], Set[str]]:
prefixes: Set[str] = self.namespaces[-1]
if namespaces:
prefixes |= set(namespaces)
attributes = attrs or {}
names = [n for n in itertools.chain((name,), attributes) if ':' in n]
for name in names:
prefix = name.split(':')[0]
if prefix not in prefixes:
raise ValueError(f'Unknown namespace prefix: {prefix}')
if namespaces:
namespaces = {
f'xmlns:{key}' if key else 'xmlns': value
for key, value in namespaces.items()
}
attributes.update(namespaces)
return attributes, prefixes
def __exit__(self, exc_type, exc_value, exc_tb) -> None:
super().__exit__(exc_type, exc_value, exc_tb)
self.namespaces.pop()
def container(
self,
name: str,
attrs: Optional[Dict[str, str]] = None,
namespaces: Optional[Dict[str, str]] = None,
) -> XMLGenerator:
attrs, prefixes = self._process_namespaces(name, attrs, namespaces)
self.namespaces.append(prefixes)
return super().container(name, attrs)
def element(
self,
name: str,
attrs: Optional[Dict[str, str]] = None,
namespaces: Optional[Dict[str, str]] = None,
text: str = '',
) -> None:
attributes, _ = self._process_namespaces(name, attrs, namespaces)
super().element(name, attributes, text)
class IndentingGenerator(NamespacedGenerator):
"""
XML generator with pretty-printing.
"""
def __init__(self, *args, **kwargs) -> None:
self._text_wrap: bool = kwargs.pop('text_wrap', True)
self._indent: str = ' ' * kwargs.pop('indent', 2)
self._width: int = kwargs.pop('width', 70)
self._min_width: int = kwargs.pop('min_width', 20)
super().__init__(*args, **kwargs)
def _format_value(self, value: str) -> str:
indent = self._indent * len(self.stack)
self.file.write(f'\n{indent}')
if len(value) > self._width and self._text_wrap:
fill = self._fill(value, indent + self._indent)
value = f'{fill}\n{indent}'
return value
def _fill(self, value: str, indent: Optional[str] = None) -> str:
if indent is None:
indent = self._indent * len(self.stack)
width = max(self._min_width, self._width - len(indent))
tw = textwrap.TextWrapper(width=width, initial_indent=indent, subsequent_indent=indent)
return f'\n{tw.fill(value)}'
def __exit__(self, *args, **kwargs) -> None:
fill = self._indent * (len(self.stack) - 1)
self.file.write(f'\n{fill}')
super().__exit__(*args, **kwargs)
if not self.stack:
self.file.write('\n')
def container(self, *args, **kwargs) -> XMLGenerator:
fill = self._indent * len(self.stack)
self.file.write(f'\n{fill}')
return super().container(*args, **kwargs)
def element(
self,
name: str,
attrs: Optional[Dict[str, str]] = None,
namespaces: Optional[Dict[str, str]] = None,
text: str = '',
) -> None:
text = self._format_value(text)
return super().element(name, attrs, namespaces, text)
def text(self, value: str) -> None:
super().text(self._fill(value))
def comment(self, value: str) -> None:
value = self._format_value(value)
return super().comment(value)
class Queue:
"""
    In-memory queue for use as a temporary output buffer for the xml generator.
"""
def __init__(self) -> None:
self.data = bytearray()
def __len__(self) -> int:
return len(self.data)
def write(self, value: Union[bytes, bytearray]) -> None:
self.data.extend(value)
def pop(self) -> str:
        # The generators write UTF-8-encoded bytes; decode so callers get text back.
        result = self.data.decode('utf-8')
self.data = bytearray()
return result
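# Illustrative sketch of buffering output through ``Queue`` (not part of the original
# module): generate into the queue, then drain chunks with ``pop()`` as needed.
#
#     buffer = Queue()
#     with xml(buffer, 'root') as doc:
#         doc.element('item', text='text')
#         chunk = buffer.pop()  # flush what has been generated so far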
def xml(
file: IO,
root: str,
attrs: Optional[Dict[str, str]] = None,
namespaces: Optional[Dict[str, str]] = None,
indent: Optional[int] = None,
text_wrap: bool = True,
**kwargs,
) -> XMLGenerator:
"""
Creates a streaming XML generator.
Parameters:
- file: an object receiving XML output, anything with .write()
- root: name of the root element
- attrs: attributes dict
- namespaces: namespaces dict {prefix: uri}, default namespace has prefix ''
    - indent: indent size used to pretty-print XML; when None, pretty-printing is disabled.
"""
if indent is not None:
return IndentingGenerator(file, root, attrs, namespaces, text_wrap=text_wrap, indent=indent, **kwargs)
elif namespaces:
return NamespacedGenerator(file, root, attrs, namespaces)
else:
return XMLGenerator(file, root, attrs)
|
the-stack_106_27758 | from baselayer.app.custom_exceptions import AccessError
from .general_prediction import GeneralPredictionHandler
from ..models import Prediction, Project, DBSession
from .. import util
import tornado.gen
import cesium
import uuid
import datetime
import tempfile
import requests
import traceback
import json
class SciencePredictionHandler(GeneralPredictionHandler):
"""Handler for performing science predictions."""
@tornado.gen.coroutine
def _await_science_predictions(self, prediction, science_model_ids_and_probs):
try:
prediction = DBSession().merge(prediction)
while True:
preds_info = [
requests.get(
'{}/predictions/{}'.format(self.cfg['cesium_app']['url'],
cesium_app_prediction_id),
headers={'Authorization': f'token {self.get_cesium_auth_token()}'}
).json()['data']
for cesium_app_prediction_id in
prediction.cesium_app_sci_pred_ids]
if all([pred_info['finished'] for pred_info in preds_info]):
prediction.science_preds_task_id = None
prediction.science_preds_finished = datetime.datetime.now()
sci_pred_results = {pred_info['model_id']: pred_info['results']
for pred_info in preds_info}
prediction.science_results = bytes(json.dumps(
util.aggregate_pred_results_by_ts(
sci_pred_results, science_model_ids_and_probs,
token=self.get_cesium_auth_token())),
encoding='utf-8')
DBSession().add(prediction)
DBSession().commit()
break
else:
yield tornado.gen.sleep(1)
self.action('baselayer/SHOW_NOTIFICATION',
payload={"note": "Science prediction completed."})
except Exception as e:
traceback.print_exc()
DBSession().delete(prediction)
DBSession().commit()
self.action('baselayer/SHOW_NOTIFICATION',
payload={
"note": "Prediction failed "
"with error {}. Please try again.".format(e),
"type": "error"})
self.action('survey_app/FETCH_PREDICTIONS')
@tornado.web.authenticated
@tornado.gen.coroutine
def post(self):
data = self.get_json()
prediction_id = data['prediction_id']
prediction = Prediction.get_if_owned_by(prediction_id, self.current_user)
dataset = prediction.dataset
dataset_id = dataset.id
cesium_dataset_id = dataset.cesium_app_id
science_model_ids_and_probs = util.determine_model_ids(
prediction.display_info()['results'],
token=self.get_cesium_auth_token())
cesium_app_pred_ids = []
for model_id in set([mdl_id for ts_name in science_model_ids_and_probs
for mdl_id in
science_model_ids_and_probs[ts_name]]):
data = {'datasetID': cesium_dataset_id,
'modelID': model_id,
'ts_names': [ts_name for ts_name in science_model_ids_and_probs
if model_id in science_model_ids_and_probs[ts_name]]}
# POST prediction to cesium_web
r = requests.post('{}/predictions'.format(self.cfg['cesium_app']['url']),
json=data,
headers={'Authorization': f'token {self.get_cesium_auth_token()}'}
).json()
if r['status'] != 'success':
return self.error('An error occurred while processing the request'
'to cesium_web: {}'.format(r['message']))
cesium_app_pred_ids.append(r['data']['id'])
prediction.science_preds_task_id = str(uuid.uuid4())
prediction.cesium_app_sci_pred_ids = cesium_app_pred_ids
DBSession().add(prediction)
DBSession().commit()
loop = tornado.ioloop.IOLoop.current()
loop.spawn_callback(self._await_science_predictions, prediction,
science_model_ids_and_probs)
return self.success(prediction.display_info(),
'survey_app/FETCH_PREDICTIONS')
@tornado.web.authenticated
def get(self, prediction_id=None, action=None):
if action == 'download':
try:
pred = Prediction.get_if_owned_by(
prediction_id, self.current_user).display_info()
except OSError:
return self.error('The requested file could not be found. '
'The cesium_web app must be running on the '
'same machine to download prediction results.')
with tempfile.NamedTemporaryFile() as tf:
util.pred_results_to_csv(pred['science_results'], tf.name)
with open(tf.name) as f:
self.set_header("Content-Type", 'text/csv; charset="utf-8"')
self.set_header("Content-Disposition",
"attachment; filename=survey_app_prediction_results.csv")
self.write(f.read())
|
the-stack_106_27759 | import asyncio
import pytest
from click.testing import CliRunner
pytest.importorskip("requests")
import os
from multiprocessing import cpu_count
from time import sleep
import requests
from dask.utils import tmpfile
import distributed.cli.dask_worker
from distributed import Client
from distributed.compatibility import LINUX, to_thread
from distributed.deploy.utils import nprocesses_nthreads
from distributed.metrics import time
from distributed.utils import parse_ports, sync
from distributed.utils_test import gen_cluster, popen, requires_ipv6
def test_nanny_worker_ports(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--host",
"127.0.0.1",
"--worker-port",
"9684",
"--nanny-port",
"5273",
"--no-dashboard",
]
):
with Client("127.0.0.1:9359", loop=loop) as c:
start = time()
while True:
d = sync(c.loop, c.scheduler.identity)
if d["workers"]:
break
else:
assert time() - start < 60
sleep(0.1)
assert (
d["workers"]["tcp://127.0.0.1:9684"]["nanny"]
== "tcp://127.0.0.1:5273"
)
@pytest.mark.slow
def test_nanny_worker_port_range(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]) as sched:
n_workers = 3
worker_port = "9684:9686"
nanny_port = "9688:9690"
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--nworkers",
f"{n_workers}",
"--host",
"127.0.0.1",
"--worker-port",
worker_port,
"--nanny-port",
nanny_port,
"--no-dashboard",
]
):
with Client("127.0.0.1:9359", loop=loop) as c:
start = time()
while len(c.scheduler_info()["workers"]) < n_workers:
sleep(0.1)
assert time() - start < 60
def get_port(dask_worker):
return dask_worker.port
expected_worker_ports = set(parse_ports(worker_port))
worker_ports = c.run(get_port)
assert set(worker_ports.values()) == expected_worker_ports
expected_nanny_ports = set(parse_ports(nanny_port))
nanny_ports = c.run(get_port, nanny=True)
assert set(nanny_ports.values()) == expected_nanny_ports
def test_nanny_worker_port_range_too_many_workers_raises(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--nworkers",
"3",
"--host",
"127.0.0.1",
"--worker-port",
"9684:9685",
"--nanny-port",
"9686:9687",
"--no-dashboard",
]
) as worker:
assert any(
b"Could not start" in worker.stderr.readline() for _ in range(100)
)
def test_memory_limit(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
"--memory-limit",
"2e3MB",
"--no-dashboard",
]
):
with Client("127.0.0.1:8786", loop=loop) as c:
while not c.nthreads():
sleep(0.1)
info = c.scheduler_info()
[d] = info["workers"].values()
assert isinstance(d["memory_limit"], int)
assert d["memory_limit"] == 2e9
def test_no_nanny(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", "--no-nanny", "--no-dashboard"]
) as worker:
assert any(b"Registered" in worker.stderr.readline() for i in range(15))
@pytest.mark.slow
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@gen_cluster(client=True, nthreads=[])
async def test_no_reconnect(c, s, nanny):
with popen(
[
"dask-worker",
s.address,
"--no-reconnect",
nanny,
"--no-dashboard",
]
) as worker:
# roundtrip works
assert await c.submit(lambda x: x + 1, 10) == 11
(comm,) = s.stream_comms.values()
comm.abort()
# worker terminates as soon as the connection is aborted
await to_thread(worker.communicate, timeout=5)
assert worker.returncode == 0
@pytest.mark.slow
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@gen_cluster(client=True, nthreads=[])
async def test_reconnect(c, s, nanny):
with popen(
[
"dask-worker",
s.address,
"--reconnect",
nanny,
"--no-dashboard",
]
) as worker:
# roundtrip works
assert await c.submit(lambda x: x + 1, 10) == 11
(comm,) = s.stream_comms.values()
comm.abort()
# roundtrip still works, which means the worker reconnected
assert await c.submit(lambda x: x + 1, 11) == 12
# closing the scheduler cleanly does terminate the worker
await s.close()
await to_thread(worker.communicate, timeout=5)
assert worker.returncode == 0
def test_resources(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"tcp://127.0.0.1:8786",
"--no-dashboard",
"--resources",
"A=1 B=2,C=3",
]
):
with Client("127.0.0.1:8786", loop=loop) as c:
while not c.scheduler_info()["workers"]:
sleep(0.1)
info = c.scheduler_info()
worker = list(info["workers"].values())[0]
assert worker["resources"] == {"A": 1, "B": 2, "C": 3}
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_local_directory(loop, nanny):
with tmpfile() as fn:
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--local-directory",
fn,
]
):
with Client("127.0.0.1:8786", loop=loop, timeout=10) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 8
info = c.scheduler_info()
worker = list(info["workers"].values())[0]
assert worker["local_directory"].startswith(fn)
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_scheduler_file(loop, nanny):
with tmpfile() as fn:
with popen(["dask-scheduler", "--no-dashboard", "--scheduler-file", fn]):
with popen(
["dask-worker", "--scheduler-file", fn, nanny, "--no-dashboard"]
):
with Client(scheduler_file=fn, loop=loop) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 10
def test_scheduler_address_env(loop, monkeypatch):
monkeypatch.setenv("DASK_SCHEDULER_ADDRESS", "tcp://127.0.0.1:8786")
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "--no-dashboard"]):
with Client(os.environ["DASK_SCHEDULER_ADDRESS"], loop=loop) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 10
def test_nworkers_requires_nanny(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", "--nworkers=2", "--no-nanny"]
) as worker:
assert any(
b"Failed to launch worker" in worker.stderr.readline()
for i in range(15)
)
def test_nworkers_negative(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers=-1"]):
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
c.wait_for_workers(cpu_count(), timeout="10 seconds")
def test_nworkers_auto(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers=auto"]):
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
procs, _ = nprocesses_nthreads()
c.wait_for_workers(procs, timeout="10 seconds")
def test_nworkers_expands_name(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers", "2", "--name", "0"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers", "2"]):
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
start = time()
while len(c.scheduler_info()["workers"]) < 4:
sleep(0.2)
assert time() < start + 30
info = c.scheduler_info()
names = [d["name"] for d in info["workers"].values()]
foos = [n for n in names if n.startswith("0-")]
assert len(foos) == 2
assert len(set(names)) == 4
def test_worker_cli_nprocs_renamed_to_nworkers(loop):
n_workers = 2
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", f"--nprocs={n_workers}"]
) as worker:
assert any(
b"renamed to --nworkers" in worker.stderr.readline() for i in range(15)
)
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
c.wait_for_workers(n_workers, timeout="30 seconds")
def test_worker_cli_nworkers_with_nprocs_is_an_error():
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", "--nprocs=2", "--nworkers=2"]
) as worker:
assert any(
b"Both --nprocs and --nworkers" in worker.stderr.readline()
for i in range(15)
)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize(
"listen_address", ["tcp://0.0.0.0:39837", "tcp://127.0.0.2:39837"]
)
def test_contact_listen_address(loop, nanny, listen_address):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--contact-address",
"tcp://127.0.0.2:39837",
"--listen-address",
listen_address,
]
):
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
info = client.scheduler_info()
assert "tcp://127.0.0.2:39837" in info["workers"]
# roundtrip works
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
assert client.run(func) == {"tcp://127.0.0.2:39837": listen_address}
@requires_ipv6
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize("listen_address", ["tcp://:39838", "tcp://[::1]:39838"])
def test_listen_address_ipv6(loop, nanny, listen_address):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--listen-address",
listen_address,
]
):
# IPv4 used by default for name of global listener; IPv6 used by default when
# listening only on IPv6.
bind_all = "[::1]" not in listen_address
expected_ip = "127.0.0.1" if bind_all else "[::1]"
expected_name = f"tcp://{expected_ip}:39838"
expected_listen = "tcp://0.0.0.0:39838" if bind_all else listen_address
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
info = client.scheduler_info()
assert expected_name in info["workers"]
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
assert client.run(func) == {expected_name: expected_listen}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize("host", ["127.0.0.2", "0.0.0.0"])
def test_respect_host_listen_address(loop, nanny, host):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", nanny, "--no-dashboard", "--host", host]
) as worker:
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
client.scheduler_info()
# roundtrip works
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
listen_addresses = client.run(func)
assert all(host in v for v in listen_addresses.values())
def test_dashboard_non_standard_ports(loop):
pytest.importorskip("bokeh")
try:
import jupyter_server_proxy # noqa: F401
proxy_exists = True
except ImportError:
proxy_exists = False
with popen(["dask-scheduler", "--port", "3449"]):
with popen(
[
"dask-worker",
"tcp://127.0.0.1:3449",
"--dashboard-address",
":4833",
"--host",
"127.0.0.1",
]
):
with Client("127.0.0.1:3449", loop=loop) as c:
c.wait_for_workers(1)
pass
response = requests.get("http://127.0.0.1:4833/status")
assert response.ok
redirect_resp = requests.get("http://127.0.0.1:4833/main")
assert redirect_resp.ok
# TEST PROXYING WORKS
if proxy_exists:
url = "http://127.0.0.1:8787/proxy/4833/127.0.0.1/status"
response = requests.get(url)
assert response.ok
with pytest.raises(Exception):
requests.get("http://localhost:4833/status/")
def test_version_option():
runner = CliRunner()
result = runner.invoke(distributed.cli.dask_worker.main, ["--version"])
assert result.exit_code == 0
@pytest.mark.slow
@pytest.mark.parametrize("no_nanny", [True, False])
def test_worker_timeout(no_nanny):
runner = CliRunner()
args = ["192.168.1.100:7777", "--death-timeout=1"]
if no_nanny:
args.append("--no-nanny")
result = runner.invoke(distributed.cli.dask_worker.main, args)
assert result.exit_code != 0
def test_bokeh_deprecation():
pytest.importorskip("bokeh")
runner = CliRunner()
with pytest.warns(UserWarning, match="dashboard"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--bokeh"])
except ValueError:
# didn't pass scheduler
pass
with pytest.warns(UserWarning, match="dashboard"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--no-bokeh"])
except ValueError:
# didn't pass scheduler
pass
@gen_cluster(nthreads=[])
async def test_integer_names(s):
with popen(["dask-worker", s.address, "--name", "123"]):
while not s.workers:
await asyncio.sleep(0.01)
[ws] = s.workers.values()
assert ws.name == 123
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@gen_cluster(client=True, nthreads=[])
async def test_worker_class(c, s, tmp_path, nanny):
# Create module with custom worker class
WORKER_CLASS_TEXT = """
from distributed.worker import Worker
class MyWorker(Worker):
pass
"""
tmpdir = str(tmp_path)
tmpfile = str(tmp_path / "myworker.py")
with open(tmpfile, "w") as f:
f.write(WORKER_CLASS_TEXT)
# Put module on PYTHONPATH
env = os.environ.copy()
if "PYTHONPATH" in env:
env["PYTHONPATH"] = tmpdir + ":" + env["PYTHONPATH"]
else:
env["PYTHONPATH"] = tmpdir
with popen(
[
"dask-worker",
s.address,
nanny,
"--worker-class",
"myworker.MyWorker",
],
env=env,
):
await c.wait_for_workers(1)
def worker_type(dask_worker):
return type(dask_worker).__name__
worker_types = await c.run(worker_type)
assert all(name == "MyWorker" for name in worker_types.values())
@gen_cluster(nthreads=[], client=True)
async def test_preload_config(c, s):
# Ensure dask-worker pulls the preload from the Dask config if
# not specified via a command line option
preload_text = """
def dask_setup(worker):
worker.foo = 'setup'
"""
env = os.environ.copy()
env["DASK_DISTRIBUTED__WORKER__PRELOAD"] = preload_text
with popen(
[
"dask-worker",
s.address,
],
env=env,
):
await c.wait_for_workers(1)
[foo] = (await c.run(lambda dask_worker: dask_worker.foo)).values()
assert foo == "setup"
|
the-stack_106_27762 | """
Django settings for laalaa project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join, abspath, dirname
import sys
def here(*x):
return join(abspath(dirname(__file__)), *x)
PROJECT_ROOT = here("..")
def root(*x):
return join(abspath(PROJECT_ROOT), *x)
sys.path.insert(0, root("apps"))
# See https://github.com/ministryofjustice/django-moj-irat#usage for usage
HEALTHCHECKS = ["moj_irat.healthchecks.database_healthcheck"]
AUTODISCOVER_HEALTHCHECKS = True
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY", "DEV_KEY")
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
"kombu.transport.django",
"djcelery",
"rest_framework",
"rest_framework_gis",
"advisers",
)
MIDDLEWARE_CLASSES = (
"advisers.middleware.PingMiddleware",
"django.middleware.cache.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.cache.FetchFromCacheMiddleware",
)
ROOT_URLCONF = "laalaa.urls"
WSGI_APPLICATION = "laalaa.wsgi.application"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"DIRS": [root("templates")],
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.contrib.gis.db.backends.postgis",
"NAME": os.environ.get("DB_USERNAME", "laalaa"),
"USER": os.environ.get("DB_USERNAME", "postgres"),
"PASSWORD": os.environ.get("DB_PASSWORD", ""),
"HOST": os.environ.get("DB_HOST", "127.0.0.1"),
"PORT": os.environ.get("DB_PORT", "5432"),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = "en-gb"
TIME_ZONE = "Europe/London"
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = root("uploads")
MEDIA_URL = "/uploads/"
# 10MB
MAX_UPLOAD_SIZE = 10485760
# Force uploaded files to be written to disk
FILE_UPLOAD_MAX_MEMORY_SIZE = 0
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = root("static")
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
"EXCEPTION_HANDLER": "advisers.views.custom_exception_handler",
}
CACHES = {"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": root("cache")}}
CACHE_MIDDLEWARE_SECONDS = 3600
CELERY_ACCEPT_CONTENT = ["pickle", "json", "msgpack"]
CELERY_RESULT_BACKEND = "djcelery.backends.database:DatabaseBackend"
BROKER_URL = "amqp://%s:%s@%s//" % (
os.environ.get("RABBITMQ_USER", "guest"),
os.environ.get("RABBITMQ_PASS", "guest"),
os.environ.get("HOST_IP", "127.0.0.1"),
)
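# With the default environment values above, this evaluates to 'amqp://guest:guest@127.0.0.1//'.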
TEMP_DIRECTORY = root("tmp")
POSTCODES_IO_URL = "https://api.postcodes.io"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {"format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"},
"simple": {"format": "%(levelname)s %(message)s"},
"logstash": {"()": "logstash_formatter.LogstashFormatter"},
},
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {"django.request": {"handlers": ["mail_admins"], "level": "ERROR", "propagate": True}},
}
# APP LOGGING CONFIG
LOGGING["handlers"]["production_file"] = {
"level": "INFO",
"class": "logging.handlers.RotatingFileHandler",
"filename": "app.log",
"maxBytes": 1024 * 1024 * 5, # 5 MB
"backupCount": 7,
"formatter": "logstash",
"filters": ["require_debug_false"],
}
LOGGING["handlers"]["debug_file"] = {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": "debug.log",
"maxBytes": 1024 * 1024 * 5, # 5 MB
"backupCount": 7,
"formatter": "logstash",
"filters": ["require_debug_true"],
}
LOGGING["handlers"]["console"] = {"level": "DEBUG", "class": "logging.StreamHandler", "stream": sys.stdout}
LOGGING["loggers"][""] = {"handlers": ["console"], "level": "DEBUG"}
# RAVEN SENTRY CONFIG
if "SENTRY_DSN" in os.environ:
RAVEN_CONFIG = {"dsn": os.environ.get("SENTRY_DSN")}
INSTALLED_APPS += ("raven.contrib.django.raven_compat",)
MIDDLEWARE_CLASSES = (
"raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware",
) + MIDDLEWARE_CLASSES
# .local.py overrides all the common settings.
try:
from laalaa.settings.local import * # noqa: F401,F403
except ImportError:
pass
def override_setting(arg):
prefix = "--override-setting="
if arg and arg.startswith(prefix):
exec(arg[len(prefix) :])
return arg
if not hasattr(sys, "cli_args_overrides"):
def remove_arg(arg):
return sys.argv.remove(arg)
list(map(remove_arg, filter(None, map(override_setting, sys.argv))))  # wrap in list() so the lazy map/filter chain is evaluated on Python 3
setattr(sys, "cli_args_overrides", True)
|
the-stack_106_27763 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class DomainTopic(Resource):
"""Domain Topic.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified identifier of the resource
:vartype id: str
:ivar name: Name of the resource
:vartype name: str
:ivar type: Type of the resource
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(DomainTopic, self).__init__(**kwargs)
|
the-stack_106_27765 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import logging
import bcrypt
import pandas as pd
import zipfile
import tempfile
from collections import Counter
from PIL import Image
from singa_auto.constants import ServiceStatus, UserType, TrainJobStatus, ModelAccessRight, InferenceJobStatus
from singa_auto.config import SUPERADMIN_EMAIL, SUPERADMIN_PASSWORD
from singa_auto.meta_store import MetaStore
from singa_auto.model import LoggerUtils
from singa_auto.container import DockerSwarmContainerManager
from singa_auto.container import KubernetesContainerManager
from singa_auto.data_store import FileDataStore, DataStore
from singa_auto.param_store import FileParamStore, ParamStore
from .services_manager import ServicesManager
logger = logging.getLogger(__name__)
class UserExistsError(Exception):
pass
class UserAlreadyBannedError(Exception):
pass
class InvalidUserError(Exception):
pass
class InvalidPasswordError(Exception):
pass
class InvalidRunningInferenceJobError(Exception):
pass
class InvalidModelError(Exception):
pass
class InvalidTrainJobError(Exception):
pass
class InvalidTrialError(Exception):
pass
class RunningInferenceJobExistsError(Exception):
pass
class NoModelsForTrainJobError(Exception):
pass
class InvalidDatasetError(Exception):
pass
class Admin(object):
def __init__(self,
meta_store=None,
container_manager=None,
data_store=None,
param_store=None):
self._meta_store = meta_store or MetaStore()
if os.getenv('CONTAINER_MODE', 'SWARM') == 'SWARM':
container_manager = container_manager or DockerSwarmContainerManager(
)
else:
container_manager = container_manager or KubernetesContainerManager(
)
self._data_store: DataStore = data_store or FileDataStore()
self._param_store: ParamStore = param_store or FileParamStore()
self._base_worker_image = '{}:{}'.format(
os.environ['SINGA_AUTO_IMAGE_WORKER'],
os.environ['SINGA_AUTO_VERSION'])
self._services_manager = ServicesManager(self._meta_store,
container_manager)
def __enter__(self):
self._meta_store.connect()
def __exit__(self, exception_type, exception_value, traceback):
self._meta_store.disconnect()
def seed(self):
with self._meta_store:
self._seed_superadmin()
####################################
# Users
####################################
def authenticate_user(self, email, password):
user = self._meta_store.get_user_by_email(email)
if not user:
raise InvalidUserError()
if not self._if_hash_matches_password(password, user.password_hash):
raise InvalidPasswordError()
return {
'id': user.id,
'email': user.email,
'user_type': user.user_type,
'banned_date': user.banned_date
}
def create_user(self, email, password, user_type):
user = self._create_user(email, password, user_type)
return {'id': user.id, 'email': user.email, 'user_type': user.user_type}
def get_users(self):
users = self._meta_store.get_users()
return [{
'id': user.id,
'email': user.email,
'user_type': user.user_type,
'banned_date': user.banned_date
} for user in users]
def get_user_by_email(self, email):
user = self._meta_store.get_user_by_email(email)
if user is None:
return None
return {
'id': user.id,
'email': user.email,
'user_type': user.user_type,
'banned_date': user.banned_date
}
def ban_user(self, email):
user = self._meta_store.get_user_by_email(email)
if user is None:
raise InvalidUserError()
if user.banned_date is not None:
raise UserAlreadyBannedError()
self._meta_store.ban_user(user)
return {
'id': user.id,
'email': user.email,
'user_type': user.user_type,
'banned_date': user.banned_date
}
####################################
# Datasets
####################################
def create_dataset(self, user_id, name, task, data_file_path):
# Store dataset in data folder
print('begin saving to local path')
store_dataset = self._data_store.save(data_file_path)
# Get metadata for the dataset; 'store_dataset' exposes only the following info
store_dataset_id = store_dataset.id
size_bytes = store_dataset.size_bytes
stat = dict()
if len(os.path.splitext(data_file_path)) == 2 and os.path.splitext(
data_file_path)[1] == '.zip':
dataset_zipfile = zipfile.ZipFile(data_file_path, 'r')
if 'images.csv' in dataset_zipfile.namelist():
for fileName in dataset_zipfile.namelist():
if fileName.endswith('.csv'):
num_samples = len(dataset_zipfile.namelist()) - 1
# create tempdir to store unziped csv and a sample image
with tempfile.TemporaryDirectory() as dir_path:
# read dataset zipfile # data_file_path=os.path.join(os.getcwd(),name+'.zip')
# obtain csv file
# Extract a single file from zip
csv_path = dataset_zipfile.extract(fileName,
path=dir_path)
# obtain a sample
sample_name = pd.read_csv(csv_path,
nrows=1).iloc[0][0]
if task == 'IMAGE_CLASSIFICATION':
img_path = dataset_zipfile.extract(
sample_name, path=dir_path)
img = Image.open(img_path)
img_size = str(img.size)
# close dataset zipfile
dataset_zipfile.close()
csv = pd.read_csv(csv_path)
# num_classes = len(labels)
if len(csv.columns) == 2:
class_count = csv[csv.columns[1]].value_counts()
else:
labels = pd.read_csv(
csv_path, nrows=0).columns[1::].to_list()
class_count = (
csv[csv.columns[1::]] == 1).astype(int).sum(
axis=0)
num_labeled_samples = len(csv[csv.columns[0]].unique())
ratio = class_count / num_labeled_samples
num_unlabeled_samples = num_samples - num_labeled_samples
break
else:
# if the csv file was not provided in zip
with tempfile.TemporaryDirectory() as dir_path:
num_labeled_samples = len(dataset_zipfile.namelist())
num_unlabeled_samples = 0
d_list = [
x for x in dataset_zipfile.namelist()
if x.endswith('/') == False
]
labels = [os.path.dirname(x) for x in d_list]
class_count = pd.DataFrame(list(Counter(labels).values()),
list(Counter(labels).keys()))
ratio = class_count / num_labeled_samples
sample_name = d_list[0]
if task == 'IMAGE_CLASSIFICATION':
img_path = dataset_zipfile.extract(sample_name,
path=dir_path)
img = Image.open(img_path)
img_size = str(img.size)
if task == 'IMAGE_CLASSIFICATION':
stat = {
'num_labeled_samples': num_labeled_samples,
'num_unlabeled_samples': num_unlabeled_samples,
'class_count': class_count.to_json(),
'ratio': ratio.to_json(),
'img_size': img_size
}
else:
stat = {
'num_labeled_samples': num_labeled_samples,
'num_unlabeled_samples': num_unlabeled_samples,
'class_count': class_count.to_json(),
'ratio': ratio.to_json()
}
print('begin saving to db')
dataset = self._meta_store.create_dataset(name, task, size_bytes,
store_dataset_id, user_id,
stat)
self._meta_store.commit()
return {
'id': dataset.id,
'name': dataset.name,
'task': dataset.task,
'size_bytes': dataset.size_bytes,
'store_dataset_id': dataset.store_dataset_id,
'owner_id': dataset.owner_id,
'stat': dataset.stat,
}
def get_dataset(self, dataset_id):
dataset = self._meta_store.get_dataset(dataset_id)
if dataset is None:
raise InvalidDatasetError()
return {
'id': dataset.id,
'name': dataset.name,
'task': dataset.task,
'datetime_created': dataset.datetime_created,
'size_bytes': dataset.size_bytes,
'owner_id': dataset.owner_id,
'stat': dataset.stat,
}
def get_datasets(self, user_id, task=None):
datasets = self._meta_store.get_datasets(user_id, task)
datasetdicts = []
for x in datasets:
datasetdict = {
'id': x.id,
'name': x.name,
'task': x.task,
'datetime_created': x.datetime_created,
'size_bytes': x.size_bytes,
'store_dataset_id': x.store_dataset_id,
'stat': x.stat,
}
datasetdicts.append(datasetdict)
return datasetdicts
####################################
# Train Job
####################################
def create_train_job(self,
user_id,
app,
task,
train_dataset_id,
val_dataset_id,
budget,
model_ids=None,
train_args=None):
"""
Creates and starts a train job on SINGA-Auto.
A train job is uniquely identified by user, its associated app, and the app version (returned in output).
Only admins, model developers & app developers can manage train jobs. Model developers & app developers can only manage their own train jobs.
:param app: Name of the app associated with the train job
:param task: Task associated with the train job,
the train job will train models associated with the task
:param train_dataset_id: ID of the train dataset, previously created on SINGA-Auto
:param val_dataset_id: ID of the validation dataset, previously created on SINGA-Auto
:param budget: Budget for train job
The following describes the budget options available:
===================== =====================
**Budget Option** **Description**
--------------------- ---------------------
``TIME_HOURS`` Max no. of hours to train (soft target). Defaults to 0.1.
``GPU_COUNT`` No. of GPUs to allocate for training, across all models. Defaults to 0.
``MODEL_TRIAL_COUNT`` Max no. of trials to conduct for each model (soft target). -1 for unlimited. Defaults to -1.
===================== =====================
``budget`` should be a dictionary of ``{ <budget_type>: <budget_amount> }``, where
``<budget_type>`` is one of :class:`singa_auto.constants.BudgetOption` and
``<budget_amount>`` specifies the amount for the associated budget option.
:param model_ids: List of IDs of model to use for train job.
NOTE: only client.py defaults to all models if model_ids is None!
:param train_args: Additional arguments to pass to models during training, if any.
Refer to the task's specification for appropriate arguments
:returns: Created train job as dictionary
"""
if train_args is None:
train_args = {}
if model_ids is None:
avail_models = self.get_available_models(task)
model_ids = [x['id'] for x in avail_models]
# Ensure there is no existing train job for app
train_jobs = self._meta_store.get_train_jobs_by_app(user_id, app)
if any([
x.status in [TrainJobStatus.RUNNING, TrainJobStatus.STARTED]
for x in train_jobs
]):
raise InvalidTrainJobError(
'Another train job for app "{}" is still running!'.format(app))
# Ensure at least 1 model
if len(model_ids) == 0:
raise NoModelsForTrainJobError()
# config.load_kube_config(config_file='/root/singa_auto/k8sconfig')
logger.info('config k8s')
# self._client_service = kubernetes.client.CoreV1Api()
# Compute auto-incremented app version
app_version = max([x.app_version for x in train_jobs], default=0) + 1
# Get models available to user
avail_model_ids = [
x.id for x in self._meta_store.get_available_models(user_id, task)
]
# Warn if there are no models for task
if len(avail_model_ids) == 0:
raise InvalidModelError(
f'No models are available for task "{task}"')
# Ensure all specified models are available
for model_id in model_ids:
if model_id not in avail_model_ids:
raise InvalidModelError(
f'model with ID "{model_id}" does not belong to the user "{user_id}" for task "{task}"'
)
# Ensure that datasets are valid and of the correct task
try:
train_dataset = self._meta_store.get_dataset(train_dataset_id)
assert train_dataset is not None
assert train_dataset.task == task
val_dataset = self._meta_store.get_dataset(val_dataset_id)
assert val_dataset is not None
assert val_dataset.task == task
except AssertionError as e:
raise InvalidDatasetError(e)
# Create train & sub train jobs in DB
train_job = self._meta_store.create_train_job(
user_id=user_id,
app=app,
app_version=app_version,
task=task,
budget=budget,
train_dataset_id=train_dataset_id,
val_dataset_id=val_dataset_id,
train_args=train_args)
self._meta_store.commit()
for model_id in model_ids:
self._meta_store.create_sub_train_job(train_job_id=train_job.id,
model_id=model_id)
self._meta_store.commit()
self._services_manager.create_train_services(train_job.id)
return {
'id': train_job.id,
'app': train_job.app,
'app_version': train_job.app_version
}
def stop_train_job(self, user_id, app, app_version=-1):
train_job = self._meta_store.get_train_job_by_app_version(
user_id, app, app_version=app_version)
if train_job is None:
raise InvalidTrainJobError()
self._services_manager.stop_train_services(train_job.id)
return {
'id': train_job.id,
'app': train_job.app,
'app_version': train_job.app_version
}
def stop_sub_train_job(self, sub_train_job_id):
self._services_manager.stop_sub_train_job_services(sub_train_job_id)
return {'id': sub_train_job_id}
def get_train_job(self, user_id, app, app_version=-1): # by app ver
"""
get_train_job() is called by:
@app.route('/train_jobs/<app>/<app_version>',
methods=['GET'])
"""
train_job = self._meta_store.get_train_job_by_app_version(
user_id, app, app_version=app_version)
if train_job is None:
raise InvalidTrainJobError()
return {
'id': train_job.id,
'status': train_job.status,
'app': train_job.app,
'app_version': train_job.app_version,
'task': train_job.task,
'train_dataset_id': train_job.train_dataset_id,
'val_dataset_id': train_job.val_dataset_id,
'train_args': train_job.train_args,
'datetime_started': train_job.datetime_started,
'datetime_stopped': train_job.datetime_stopped
}
def get_train_jobs_by_app(self, user_id, app):
"""
unlike get_train_jobs_by_user,
get_train_jobs_by_app is for:
GET /train_jobs/{app}
"""
train_jobs = self._meta_store.get_train_jobs_by_app(user_id, app)
return [{
'id': x.id,
'status': x.status,
'app': x.app,
'app_version': x.app_version,
'task': x.task,
'train_dataset_id': x.train_dataset_id,
'val_dataset_id': x.val_dataset_id,
'train_args': x.train_args,
'datetime_started': x.datetime_started,
'datetime_stopped': x.datetime_stopped,
'budget': x.budget
} for x in train_jobs]
def get_train_jobs_by_user(self, user_id):
"""
unlike get_train_jobs_by_app,
get_train_jobs_by_user is called by:
@app.route('/train_jobs', methods=['GET'])
"""
train_jobs = self._meta_store.get_train_jobs_by_user(user_id)
return [{
'id': x.id,
'status': x.status,
'app': x.app,
'app_version': x.app_version,
'task': x.task,
'train_dataset_id': x.train_dataset_id,
'val_dataset_id': x.val_dataset_id,
'train_args': x.train_args,
'datetime_started': x.datetime_started,
'datetime_stopped': x.datetime_stopped,
'budget': x.budget
} for x in train_jobs]
def stop_all_train_jobs(self):
train_jobs = self._meta_store.get_train_jobs_by_statuses(
[TrainJobStatus.STARTED, TrainJobStatus.RUNNING])
for train_job in train_jobs:
self._services_manager.stop_train_services(train_job.id)
return [{'id': train_job.id} for train_job in train_jobs]
####################################
# Trials
####################################
def get_trial(self, trial_id):
trial = self._meta_store.get_trial(trial_id)
model = self._meta_store.get_model(trial.model_id)
if trial is None:
raise InvalidTrialError()
return {
'id': trial.id,
'no': trial.no,
'worker_id': trial.worker_id,
'proposal': trial.proposal,
'datetime_started': trial.datetime_started,
'status': trial.status,
'datetime_stopped': trial.datetime_stopped,
'model_name': model.name,
'score': trial.score,
'is_params_saved': trial.is_params_saved
}
def get_best_trials_of_train_job(self,
user_id,
app,
app_version=-1,
max_count=2):
train_job = self._meta_store.get_train_job_by_app_version(
user_id, app, app_version=app_version)
if train_job is None:
raise InvalidTrainJobError()
best_trials = self._meta_store.get_best_trials_of_train_job(
train_job.id, max_count=max_count)
trials_models = [
self._meta_store.get_model(x.model_id) for x in best_trials
]
return [{
'id': trial.id,
'proposal': trial.proposal,
'datetime_started': trial.datetime_started,
'status': trial.status,
'datetime_stopped': trial.datetime_stopped,
'model_name': model.name,
'score': trial.score,
'is_params_saved': trial.is_params_saved
} for (trial, model) in zip(best_trials, trials_models)]
def get_trial_logs(self, trial_id):
trial = self._meta_store.get_trial(trial_id)
if trial is None:
raise InvalidTrialError()
trial_logs = self._meta_store.get_trial_logs(trial_id)
log_lines = [x.line for x in trial_logs]
(messages, metrics, plots) = LoggerUtils.parse_logs(log_lines)
return {'plots': plots, 'metrics': metrics, 'messages': messages}
def get_trial_parameters(self, trial_id):
trial = self._meta_store.get_trial(trial_id)
if trial is None:
raise InvalidTrialError()
if not trial.is_params_saved:
raise InvalidTrialError('Trial\'s model parameters were not saved')
params = self._param_store.load(trial.store_params_id)
return params
def get_trials_of_train_job(self,
user_id,
app,
app_version=-1,
limit=1000,
offset=0): ### return top 1000
train_job = self._meta_store.get_train_job_by_app_version(
user_id, app, app_version=app_version)
if train_job is None:
raise InvalidTrainJobError()
trials = self._meta_store.get_trials_of_train_job(train_job.id,
limit=limit,
offset=offset)
trials_models = [self._meta_store.get_model(x.model_id) for x in trials]
return [{
'id': trial.id,
'no': trial.no,
'worker_id': trial.worker_id,
'proposal': trial.proposal,
'datetime_started': trial.datetime_started,
'status': trial.status,
'datetime_stopped': trial.datetime_stopped,
'model_name': model.name,
'score': trial.score,
'is_params_saved': trial.is_params_saved
} for (trial, model) in zip(trials, trials_models)]
####################################
# Inference Job
####################################
def create_inference_job_by_checkpoint(self,
user_id,
budget,
model_name=None):
# if there no train job, create inference job by using pretrained model.
if model_name is None:
raise InvalidTrainJobError('please provide a model name')
model = self._meta_store.get_model_by_name(user_id=user_id,
name=model_name)
if model.checkpoint_id is None:
raise InvalidTrainJobError(
'Have you uploaded a checkpoint file for this {}?'.format(
model_name))
# Create inference job in DB
inference_job = self._meta_store.create_inference_job(user_id=user_id,
model_id=model.id,
budget=budget)
self._meta_store.commit()
(inference_job, predictor_service) = \
self._services_manager.create_inference_services(inference_job.id, use_checkpoint=True)
return {
'id': inference_job.id,
'model_id': model.id,
'predictor_host': predictor_service.host
}
def create_inference_job(self, user_id, app, app_version, budget):
train_job = self._meta_store.get_train_job_by_app_version(
user_id, app, app_version=app_version)
if train_job is None:
raise InvalidTrainJobError(
'Have you started a train job for this app?')
if train_job.status != TrainJobStatus.STOPPED:
raise InvalidTrainJobError('Train job must be of status `STOPPED`.')
# Ensure only 1 running inference job for 1 train job
inference_job = self._meta_store.get_deployed_inference_job_by_train_job(
train_job.id)
if inference_job is not None:
raise RunningInferenceJobExistsError()
# Get trials to load for inference job
best_trials = self._meta_store.get_best_trials_of_train_job(
train_job.id, max_count=2)
if len(best_trials) == 0:
raise InvalidTrainJobError(
'Train job has no trials with saved models!')
# Create inference job in DB
inference_job = self._meta_store.create_inference_job(
user_id=user_id, train_job_id=train_job.id, budget=budget)
self._meta_store.commit()
(inference_job, predictor_service) = \
self._services_manager.create_inference_services(inference_job.id)
return {
'id': inference_job.id,
'train_job_id': train_job.id,
'app': train_job.app,
'app_version': train_job.app_version,
'predictor_host': predictor_service.host
}
def stop_inference_job(self, user_id, app, app_version=-1):
train_job = self._meta_store.get_train_job_by_app_version(
user_id, app, app_version=app_version)
if train_job is None:
# TODO: the REST API needs to return some JSON instead of just raising errors!
raise InvalidRunningInferenceJobError()
inference_job = self._meta_store.get_deployed_inference_job_by_train_job(
train_job.id)
if inference_job is None:
# TODO: the REST API needs to return some JSON instead of just raising errors!
raise InvalidRunningInferenceJobError()
inference_job = self._services_manager.stop_inference_services(
inference_job.id)
return {
'id': inference_job.id,
'train_job_id': train_job.id,
'app': train_job.app,
'app_version': train_job.app_version
}
def get_running_inference_job(self, user_id, app, app_version=-1):
train_job = self._meta_store.get_train_job_by_app_version(
user_id, app, app_version=app_version)
if train_job is None:
# TODO: the REST API needs to return some JSON instead of just raising errors!
raise InvalidRunningInferenceJobError()
inference_job = self._meta_store.get_deployed_inference_job_by_train_job(
train_job.id)
if inference_job is None:
# TODO: the REST API needs to return some JSON instead of just raising errors!
raise InvalidRunningInferenceJobError()
predictor_service = self._meta_store.get_service(inference_job.predictor_service_id) \
if inference_job.predictor_service_id is not None else None
return {
'id':
inference_job.id,
'status':
inference_job.status,
'train_job_id':
train_job.id,
'app':
train_job.app,
'app_version':
train_job.app_version,
'datetime_started':
inference_job.datetime_started,
'datetime_stopped':
inference_job.datetime_stopped,
'predictor_host':
predictor_service.host
if predictor_service is not None else None
}
def get_inference_jobs_of_app(self, user_id, app):
inference_jobs = self._meta_store.get_inference_jobs_of_app(
user_id, app)
train_jobs = [
self._meta_store.get_train_job(x.train_job_id)
for x in inference_jobs
]
return [{
'id': inference_job.id,
'status': inference_job.status,
'train_job_id': train_job.id,
'app': train_job.app,
'app_version': train_job.app_version,
'datetime_started': inference_job.datetime_started,
'datetime_stopped': inference_job.datetime_stopped
} for (inference_job, train_job) in zip(inference_jobs, train_jobs)]
def get_inference_jobs_by_user(self, user_id):
inference_jobs = self._meta_store.get_inference_jobs_by_user(user_id)
res = list()
for inference_job in inference_jobs:
if inference_job.status in ['RUNNING', 'STARTED', 'ERRORED']:
if inference_job.train_job_id:
train_job = self._meta_store.get_train_job(
inference_job.train_job_id)
res.append({
'id': inference_job.id,
'status': inference_job.status,
'train_job_id': train_job.id,
'app': train_job.app,
'app_version': train_job.app_version,
'datetime_started': inference_job.datetime_started,
'datetime_stopped': inference_job.datetime_stopped
})
elif inference_job.model_id:
model = self._meta_store.get_model(inference_job.model_id)
res.append({
'id':
inference_job.id,
'status':
inference_job.status,
'train_job_id':
"ByCheckpoint: {}".format(model.checkpoint_id),
'app':
'N/A',
'app_version':
'N/A',
'datetime_started':
inference_job.datetime_started,
'datetime_stopped':
inference_job.datetime_stopped
})
return res
def stop_all_inference_jobs(self):
inference_jobs = self._meta_store.get_inference_jobs_by_statuses(
[InferenceJobStatus.STARTED, InferenceJobStatus.RUNNING])
for inference_job in inference_jobs:
self._services_manager.stop_inference_services(inference_job.id)
return [{'id': inference_job.id} for inference_job in inference_jobs]
####################################
# Events
####################################
def handle_event(self, name, **params):
# Call upon corresponding method of name
try:
method_name = f'_on_{name}'
method = getattr(self, method_name)
method(**params)
except AttributeError:
logger.error('Unknown event: "{}"'.format(name))
def _on_sub_train_job_advisor_started(self, sub_train_job_id):
self._services_manager.refresh_sub_train_job_status(sub_train_job_id)
def _on_sub_train_job_advisor_stopped(self, sub_train_job_id):
self._services_manager.refresh_sub_train_job_status(sub_train_job_id)
def _on_sub_train_job_budget_reached(self, sub_train_job_id):
self._services_manager.stop_sub_train_job_services(sub_train_job_id)
def _on_train_job_worker_started(self, sub_train_job_id):
self._services_manager.refresh_sub_train_job_status(sub_train_job_id)
def _on_train_job_worker_stopped(self, sub_train_job_id):
self._services_manager.refresh_sub_train_job_status(sub_train_job_id)
def _on_inference_job_worker_started(self, inference_job_id):
self._services_manager.refresh_inference_job_status(inference_job_id)
def _on_inference_job_worker_stopped(self, inference_job_id):
self._services_manager.refresh_inference_job_status(inference_job_id)
def _on_predictor_started(self, inference_job_id):
self._services_manager.refresh_inference_job_status(inference_job_id)
def _on_predictor_stopped(self, inference_job_id):
self._services_manager.refresh_inference_job_status(inference_job_id)
####################################
# Models
####################################
def create_model(self,
user_id,
name,
task,
model_file_bytes,
model_class,
docker_image=None,
dependencies=None,
access_right=ModelAccessRight.PRIVATE,
checkpoint_id=None):
if dependencies is None:
dependencies = {}
model = self._meta_store.create_model(
user_id=user_id,
name=name,
task=task,
model_file_bytes=model_file_bytes,
model_class=model_class,
docker_image=(docker_image or self._base_worker_image),
dependencies=dependencies,
access_right=access_right,
checkpoint_id=checkpoint_id)
self._meta_store.commit()
return {'id': model.id, 'user_id': model.user_id, 'name': model.name}
def delete_model(self, model_id):
model = self._meta_store.get_model(model_id)
if model is None:
raise InvalidModelError()
self._meta_store.delete_model(model)
return {'id': model.id, 'user_id': model.user_id, 'name': model.name}
def get_model_by_name(self, user_id, name):
model = self._meta_store.get_model_by_name(user_id, name)
if model is None:
raise InvalidModelError()
return {
'id': model.id,
'user_id': model.user_id,
'name': model.name,
'task': model.task,
'model_class': model.model_class,
'datetime_created': model.datetime_created,
'docker_image': model.docker_image,
'dependencies': model.dependencies,
'access_right': model.access_right,
'checkpoint_id': model.checkpoint_id,
}
def get_model(self, model_id):
model = self._meta_store.get_model(model_id)
if model is None:
raise InvalidModelError()
return {
'id': model.id,
'user_id': model.user_id,
'name': model.name,
'task': model.task,
'model_class': model.model_class,
'datetime_created': model.datetime_created,
'docker_image': model.docker_image,
'dependencies': model.dependencies,
'access_right': model.access_right
}
def get_model_file(self, model_id):
model = self._meta_store.get_model(model_id)
if model is None:
raise InvalidModelError()
return model.model_file_bytes
def get_available_models(self, user_id, task=None):
models = self._meta_store.get_available_models(user_id, task)
return [{
'id': model.id,
'user_id': model.user_id,
'name': model.name,
'task': model.task,
'datetime_created': model.datetime_created,
'dependencies': model.dependencies,
'access_right': model.access_right
} for model in models]
def get_recommend_models(self, user_id, dataset_id):
dataset = self._meta_store.get_dataset(dataset_id)
task = dataset.task
models = self._meta_store.get_available_models(user_id, task)
for model in models:
if model.name == 'resnet':
return [{
'id': model.id,
'user_id': model.user_id,
'name': model.name,
'task': model.task,
'datetime_created': model.datetime_created,
'dependencies': model.dependencies,
'access_right': model.access_right
}]
# If resnet cannot be found, return the first available model
for model in models:
return [{
'id': model.id,
'user_id': model.user_id,
'name': model.name,
'task': model.task,
'datetime_created': model.datetime_created,
'dependencies': model.dependencies,
'access_right': model.access_right
}]
####################################
# Private / Users
####################################
def _seed_superadmin(self):
# Seed superadmin
try:
self._create_user(email=SUPERADMIN_EMAIL,
password=SUPERADMIN_PASSWORD,
user_type=UserType.SUPERADMIN)
logger.info('Seeded superadmin...')
except UserExistsError:
logger.info('Skipping superadmin creation as it already exists...')
def _hash_password(self, password):
password_hash = bcrypt.hashpw(password.encode('utf-8'),
bcrypt.gensalt())
return password_hash
def _if_hash_matches_password(self, password, password_hash):
return bcrypt.checkpw(password.encode('utf-8'), password_hash)
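# Illustrative round trip (hypothetical password), relying only on the bcrypt
# hashpw/checkpw calls used above:
#   h = self._hash_password('s3cret')
#   assert self._if_hash_matches_password('s3cret', h)
#   assert not self._if_hash_matches_password('wrong', h)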
def _create_user(self, email, password, user_type):
password_hash = self._hash_password(password)
user = self._meta_store.get_user_by_email(email)
if user is not None:
raise UserExistsError()
user = self._meta_store.create_user(email, password_hash, user_type)
self._meta_store.commit()
return user
|
the-stack_106_27766 | # coding=utf-8
import copy
import time
from config.TicketEnmu import ticket
from config.emailConf import sendEmail
from config.serverchanConf import sendServerChan
from config.bearyChat import notification_by_bearyChat
from myException.ticketIsExitsException import ticketIsExitsException
from myException.ticketNumOutException import ticketNumOutException
class queryOrderWaitTime:
"""
Queue for the order (poll 12306 for the order's waiting status).
"""
def __init__(self, session):
self.session = session
def sendQueryOrderWaitTime(self):
"""
Poll the queue for the order's waiting info: send a request every 3 seconds,
up to a maximum of 20 requests.
:return:
"""
num = 1
while True:
num += 1
if num > ticket.OUT_NUM:
print(ticket.WAIT_OUT_NUM)
order_id = self.queryMyOrderNoComplete() # queuing failed; automatically cancel the queued order
if order_id:
self.cancelNoCompleteMyOrder(order_id)
break
try:
queryOrderWaitTimeUrl = copy.deepcopy(self.session.urls["queryOrderWaitTimeUrl"])
queryOrderWaitTimeUrl["req_url"] = queryOrderWaitTimeUrl["req_url"].format(int(round(time.time() * 1000)))
queryOrderWaitTimeResult = self.session.httpClint.send(queryOrderWaitTimeUrl)
except ValueError:
queryOrderWaitTimeResult = {}
if queryOrderWaitTimeResult:
if queryOrderWaitTimeResult.get("status", False):
data = queryOrderWaitTimeResult.get("data", False)
if data and data.get("orderId", ""):
sendEmail(ticket.WAIT_ORDER_SUCCESS.format(
data.get("orderId", "")))
sendServerChan(ticket.WAIT_ORDER_SUCCESS.format(
data.get("orderId", "")))
notification_by_bearyChat()
raise ticketIsExitsException(ticket.WAIT_ORDER_SUCCESS.format(
data.get("orderId")))
elif data.get("msg", False):
print(data.get("msg", ""))
break
elif data.get("waitTime", False):
print(ticket.WAIT_ORDER_CONTINUE.format(0 - data.get("waitTime", False)))
else:
pass
elif queryOrderWaitTimeResult.get("messages", False):
print(ticket.WAIT_ORDER_FAIL.format(queryOrderWaitTimeResult.get("messages", "")))
else:
print(ticket.WAIT_ORDER_NUM.format(num + 1))
else:
pass
time.sleep(2)
else:
print(ticketNumOutException(ticket.WAIT_ORDER_SUB_FAIL))
def queryMyOrderNoComplete(self):
"""
Get the order list info.
:return:
"""
self.initNoComplete()
queryMyOrderNoCompleteUrl = self.session.urls["queryMyOrderNoCompleteUrl"]
data = {"_json_att": ""}
try:
queryMyOrderNoCompleteResult = self.session.httpClint.send(queryMyOrderNoCompleteUrl, data)
except ValueError:
queryMyOrderNoCompleteResult = {}
if queryMyOrderNoCompleteResult:
if queryMyOrderNoCompleteResult.get("data", False) and queryMyOrderNoCompleteResult["data"].get("orderDBList", False):
return queryMyOrderNoCompleteResult["data"]
elif queryMyOrderNoCompleteResult.get("data", False) and queryMyOrderNoCompleteResult["data"].get("orderCacheDTO", False):
if queryMyOrderNoCompleteResult["data"]["orderCacheDTO"].get("message", False):
print(queryMyOrderNoCompleteResult["data"]["orderCacheDTO"]["message"]["message"])
raise ticketNumOutException(
queryMyOrderNoCompleteResult["data"]["orderCacheDTO"]["message"]["message"])
else:
if queryMyOrderNoCompleteResult.get("message", False):
print(queryMyOrderNoCompleteResult.get("message", False))
return False
else:
return False
else:
return False
def initNoComplete(self):
"""
Before fetching orders, we need to visit the order-list page first to obtain
the order-list page session.
:return:
"""
initNoCompleteUrl = self.session.urls["initNoCompleteUrl"]
data = {"_json_att": ""}
self.session.httpClint.send(initNoCompleteUrl, data)
def cancelNoCompleteMyOrder(self, sequence_no):
"""
Cancel the order.
:param sequence_no: order number
:return:
"""
cancelNoCompleteMyOrderUrl = self.session.urls["cancelNoCompleteMyOrder"]
cancelNoCompleteMyOrderData = {
"sequence_no": sequence_no,
"cancel_flag": "cancel_order",
"_json_att": ""
}
cancelNoCompleteMyOrderResult = self.session.httpClint.send(cancelNoCompleteMyOrderUrl,
cancelNoCompleteMyOrderData)
if cancelNoCompleteMyOrderResult.get("data", False) and cancelNoCompleteMyOrderResult["data"].get("existError", "N"):
print(ticket.CANCEL_ORDER_SUCCESS.format(sequence_no))
time.sleep(2)
return True
else:
print(ticket.CANCEL_ORDER_FAIL.format(sequence_no))
return False
|
the-stack_106_27767 | # coding=utf-8
# Copyright 2020 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file to run lending experiments for demonstration purposes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from agents import threshold_policies
from experiments import lending
from experiments import lending_plots
import matplotlib.pyplot as plt
import numpy as np
import simplejson as json
flags.DEFINE_string('plots_directory', None, 'Directory to write out plots.')
flags.DEFINE_bool('equalize_opportunity', False,
'If true, apply equality of opportunity constraints.')
flags.DEFINE_integer('num_steps', 10000,
'Number of steps to run the simulation.')
FLAGS = flags.FLAGS
# Control float precision in json encoding.
json.encoder.FLOAT_REPR = lambda o: repr(round(o, 3))
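# e.g. a value of 0.123456 is serialized as 0.123 instead of its full repr.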
MAXIMIZE_REWARD = threshold_policies.ThresholdPolicy.MAXIMIZE_REWARD
EQUALIZE_OPPORTUNITY = threshold_policies.ThresholdPolicy.EQUALIZE_OPPORTUNITY
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
np.random.seed(100)
group_0_prob = 0.5
maximize_reward_result = lending.Experiment(
group_0_prob=group_0_prob,
interest_rate=1.0,
bank_starting_cash=10000,
seed=200,
num_steps=FLAGS.num_steps,
burnin=200,
cluster_shift_increment=0.01,
include_cumulative_loans=True,
return_json=False,
threshold_policy=MAXIMIZE_REWARD).run()
equality_of_opportunity_result = lending.Experiment(
group_0_prob=group_0_prob,
interest_rate=1.0,
bank_starting_cash=10000,
seed=200,
num_steps=FLAGS.num_steps,
burnin=200,
cluster_shift_increment=0.01,
include_cumulative_loans=True,
return_json=False,
threshold_policy=EQUALIZE_OPPORTUNITY).run()
lending_plots.do_plotting(maximize_reward_result,
equality_of_opportunity_result,
equality_of_opportunity_result,
# static_equality_of_opportunity_result,
FLAGS.plots_directory,
options=None)
if __name__ == '__main__':
app.run(main)
|
the-stack_106_27768 | import datetime
import re
import time
import unicodedata
import urllib
from django.contrib.gis.db import models
from django.conf import settings
from django.db import connections, transaction
from django.db.models import Q
from connections import get_connection_name
from constants import (STATUS_CHOICES, STATUS_LIVE, USER_SCHEMAS, NEIGHBOR_MESSAGE_SCHEMAS, UGC, RemovalReasons)
def field_mapping(schema_id_list, db):
"""
Given a list of schema IDs, returns a dictionary of dictionaries, mapping
schema_ids to dictionaries mapping the fields' name->real_name.
Example return value:
{1: {u'crime_type': 'varchar01', u'crime_date', 'date01'},
2: {u'permit_number': 'varchar01', 'to_date': 'date01'},
}
"""
# schema_fields = [{'schema_id': 1, 'name': u'crime_type', 'real_name': u'varchar01'},
# {'schema_id': 1, 'name': u'crime_date', 'real_name': u'date01'}]
result = {}
for sf in SchemaField.objects.using(db).filter(schema__id__in=(schema_id_list)).values('schema', 'name', 'real_name'):
result.setdefault(sf['schema'], {})[sf['name']] = sf['real_name']
return result
class SchemaManager(models.Manager):
def for_metro(self, short_name):
return self.using(get_connection_name(short_name))
class PublicSchemaManager(SchemaManager):
def get_queryset(self):
return super(PublicSchemaManager, self).get_queryset().filter(is_public=True)
BUCKET_CHOICES = (
(0, 'From the Web'),
(1, 'Public records'),
(3, 'Neighbor messages'),
)
class FakeSchema(object):
def __init__(self, slug, name, plural_name):
self.slug = slug
self.name = name
self.plural_name = plural_name
self.is_active = True
def is_neighbor_message(self):
return True
class Schema(models.Model):
bucket = models.SmallIntegerField(choices=BUCKET_CHOICES)
name = models.CharField(max_length=100)
plural_name = models.CharField(max_length=100)
indefinite_article = models.CharField(max_length=2) # 'a' or 'an'
slug = models.CharField(max_length=32, unique=True)
min_date = models.DateField() # the earliest available NewsItem.pub_date for this Schema
last_updated = models.DateField()
date_name = models.CharField(max_length=32) # human-readable name for the NewsItem.item_date field
date_name_plural = models.CharField(max_length=32)
is_public = models.BooleanField(db_index=True)
is_active = models.BooleanField() # Whether this is still updated, displayed in navigation lists, etc.
has_newsitem_detail = models.BooleanField()
allow_comments = models.BooleanField()
has_linkable_locations = models.BooleanField()
pattern = models.CharField(max_length=32, blank=True)
launch_date = models.DateField() # the date that this schema was first made public
#objects = SchemaManager()
#public_objects = PublicSchemaManager()
def __unicode__(self):
return self.name
def url(self):
return '/%s/' % self.slug
def is_new(self):
return datetime.date.today() - self.launch_date < datetime.timedelta(days=7)
def is_neighbor_content(self):
return self.slug in USER_SCHEMAS
def is_neighbor_message(self):
return self.slug in NEIGHBOR_MESSAGE_SCHEMAS
def pattern_slug(self):
return 'neighbor-message' if self.slug in NEIGHBOR_MESSAGE_SCHEMAS else self.slug
class SchemaInfo(models.Model):
schema = models.ForeignKey(Schema)
short_description = models.TextField()
summary = models.TextField()
source = models.TextField()
grab_bag_headline = models.CharField(max_length=128, blank=True)
grab_bag = models.TextField(blank=True)
short_source = models.CharField(max_length=128)
update_frequency = models.CharField(max_length=64)
intro = models.TextField()
def __unicode__(self):
return unicode(self.schema)
class SchemaField(models.Model):
schema = models.ForeignKey(Schema)
name = models.CharField(max_length=32)
real_name = models.CharField(max_length=10) # 'varchar01', 'varchar02', etc.
pretty_name = models.CharField(max_length=32) # human-readable name, for presentation
pretty_name_plural = models.CharField(max_length=32) # plural human-readable name
pattern_slot = models.CharField(max_length=32) # name used in newsitem_list pattern template
display = models.BooleanField() # whether to display value on the public site
is_lookup = models.BooleanField() # whether the value is a foreign key to Lookup
display_order = models.SmallIntegerField()
display_format = models.CharField(max_length=50, blank=True)
display_api = models.BooleanField(default=False) #whether to display value on an api
def __unicode__(self):
return u'%s - %s' % (self.schema, self.name)
def _get_slug(self):
return self.name.replace('_', '-')
slug = property(_get_slug)
def _datatype(self):
return self.real_name[:-2]
datatype = property(_datatype)
def is_type(self, *data_types):
"""
Returns True if this SchemaField is of *any* of the given data types.
Allowed values are 'varchar', 'date', 'time', 'datetime', 'bool', 'int'.
"""
for t in data_types:
if t == self.real_name[:-2]:
return True
return False
def is_many_to_many_lookup(self):
"""
Returns True if this SchemaField is a many-to-many lookup.
"""
return self.is_lookup and not self.is_type('int') and not self.is_type('lookup')
def smart_pretty_name(self):
"""
Returns the pretty name for this SchemaField, taking into account
many-to-many fields.
"""
if self.is_many_to_many_lookup():
return self.pretty_name_plural
return self.pretty_name
class SchemaFieldInfo(models.Model):
schema = models.ForeignKey(Schema)
schema_field = models.ForeignKey(SchemaField)
help_text = models.TextField()
def __unicode__(self):
return unicode(self.schema_field)
class LocationType(models.Model):
name = models.CharField(max_length=255) # e.g., "Ward" or "Congressional District"
plural_name = models.CharField(max_length=64) # e.g., "Wards"
scope = models.CharField(max_length=64) # e.g., "Chicago" or "U.S.A."
slug = models.CharField(max_length=32, unique=True)
is_significant = models.BooleanField() # whether this is used to display aggregates, etc.
def __unicode__(self):
return u'%s, %s' % (self.name, self.scope)
def url(self):
return '/locations/%s/' % self.slug
class LocationQuerySet(models.query.GeoQuerySet):
def alphabetize(self, location_type_slug):
for i, loc in enumerate(self.filter(location_type__slug=location_type_slug).order_by('name')):
loc.display_order = i
loc.save()
class LocationManager(models.GeoManager):
def get_queryset(self, *args, **kwargs):
return LocationQuerySet(self.model).filter(is_deleted=False)
def alphabetize(self, *args, **kwargs):
return self.get_queryset().alphabetize(*args, **kwargs)
def largest_overlapping_neighborhood(self, zipcode, using='default'):
sql = """
SELECT * FROM (
SELECT loc.*, ST_Area(ST_Intersection(loc.location, zipcode.location)) AS overlapping_area
FROM db_location loc
LEFT JOIN (
SELECT location
FROM db_location
INNER JOIN db_locationtype
ON db_location.location_type_id = db_locationtype.id
WHERE db_location.name = %(zipcode)s AND db_locationtype.slug = 'zipcodes'
) AS zipcode
ON 1=1
INNER JOIN db_locationtype lt
ON lt.id = loc.location_type_id
WHERE loc.is_public = true AND ST_Intersects(loc.location, zipcode.location) AND lt.slug='neighborhoods'
) as locations
ORDER BY locations.overlapping_area DESC;
"""
params = {'zipcode': str(zipcode)}
qs = self.db_manager(using).raw(sql, params)
try:
return qs[0]
except IndexError:
return None
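    # Usage sketch (hypothetical metro database and ZIP code): returns the
    # neighborhood Location sharing the most area with the given ZIP, or None
    # if nothing overlaps.
    #
    #   hood = Location.objects.largest_overlapping_neighborhood('60614', using='chicago')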
class Location(models.Model):
name = models.CharField(max_length=255) # e.g., "35th Ward"
normalized_name = models.CharField(max_length=255, db_index=True)
slug = models.CharField(max_length=64, db_index=True)
location_type = models.ForeignKey(LocationType)
location = models.GeometryField(null=True)
centroid = models.PointField(null=True)
display_order = models.SmallIntegerField()
city = models.CharField(max_length=255)
source = models.CharField(max_length=64)
area = models.FloatField(blank=True, null=True) # in square meters
population = models.IntegerField(blank=True, null=True) # from the 2000 Census
user_id = models.IntegerField(blank=True, null=True)
is_public = models.BooleanField()
description = models.TextField(blank=True)
creation_date = models.DateTimeField(blank=True, null=True)
last_mod_date = models.DateTimeField(blank=True, null=True)
is_deleted = models.BooleanField(default=False)
is_indexed = models.BooleanField(default=False)
newsitem_density = models.IntegerField(blank=True, null=True)
follower_count = models.IntegerField(blank=True, null=True)
active_users = models.IntegerField(blank=True, null=True)
objects = LocationManager()
all_locations = models.GeoManager()
class Meta:
unique_together = (('slug', 'location_type'),)
def __unicode__(self):
return self.name
def url(self):
return '/locations/%s/%s/' % (self.location_type.slug, self.slug)
def url_with_domain(self):
return 'https://%s.everyblock.com%s' % (settings.SHORT_NAME, self.url())
def edit_url(self):
# This is only used for custom locations.
return '/accounts/custom-locations/edit/%s/' % self.slug
def delete_url(self):
# This is only used for custom locations.
return '/accounts/custom-locations/delete/%s/' % self.slug
def _is_custom(self):
return self.location_type.slug == 'custom'
is_custom = property(_is_custom)
def _get_user(self):
if self.user_id is None:
return None
if not hasattr(self, '_user_cache'):
from everyblock.accounts.models import User
try:
self._user_cache = User.objects.get(id=self.user_id)
except User.DoesNotExist:
self._user_cache = None
return self._user_cache
user = property(_get_user)
# Stuff for the place interface (see utils/pids.py).
place_type = 'location'
place_preposition = 'in'
def _pretty_name(self):
return self.name
pretty_name = property(_pretty_name)
def _place_type_name(self):
if self.location_type.slug == 'custom':
return 'area'
else:
return self.location_type.name
place_type_name = property(_place_type_name)
def _pid(self):
return 'l:%s' % self.id
pid = property(_pid)
def _geom(self):
return self.location
geom = property(_geom)
search_geom = property(_geom)
def _allows_messages(self):
if self.location_type.slug == 'custom' and not self.is_public:
return False
if self.location_type.slug in ('cities', 'quadrants'):
return False
return True
allows_messages = property(_allows_messages)
def _is_unknown(self):
return self.slug == 'unknown'
is_unknown = property(_is_unknown)
def alert_url(self):
return '%salerts/' % self.url()
def rss_url(self):
return '/rss%s' % self.url()
class AttributesDescriptor(object):
"""
This class provides the functionality that makes the attributes available
as `attributes` on a model instance.
"""
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.__class__.__name__)
if not hasattr(instance, '_attributes_cache'):
instance._attributes_cache = AttributeDict(instance)
return instance._attributes_cache
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.__class__.__name__)
if not isinstance(value, dict):
raise ValueError('Only a dictionary is allowed')
db = instance._state.db
mapping = field_mapping([instance.schema_id], db)[instance.schema_id]
for k, v in mapping.items():
if v.startswith('lookup'):
mapping[k] += '_id'
mapping = mapping.items()
values = [value.get(k, None) for k, v in mapping]
db = instance._state.db
conn = connections[db]
cursor = conn.cursor()
cursor.execute("""
UPDATE %s
SET %s
WHERE news_item_id = %%s
""" % (Attribute._meta.db_table, ','.join(['%s=%%s' % v for k, v in mapping])),
values + [instance.id])
# If no records were updated, that means the DB doesn't yet have a
# row in the attributes table for this news item. Do an INSERT.
if cursor.rowcount < 1:
cursor.execute("""
INSERT INTO %s (news_item_id, schema_id, %s)
VALUES (%%s, %%s, %s)""" % (Attribute._meta.db_table, ','.join([v for k, v in mapping]), ','.join(['%s' for k in mapping])),
[instance.id, instance.schema_id] + values)
transaction.commit_unless_managed(using=db)
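# Usage sketch (hypothetical field names, matching the field_mapping docstring
# above): this descriptor is exposed as NewsItem.attributes further down in
# this module, so dynamic values can be read lazily through AttributeDict or
# bulk-written through AttributesDescriptor.__set__.
#
#   ni = NewsItem.objects.get(id=123)
#   ni.attributes['permit_number']               # lazy read via AttributeDict
#   ni.attributes = {'permit_number': 'A-1234'}  # upsert into db_attribute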
class AttributeDict(dict):
"""
A dictionary-like object that serves as a wrapper around attributes for a
given NewsItem.
"""
def __init__(self, ni):
dict.__init__(self)
self.ni = ni
self.news_item_id = ni.id
self.cached = False
self.fields = {}
for schemafield in SchemaField.objects.using(self.ni._state.db).filter(schema=ni.schema_id):
self.fields[schemafield.name] = { "real_name": schemafield.real_name, "is_lookup": schemafield.is_lookup }
def __do_query(self):
if not self.cached:
db = self.ni._state.db
attributes_to_fetch = getattr(self.ni, 'restricted_attributes', None) or self.fields.keys()
try:
att_objs = self.ni.attribute_set.all()
atts = {}
for obj in att_objs:
for att in attributes_to_fetch:
real_name = self.fields[att]['real_name']
if str(att) == "source":
src_id = getattr(obj, real_name+"_id", None)
if not src_id:
src_id = getattr(obj, real_name, None)
try:
lookup_obj = Lookup.objects.using(self.ni._state.db).get(id=src_id)
atts["source_name"] = lookup_obj.name
except Lookup.DoesNotExist:
                                logger.warn("Source lookup with id %r does not exist" % src_id)
if self.fields[att]['is_lookup'] and real_name.startswith('lookup'):
atts[att] = getattr(obj, real_name+"_id", None)
else:
atts[att] = getattr(obj, real_name, None)
except IndexError:
return # Attributes for this newsitem haven't been saved yet. Just return.
self.update(atts)
self.cached = True
def get(self, *args, **kwargs):
self.__do_query()
return dict.get(self, *args, **kwargs)
def __len__(self):
self.__do_query()
return dict.__len__(self)
def __repr__(self):
self.__do_query()
return dict.__repr__(self)
def __getitem__(self, name):
self.__do_query()
return dict.__getitem__(self, name)
def __setitem__(self, name, value):
db = self.ni._state.db
real_name = self.fields[name]['real_name']
rows_updated = self.ni.attribute_set.all().update(**{real_name:value})
# If no records were updated, that means the DB doesn't yet have a
# row in the attributes table for this news item. Do an INSERT.
if rows_updated < 1:
attr = Attribute(news_item_id=self.news_item_id, schema_id=self.ni.schema_id, **{real_name:value})
attr.save(using=db)
transaction.commit_unless_managed(using=db)
dict.__setitem__(self, name, value)
def load_newsitem_attributes(newsitem_list, using='default', columns_to_select=[]):
"Edits newsitem_list in place, adding `attribute_values` and `attribute_slots` attributes."
# fmap = {schema_id: {'fields': [(name, real_name, pattern_slot)], 'lookups': [real_name1, real_name2]}}
fmap = {}
attribute_columns_to_select = set(['news_item'])
schema_ids = set([ni.schema_id for ni in newsitem_list])
schema_field_qs = SchemaField.objects.using(using).filter(schema__id__in=schema_ids)
if columns_to_select:
schema_field_qs = schema_field_qs.filter(name__in=columns_to_select)
for sf in schema_field_qs.values('schema', 'name', 'real_name', 'pattern_slot', 'is_lookup', 'display'):
fmap.setdefault(sf['schema'], {'fields': [], 'lookups': []})
fmap[sf['schema']]['fields'].append((sf['name'], sf['real_name'], sf['pattern_slot']))
if sf['is_lookup'] or sf['real_name'].startswith('lookup'):
fmap[sf['schema']]['lookups'].append(sf['real_name'])
attribute_columns_to_select.add(str(sf['real_name']))
att_dict = dict([(i['news_item'], i) for i in Attribute.objects.using(using).filter(news_item__id__in=[ni.id for ni in newsitem_list]).values(*list(attribute_columns_to_select))])
# Determine which Lookup objects need to be retrieved.
lookup_ids = set()
for ni in newsitem_list:
for real_name in fmap.get(ni.schema_id, {}).get('lookups', []):
value = att_dict.get(ni.id, {}).get(real_name)
if not value: continue
elif ',' in str(value):
lookup_ids.update(value.split(','))
else:
lookup_ids.add(value)
# Retrieve only the Lookups that are referenced in newsitem_list.
lookup_ids = [i for i in lookup_ids if i]
if lookup_ids:
lookup_objs = Lookup.objects.using(using).in_bulk(lookup_ids)
else:
lookup_objs = {}
# Set 'attribute_values' for each NewsItem in newsitem_list.
for ni in newsitem_list:
att = att_dict.get(ni.id, {})
att_values = {}
att_slots = {}
for field_name, real_name, pattern_slot in fmap.get(ni.schema_id, {}).get('fields', []):
value = att.get(real_name)
if real_name in fmap.get(ni.schema_id, {}).get('lookups', {}):
if real_name.startswith('int') or real_name.startswith('lookup'):
# value might be None, in which case it wouldn't be in lookup_objs,
# so use get() to fallback to None in that case.
value = lookup_objs.get(value)
else: # Many-to-many lookups are comma-separated strings.
value = [lookup_objs[int(i)] for i in value.split(',') if i]
att_slots[pattern_slot] = att_values[field_name] = value
ni.attribute_values = att_values
ni.attribute_slots = att_slots
class NewsItemQuerySet(models.query.GeoQuerySet):
def restrict_attributes(self, attributes):
clone = self._clone()
clone.restricted_attributes = attributes
return clone
def iterator(self, *args, **kwargs):
for obj in super(NewsItemQuerySet, self).iterator(*args, **kwargs):
obj.restricted_attributes = getattr(self, 'restricted_attributes', None)
yield obj
def _clone(self, *args, **kwargs):
obj = super(NewsItemQuerySet, self)._clone(*args, **kwargs)
obj.restricted_attributes = getattr(self, 'restricted_attributes', None)
return obj
def delete(self, *args, **kwargs):
raise NotImplementedError('NewsItem QuerySets cannot be deleted. Delete instances individually.')
def update(self, _force=False, *args, **kwargs):
if not _force:
raise NotImplementedError('NewsItem QuerySets cannot be updated. Update instances individually.')
super(NewsItemQuerySet, self).update(*args, **kwargs)
def prepare_attribute_qs(self, schema_id=None):
clone = self._clone()
if 'db_attribute' not in clone.query.extra_tables:
clone.query.extra_tables += ('db_attribute',)
clone = clone.extra(where=['db_newsitem.id = db_attribute.news_item_id'])
# schema_id is an optimization. We've found that adding the
# db_attribute.schema_id check to the WHERE clause can vastly improve
# the speed of the query. It probably gives some crucial clues to the
# PostgreSQL query planner.
if schema_id is not None:
clone = clone.extra(where=['db_attribute.schema_id = %s' % schema_id])
return clone
def with_attributes(self, columns_to_select=[]):
"""
Returns a list of NewsItems, each of which has an `attribute_values`
attribute. `attribute_values` is a dictionary mapping attribute names
to values. If an attribute is a Lookup, the value will be the Lookup
object.
"""
newsitem_list = list(self)
load_newsitem_attributes(newsitem_list, self._db, columns_to_select=columns_to_select)
return newsitem_list
def top_news(self, *args, **kwargs):
"""
Returns a QuerySet of NewsItems ordered by their blockscore.
"""
from everyblock.messages.constants import LIVE_BAD, REMOVED_BY_STAFF, REMOVED_BY_USER
max_date = datetime.datetime.now()
# Exclude specific types of items from citywide top news.
qs = self.filter(schema__has_newsitem_detail=True).exclude(status__in=(LIVE_BAD, REMOVED_BY_STAFF, REMOVED_BY_USER))
return qs.filter(blockscore__isnull=False, pub_date__lte=max_date).order_by('-blockscore')
def all_user_news(self, *args, **kwargs):
"""
Returns a QuerySet of all neighbor content
"""
from everyblock.messages.constants import LIVE_BAD, REMOVED_BY_STAFF, REMOVED_BY_USER
max_date = datetime.datetime.now()
# Exclude specific types of items from citywide top news.
qs = self.filter(schema__slug__in=UGC).exclude(status__in=(LIVE_BAD, REMOVED_BY_STAFF, REMOVED_BY_USER))
return qs.filter(pub_date__lte=max_date).order_by('-pub_date')
def by_attribute(self, schema_field, att_value, is_lookup=False):
"""
Returns a QuerySet of NewsItems whose attribute value for the given
SchemaField is att_value. If att_value is a list, this will do the
equivalent of an "OR" search, returning all NewsItems that have an
attribute value in the att_value list.
This handles many-to-many lookups correctly behind the scenes.
If is_lookup is True, then att_value is treated as the 'code' of a
Lookup object, and the Lookup's ID will be retrieved for use in the
query.
"""
clone = self.prepare_attribute_qs(schema_field.schema_id)
real_name = str(schema_field.real_name)
if real_name.startswith('lookup'): real_name += '_id'
if not isinstance(att_value, (list, tuple)):
att_value = [att_value]
if is_lookup:
att_value = Lookup.objects.filter(schema_field__id=schema_field.id, code__in=att_value)
if not att_value:
# If the lookup values don't exist, then there aren't any
# NewsItems with this attribute value. Note that we aren't
# using QuerySet.none() here, because we want the result to
# be a NewsItemQuerySet, and none() returns a normal QuerySet.
clone = clone.extra(where=['1=0'])
return clone
att_value = [val.id for val in att_value]
if schema_field.is_many_to_many_lookup():
# We have to use a regular expression search to look for all rows
# with the given att_value *somewhere* in the column. The [[:<:]]
# thing is a word boundary.
for value in att_value:
if not str(value).isdigit():
raise ValueError('Only integer strings allowed for att_value in many-to-many SchemaFields')
clone = clone.extra(where=["db_attribute.%s ~ '[[:<:]]%s[[:>:]]'" % (real_name, '|'.join([str(val) for val in att_value]))])
elif None in att_value:
if att_value != [None]:
raise ValueError('by_attribute() att_value list cannot have more than one element if it includes None')
clone = clone.extra(where=["db_attribute.%s IS NULL" % real_name])
else:
clone = clone.extra(where=["db_attribute.%s IN (%s)" % (real_name, ','.join(['%s' for val in att_value]))], params=tuple(att_value))
return clone
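    # Example (hypothetical schema/field data): all crime NewsItems whose
    # 'crime_type' Lookup has code 'THEFT'.
    #
    #   sf = SchemaField.objects.get(schema__slug='crime', name='crime_type')
    #   items = NewsItem.objects.by_attribute(sf, 'THEFT', is_lookup=True)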
def by_place(self, place, block_radius=BLOCK_RADIUS_DEFAULT):
"""
Returns a QuerySet of NewsItems filtered to the given place
(either a Block, Location or Spot).
"""
if place.place_type == 'location':
if place.location is not None:
return self.by_location(place)
else:
return self.filter(location__isnull=True)
elif place.place_type == 'block':
search_buf = make_search_buffer(place.location.centroid, block_radius)
return self.filter(location__intersects=search_buf)
elif place.place_type == 'spot':
return self.filter(spot__id=place.id)
else:
raise ValueError('Got unknown place type %r' % place.place_type)
def by_place_and_date(self, place, start_datetime, end_datetime, block_radius=BLOCK_RADIUS_DEFAULT):
"""
Returns a QuerySet filtered by the given place and pub_date range.
(Uses Redis for Locations.)
"""
return self.by_place(place, block_radius).filter(pub_date__range=(start_datetime, end_datetime))
def by_block(self, block, radius=3):
block_buffer = make_search_buffer(block.location.centroid, radius)
return self.filter(location__intersects=block_buffer)
def by_location(self, location):
sql = """
(location_id=%s OR (location_id IS NULL AND ST_Intersects(location, (SELECT location FROM db_location WHERE id=%s))))
"""
return self.extra(where=[sql], params=(location.id, location.id))
def by_neighborhood(self, neighborhood):
locations = Q(location_id=neighborhood.id)
blocks = Q(location_id__isnull=True, location__intersects=neighborhood.location)
return self.filter(locations | blocks)
def by_user(self, user, email=False):
sql = []
field_name = 'send_email' if email else 'show_on_dashboard'
# Directly followed locations
sql.append("(location_id IN (SELECT location_id FROM savedplaces_savedplace WHERE user_id=%%s AND %s='t'))" % field_name)
# Blocks, spots, and points within followed places
sql.append("(location_id IS NULL AND ST_Intersects(location, (SELECT ST_Union(geometry) FROM savedplaces_savedplace WHERE user_id=%%s AND %s='t')))" % field_name)
# Locations that contain followed blocks
# TODO: Filter out custom locations?
sql.append("""(location_id IN (
SELECT id FROM db_location WHERE EXISTS (
SELECT 1 FROM savedplaces_savedplace
WHERE %s='t' AND block_id IS NOT NULL AND user_id=%%s AND ST_Contains(location, ST_Centroid(geometry))
)
))""" % field_name)
# Directly followed spots
sql.append("(spot_id IN (SELECT spot_id FROM savedplaces_savedplace WHERE user_id=%%s AND spot_id IS NOT NULL AND %s='t'))" % field_name)
muted_sql = "db_newsitem.id NOT IN (SELECT newsitem_id FROM preferences_mutednewsitem WHERE user_id=%s)"
sql = '(%s)' % " OR ".join(sql)
return self.extra(where=[sql, muted_sql], params=(user.id, user.id, user.id, user.id, user.id))
# Schemas #################################################################
def user_messages(self):
return self.filter(schema__slug__in=USER_SCHEMAS)
def neighbor_messages(self):
return self.filter(schema__slug__in=NEIGHBOR_MESSAGE_SCHEMAS)
def neighbor_events(self):
return self.filter(schema__slug='neighbor-events')
def neighbor_ads(self):
return self.filter(schema__slug='neighbor-ads')
def web_posts(self):
return self.filter(schema__bucket=0)
def web_events(self):
return self.filter(schema__slug='events')
def public_records(self):
return self.filter(schema__bucket=1)
# Visibility ##############################################################
def live(self):
status_live = Q(status__isnull=True) | Q(status__in=STATUS_LIVE)
return self.filter(status_live, is_public=True)
def pending(self):
from everyblock.messages.constants import STATUS_PENDING
return self.filter(status__in=STATUS_PENDING)
def removed(self):
from everyblock.messages.constants import STATUS_REMOVED
return self.filter(Q(is_public=False) | Q(status__in=STATUS_REMOVED))
class NewsItemManager(models.GeoManager):
def get_queryset(self):
return NewsItemQuerySet(self.model)
def for_metro(self, short_name):
return self.using(get_connection_name(short_name))
def filter_attributes(self, **kwargs):
return self.get_queryset().filter_attributes(**kwargs)
def exclude_attributes(self, **kwargs):
return self.get_queryset().exclude_attributes(**kwargs)
def by_attribute(self, *args, **kwargs):
return self.get_queryset().by_attribute(*args, **kwargs)
def by_place(self, *args, **kwargs):
return self.get_queryset().by_place(*args, **kwargs)
def by_place_and_date(self, *args, **kwargs):
return self.get_queryset().by_place_and_date(*args, **kwargs)
def by_user(self, *args, **kwargs):
return self.get_queryset().by_user(*args, **kwargs)
def with_attributes(self, *args, **kwargs):
return self.get_queryset().with_attributes(*args, **kwargs)
def top_news(self, *args, **kwargs):
return self.get_queryset().top_news(*args, **kwargs)
def top_nearby_newsitem_ids(self, user, limit=3, min_days_old=3, short_name=None):
"""
Returns the top N neighbor messages posted to neighborhoods that
intersect the given user's followed places.
"""
if short_name is None:
short_name = settings.SHORT_NAME
today = datetime.date.today()
if settings.DEBUG:
min_days_old = 30
min_date = today - datetime.timedelta(days=min_days_old)
min_blockscore = int(min_date.strftime('%Y%m%d0000'))
conn = connections[short_name]
cursor = conn.cursor()
params = {
'user_id': user.id,
'limit': limit,
'blockscore': min_blockscore,
'pub_date': min_date,
'status': STATUS_LIVE,
# The following numbers are completely made up and should be refactored into something more clear and deliberate someday.
'radius': 0.001 if settings.SHORT_NAME == 'chicago' else .02,
'schemas': tuple(USER_SCHEMAS),
}
cursor.execute("""
SELECT ni.id
FROM db_newsitem ni
WHERE ni.schema_id IN (SELECT id FROM db_schema WHERE slug IN %(schemas)s)
AND ni.is_public = true
AND ni.id NOT IN (SELECT newsitem_id FROM preferences_mutednewsitem WHERE user_id=%(user_id)s)
AND blockscore > %(blockscore)s
AND pub_date >= %(pub_date)s
AND ni.location_id IS NOT NULL
AND (ni.status IN %(status)s OR ni.status IS NULL)
AND ni.location_id IN (
SELECT l.id
FROM db_location l
WHERE ST_Intersects(l.location, (
SELECT ST_Buffer(ST_Union(sp.geometry), %(radius)s)
FROM savedplaces_savedplace sp
WHERE user_id=%(user_id)s))
AND l.id NOT IN (
SELECT location_id
FROM savedplaces_savedplace
WHERE location_id IS NOT NULL
AND user_id=%(user_id)s)
AND l.id NOT IN (
SELECT id FROM db_location WHERE EXISTS (
SELECT 1 FROM savedplaces_savedplace
WHERE block_id IS NOT NULL AND user_id=%(user_id)s AND ST_Contains(location, ST_Centroid(geometry))
)
))
ORDER BY blockscore DESC
LIMIT %(limit)s;
""", params)
result = (r[0] for r in cursor.fetchall())
cursor.close()
conn.close()
return result
@transaction.commit_on_success
def create_with_attributes(self, *args, **kwargs):
"""
Create and return a NewsItem with Attributes in a transaction.
"""
using = kwargs.pop('using', None)
attributes = kwargs.pop('attributes')
if using is None:
ni = super(NewsItemManager, self).create(*args, **kwargs)
else:
ni = self.get_queryset().using(using).create(*args, **kwargs)
ni.attributes = attributes
return ni
class NewsItem(models.Model):
schema = models.ForeignKey(Schema)
title = models.CharField(max_length=255)
description = models.TextField()
url = models.TextField(blank=True)
pub_date = models.DateTimeField(db_index=True)
item_date = models.DateField(db_index=True)
last_update = models.DateTimeField(db_index=True)
location = models.GeometryField(blank=True, null=True)
location_name = models.CharField(max_length=255)
is_public = models.NullBooleanField(default=True)
status = models.IntegerField(choices=STATUS_CHOICES, blank=True, null=True) # TODO: Merge is_public into this field.
was_reported = models.NullBooleanField()
allow_comments = models.BooleanField(default=True) # Lets us turn off comments on a per-newsitem basis.
user_id = models.IntegerField(blank=True, null=True, db_index=True)
blockscore = models.BigIntegerField(blank=True, null=True, db_index=True) # null=True because of legacy records.
reason = models.IntegerField(choices=RemovalReasons.REASONS, null=True) # Removal reason
place_type = models.CharField(max_length=25, choices=places.PLACE_TYPE_CHOICES, null=True)
place_id = models.IntegerField(null=True)
source_name = models.CharField(null=True, max_length=100) # Not fkey because some newsitems don't have source info
source_id = models.IntegerField(null=True) # Not fkey because some newsitems don't have source info
ip_address = models.CharField(max_length=20, blank=True, null=True)
# TODO: place_type and place_id should eventually replace these fields.
spot = models.ForeignKey(Spot, blank=True, null=True)
location_id = models.IntegerField(blank=True, null=True) # Using a real FK sometimes causes unwanted joins.
# Denormalized fields
is_first_post = models.NullBooleanField(default=False)
comment_count = models.IntegerField(default=0, db_index=True)
thank_count = models.IntegerField(blank=True, null=True)
poster_name = models.CharField(max_length=255)
poster_description = models.CharField(max_length=50)
poster_image_name = models.CharField(max_length=100)
poster_badge = models.CharField(max_length=100)
poster_status = models.IntegerField()
is_watched_thread = models.BooleanField(default=False)
objects = NewsItemManager()
attributes = AttributesDescriptor()
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if self.location is not None and not self.location.valid:
raise Exception('Invalid geometry: %s' % self.location.wkt)
if self.last_update is None:
self.last_update = self.pub_date
self.blockscore = self.calculate_blockscore()
super(NewsItem, self).save(*args, **kwargs)
def remove(self, *args, **kwargs):
self.is_public = False
self.save()
def restore(self, *args, **kwargs):
self.is_public = True
self.save()
@property
def location_tuple(self):
location_coordinates = []
coords = []
        if not self.location:
            # No geometry: return one empty coordinate pair rather than
            # dereferencing self.location below.
            return [{'longitude': "", 'latitude': ""}]
        if self.location.geom_type.upper() in ('POLYGON', 'MULTIPOLYGON'):
coords.append(self.location.centroid.coords)
elif self.location.geom_type.upper() == 'MULTIPOINT':
coords = list(self.location.coords)
else:
coords.append(self.location.coords)
for c in coords:
location_coordinates.append({'longitude': c[0], 'latitude': c[1]})
return location_coordinates
@property
def last_comment_user(self):
from everyblock.comments.models import Comment
comments = list(Comment.objects.using(self._state.db).filter(newsitem_id=self.id).order_by('-pub_date'))
if len(comments) > 0:
return comments[0].user.public_name
else:
return ""
@property
def last_comment_date(self):
from everyblock.comments.models import Comment
comments = list(Comment.objects.using(self._state.db).filter(newsitem_id=self.id).order_by('-pub_date'))
        if len(comments) > 0:
return comments[0].pub_date
else:
return ""
@property
def cmnt_cnt(self):
from everyblock.comments.models import Comment
comments = list(Comment.objects.using(self._state.db).filter(newsitem_id=self.id))
return len(comments)
@property
def timestamp(self):
return time.mktime(self.pub_date.timetuple())
def _get_metro(self):
if not hasattr(self, '_metro'):
self._metro = self._state.db
if self._metro in ('default', 'standby'):
self._metro = settings.SHORT_NAME
return self._metro
def _set_metro(self, short_name):
self._metro = short_name
metro = property(_get_metro, _set_metro)
@property
def place(self):
if not (self.place_type and self.place_id):
return None
return places.get_place(self.place_type, self.place_id, short_name=self.metro)
@property
def poster(self):
if not hasattr(self, '_poster'):
self._poster = Poster.from_newsitem(self)
return self._poster
def calculate_blockscore(self, comment_max=None, day_multiplier=None):
d = self.pub_date
date_score = (d.year * 100000000) + (d.month * 1000000) + (d.day * 10000) + (d.hour * 100) + d.minute
# Number of comments that signifies "a fantastic comment thread,
# and any more comments shouldn't give this thread a higher score."
if comment_max is None:
comment_max = get_metro(self.metro, only_public=False)['blockscore_comment_max']
# Calculate an activity score based on three factors:
# * number of comments
# * whether it's a neighbor message (and a good/bad/normal one)
# * whether it's a media mention
# These three factors are weighted differently, with number of
# comments having the biggest weight.
comment_score = float(min(self.comment_count, comment_max)) / comment_max # between 0 and 1.
if self.is_public and self.schema.is_neighbor_content():
message_score = .7
from everyblock.messages import constants
status = self.status
if status == constants.LIVE_GOOD:
message_score = 1
elif status == constants.LIVE_BAD:
message_score = 0
else:
message_score = 0
media_mention_score = self.schema.slug == 'news-articles' and .7 or 0
# These should add up to 1000.
COMMENT_WEIGHT = 600
MESSAGE_WEIGHT = 300
MEDIA_MENTION_WEIGHT = 100
activity_score = (comment_score * COMMENT_WEIGHT) + (message_score * MESSAGE_WEIGHT) + (media_mention_score * MEDIA_MENTION_WEIGHT)
# day_multiplier lets us weigh recency vs. message_score. Use a
# higher day_multiplier to allow older items to stay at the top
# of the list for longer. Use a lower value to favor recency.
if day_multiplier is None:
day_multiplier = get_metro(self.metro, only_public=False)['blockscore_day_multiplier']
return date_score + int(activity_score * day_multiplier)
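    # Worked example (hypothetical metro settings comment_max=20 and
    # day_multiplier=10000): a LIVE_GOOD neighbor message published
    # 2013-06-05 14:30 with 10 comments gives date_score 201306051430,
    # activity_score = 0.5*600 + 1*300 + 0*100 = 600, and therefore
    # blockscore = 201306051430 + 600*10000 = 201312051430.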
# new_url and new_url_with_domain are for v2, item_url and item_url_with_domain for v1.
def new_url(self):
value = unicodedata.normalize('NFKD', self.title).encode('ascii', 'ignore')
value = re.sub(r'\'s', 's', value)
words = re.findall(r'(\w+)', value)
stopwords = [
'I', 'a', 'about', 'an', 'are', 'as', 'at', 'be', 'by', 'com',
'for', 'from', 'how', 'in', 'is', 'it', 'of', 'on', 'or', 'that',
'the', 'this', 'to', 'was', 'what', 'when', 'where', 'who',
            'will', 'with', 'www'
]
slug_words = [w.lower() for w in words if w not in stopwords][:8]
date = self.pub_date if self.schema.slug in (NEIGHBOR_MESSAGE_SCHEMAS + ['neighbor-ads']) else self.item_date
date_segment = date.strftime('%b%d').lower()
slug = '-'.join(slug_words)
return '/%s/%s-%s-%s/' % (self.schema.slug, date_segment, slug, self.id)
def new_url_with_domain(self):
return 'https://%s.everyblock.com%s' % (self.metro, self.new_url())
def item_url(self):
return '/%s/by-date/%s/%s/%s/%s/' % (self.schema.slug, self.item_date.year, self.item_date.month, self.item_date.day, self.id)
def item_url_with_domain(self):
return 'https://%s.everyblock.com%s' % (self.metro, self.item_url())
def item_date_url(self):
return '/%s/by-date/%s/%s/%s/' % (self.schema.slug, self.item_date.year, self.item_date.month, self.item_date.day)
def place_url(self):
# TODO: Would be nice to be smarter here, perhaps determining the place
# type and determining the direct URL, instead of relying on search.
# Also take into account whether the NewsItem is associated with a
# private custom location and maybe return None in that case?
if self.schema.has_linkable_locations and self.location_name.lower() != 'multiple locations':
try:
return '/search/?q=%s&type=place' % urllib.quote_plus(self.location_name)
except KeyError:
pass # In case location_name has non-ASCII text in it. We've seen u'\x92' for example.
return ''
def attributes_for_template(self):
"""
Return a list of AttributeForTemplate objects for this NewsItem. The
objects are ordered by SchemaField.display_order.
"""
fields = SchemaField.objects.filter(schema__id=self.schema_id).select_related().order_by('display_order')
field_infos = dict([(obj.schema_field_id, obj.help_text) for obj in SchemaFieldInfo.objects.filter(schema__id=self.schema_id)])
try:
attribute_row = Attribute.objects.using(self._state.db).filter(news_item__id=self.id).values(*[f.real_name for f in fields])[0]
        except (IndexError, KeyError):
return []
return [AttributeForTemplate(f, attribute_row, field_infos.get(f.id, None)) for f in fields]
def load_attributes(self):
load_newsitem_attributes([self], self._state.db)
def _set_photos(self, photo_list):
self._photos = photo_list
def _get_photos(self):
from everyblock.photos.models import Photo
if not hasattr(self, '_photos'):
self._photos = list(Photo.objects.using(self._state.db).filter(object_type=Photo.NEWSITEM, object_id=self.id, status=Photo.LIVE))
return self._photos
photos = property(_get_photos, _set_photos)
def _set_embeds(self, embed_list):
self._embeds = embed_list
def get_embeds(self):
return Embed.objects.using(self._state.db).filter(newsitem=self.id)
embeds = property(get_embeds, _set_embeds)
def reason_description(self):
reason = RemovalReasons.DETAILS.get(self.reason, None)
description = 'This neighbor message has been removed by EveryBlock staff.'
if reason:
description = reason.get('short_message', description).format('neighbor message')
return description
def reason_slug(self):
reason = RemovalReasons.DETAILS.get(self.reason, None)
if reason:
description = reason.get('slug', '')
else:
description = ''
return description
class Embed(models.Model):
# Newsitems attached to this embed
newsitem = models.ManyToManyField(NewsItem)
url = models.URLField(max_length=2084, unique=True)
url_type = models.CharField(max_length=50)
# Full JSON response from embedly
response = models.TextField()
# Only iframe html (if available) for the embed
embed_html = models.TextField()
provider_url = models.URLField()
description = models.TextField()
title = models.CharField(max_length=255)
author_name = models.CharField(max_length=500)
provider_name = models.CharField(max_length=500)
thumbnail_url = models.URLField()
def __unicode__(self):
return "Embed for url: {}".format(self.url)
class PromotedNewsItem(models.Model):
"""
A staff picked and edited newsitem for use on the local slice and other
places we distribute our news.
"""
newsitem = models.OneToOneField(NewsItem)
headline = models.CharField(max_length=255)
excerpt = models.TextField(blank=True)
image_url = models.URLField(max_length=255, blank=True)
def __unicode__(self):
return self.headline
class AttributeForTemplate(object):
def __init__(self, schema_field, attribute_row, help_text):
self.sf = schema_field
self.raw_value = attribute_row[schema_field.real_name]
self.schema_slug = schema_field.schema.slug
self.is_lookup = schema_field.is_lookup
self.help_text = help_text
if self.sf.display_format:
self.formatted_value = formatting.apply_format(self.raw_value, self.sf.display_format)
else:
self.formatted_value = None
if self.is_lookup:
if self.raw_value == '' or not self.raw_value:
self.values = []
elif self.sf.is_many_to_many_lookup():
try:
id_values = map(int, self.raw_value.split(','))
except ValueError:
self.values = []
else:
lookups = Lookup.objects.in_bulk(id_values)
self.values = [lookups[i] for i in id_values]
else:
self.values = [Lookup.objects.get(id=self.raw_value)]
else:
self.values = [self.raw_value]
def value_list(self):
"""
Returns a list of {value, description} dictionaries representing each value for
this attribute.
"""
from django.utils.dateformat import format, time_format
descriptions = [None]
if self.formatted_value is not None:
values = [self.formatted_value]
elif self.is_lookup:
values = [val and val.name or 'None' for val in self.values]
descriptions = [val and val.description or None for val in self.values]
elif isinstance(self.raw_value, datetime.datetime):
values = [format(self.raw_value, 'F j, Y, P')]
elif isinstance(self.raw_value, datetime.date):
values = [format(self.raw_value, 'F j, Y')]
elif isinstance(self.raw_value, datetime.time):
values = [time_format(self.raw_value, 'P')]
elif self.raw_value is True:
values = ['Yes']
elif self.raw_value is False:
values = ['No']
elif self.raw_value is None:
values = ['N/A']
else:
values = [self.raw_value]
return [{'value': value, 'description': description} for value, description in zip(values, descriptions)]
class Attribute(models.Model):
news_item = models.ForeignKey(NewsItem, primary_key=True, unique=True)
schema = models.ForeignKey(Schema)
# All data-type field names must end in two digits, because the code assumes this.
varchar01 = models.CharField(max_length=255, blank=True, null=True)
varchar02 = models.CharField(max_length=255, blank=True, null=True)
varchar03 = models.CharField(max_length=255, blank=True, null=True)
varchar04 = models.CharField(max_length=255, blank=True, null=True)
varchar05 = models.CharField(max_length=255, blank=True, null=True)
date01 = models.DateField(blank=True, null=True)
date02 = models.DateField(blank=True, null=True)
date03 = models.DateField(blank=True, null=True)
date04 = models.DateField(blank=True, null=True)
date05 = models.DateField(blank=True, null=True)
time01 = models.TimeField(blank=True, null=True)
time02 = models.TimeField(blank=True, null=True)
datetime01 = models.DateTimeField(blank=True, null=True)
datetime02 = models.DateTimeField(blank=True, null=True)
datetime03 = models.DateTimeField(blank=True, null=True)
datetime04 = models.DateTimeField(blank=True, null=True)
bool01 = models.NullBooleanField(blank=True)
bool02 = models.NullBooleanField(blank=True)
bool03 = models.NullBooleanField(blank=True)
bool04 = models.NullBooleanField(blank=True)
bool05 = models.NullBooleanField(blank=True)
int01 = models.IntegerField(blank=True, null=True)
int02 = models.IntegerField(blank=True, null=True)
int03 = models.IntegerField(blank=True, null=True)
int04 = models.IntegerField(blank=True, null=True)
int05 = models.IntegerField(blank=True, null=True)
int06 = models.IntegerField(blank=True, null=True)
int07 = models.IntegerField(blank=True, null=True)
lookup01 = models.ForeignKey("Lookup", blank=True, null=True, related_name='+')
lookup02 = models.ForeignKey("Lookup", blank=True, null=True, related_name='+')
lookup03 = models.ForeignKey("Lookup", blank=True, null=True, related_name='+')
lookup04 = models.ForeignKey("Lookup", blank=True, null=True, related_name='+')
lookup05 = models.ForeignKey("Lookup", blank=True, null=True, related_name='+')
lookup06 = models.ForeignKey("Lookup", blank=True, null=True, related_name='+')
lookup07 = models.ForeignKey("Lookup", blank=True, null=True, related_name='+')
text01 = models.TextField(blank=True, null=True)
text02 = models.TextField(blank=True, null=True)
def __unicode__(self):
return u'Attributes for news item %s' % self.news_item_id
class LookupManager(models.Manager):
def get_or_create_lookup(self, schema_field, name, code=None, description='', make_text_slug=True, logger=None, using=None):
"""
Returns the Lookup instance matching the given SchemaField, name and
Lookup.code, creating it (with the given name/code/description) if it
doesn't already exist.
If make_text_slug is True, then a slug will be created from the given
name. If it's False, then the slug will be the Lookup's ID.
"""
code = code or name # code defaults to name if it wasn't provided
# Convert code to a string if it's not. Otherwise, the Lookup.objects.get(...)
# will fail.
if not isinstance(code, basestring):
code = unicode(code)
if not using:
using = settings.SHORT_NAME
try:
obj = Lookup.objects.using(using).get(schema_field__id=schema_field.id, code=code)
except Lookup.DoesNotExist:
if make_text_slug:
slug = slugify(name)
if len(slug) > 32:
# Only bother to warn if we're actually going to use the slug.
if make_text_slug and logger:
logger.warn("Trimming slug %r to %r in order to fit 32-char limit." % (slug, slug[:32]))
slug = slug[:32]
else:
# To avoid integrity errors in the slug when creating the Lookup,
# use a temporary dummy slug that's guaranteed not to be in use.
# We'll change it back immediately afterward.
slug = '__TEMPORARY__'
if len(name) > 255:
old_name = name
name = name[:250] + '...'
# Save the full name in the description.
if not description:
description = old_name
if logger:
logger.warn("Trimming name %r to %r in order to fit 255-char limit." % (old_name, name))
obj = Lookup.objects.using(using).create(schema_field_id=schema_field.id, name=name, code=code, slug=slug,
description=description)
if not make_text_slug:
# Set the slug to the ID.
obj.slug = obj.id
Lookup.objects.using(using).filter(id=obj.id).update(slug=obj.id)
if logger:
logger.info('Created %s %r' % (schema_field.name, name))
return obj
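    # Example (hypothetical SchemaField and code): map a raw source code to a
    # Lookup row, creating it the first time it is seen.
    #
    #   crime_type = Lookup.objects.get_or_create_lookup(
    #       sf, name='Theft', code='0810', using='chicago')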
class Lookup(models.Model):
schema_field = models.ForeignKey(SchemaField)
name = models.CharField(max_length=255)
# `code` is the optional internal code to use during retrieval.
# For example, in scraping Chicago crimes, we use the crime type code
# to find the appropriate crime type in this table. We can't use `name`
# in that case, because we've massaged `name` to use a "prettier"
# formatting than exists in the data source.
code = models.CharField(max_length=255, blank=True)
slug = models.CharField(max_length=32, db_index=True)
description = models.TextField(blank=True)
objects = LookupManager()
class Meta:
unique_together = (('slug', 'schema_field'),)
def __unicode__(self):
return u'%s - %s' % (self.schema_field, self.name)
class SearchSpecialCase(models.Model):
query = models.CharField(max_length=64, unique=True)
redirect_to = models.CharField(max_length=255, blank=True)
title = models.CharField(max_length=128, blank=True)
body = models.TextField(blank=True)
def __unicode__(self):
return self.query
class DataUpdate(models.Model):
# Keeps track of each time we update our data.
schema = models.ForeignKey(Schema)
update_start = models.DateTimeField() # When the scraper/importer started running.
update_finish = models.DateTimeField() # When the scraper/importer finished.
num_added = models.IntegerField()
num_changed = models.IntegerField()
num_deleted = models.IntegerField()
num_skipped = models.IntegerField()
got_error = models.BooleanField()
list_records_seen = models.IntegerField(null=True)
detail_records_seen = models.IntegerField(null=True)
exc_type = models.CharField(max_length=100)
exc_value = models.TextField()
traceback = models.TextField()
scraper = models.CharField(max_length=100)
def __unicode__(self):
return u'%s started on %s' % (self.schema.name, self.update_start)
def total_time(self):
delta = self.update_finish - self.update_start
return str(delta).split('.')[0]
the-stack_106_27772
"""HTML slide show Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from copy import deepcopy
from warnings import warn
from traitlets import Bool, Unicode, default
from ..preprocessors.base import Preprocessor
from .html import HTMLExporter
class _RevealMetadataPreprocessor(Preprocessor):
# A custom preprocessor adding convenience metadata to cells
def preprocess(self, nb, resources=None):
nb = deepcopy(nb)
for cell in nb.cells:
# Make sure every cell has a slide_type
try:
slide_type = cell.metadata.get("slideshow", {}).get("slide_type", "-")
except AttributeError:
slide_type = "-"
cell.metadata.slide_type = slide_type
# Find the first visible cell
for index, cell in enumerate(nb.cells):
if cell.metadata.slide_type not in {"notes", "skip"}:
cell.metadata.slide_type = "slide"
cell.metadata.slide_start = True
cell.metadata.subslide_start = True
first_slide_ix = index
break
else:
raise ValueError("All cells are hidden, cannot create slideshow")
in_fragment = False
for index, cell in enumerate(nb.cells[first_slide_ix + 1 :], start=(first_slide_ix + 1)):
previous_cell = nb.cells[index - 1]
# Slides are <section> elements in the HTML, subslides (the vertically
# stacked slides) are also <section> elements inside the slides,
# and fragments are <div>s within subslides. Subslide and fragment
# elements can contain content:
# <section>
# <section>
# (content)
# <div class="fragment">(content)</div>
# </section>
# </section>
# Get the slide type. If type is subslide or slide,
# end the last slide/subslide/fragment as applicable.
if cell.metadata.slide_type == "slide":
previous_cell.metadata.slide_end = True
cell.metadata.slide_start = True
if cell.metadata.slide_type in {"subslide", "slide"}:
previous_cell.metadata.fragment_end = in_fragment
previous_cell.metadata.subslide_end = True
cell.metadata.subslide_start = True
in_fragment = False
elif cell.metadata.slide_type == "fragment":
cell.metadata.fragment_start = True
if in_fragment:
previous_cell.metadata.fragment_end = True
else:
in_fragment = True
# The last cell will always be the end of a slide
nb.cells[-1].metadata.fragment_end = in_fragment
nb.cells[-1].metadata.subslide_end = True
nb.cells[-1].metadata.slide_end = True
return nb, resources
class SlidesExporter(HTMLExporter):
"""Exports HTML slides with reveal.js"""
# Overrides from HTMLExporter
#################################
export_from_notebook = "Reveal.js slides"
@default("template_name")
def _template_name_default(self):
return "reveal"
@default("file_extension")
def _file_extension_default(self):
return ".slides.html"
@default("template_extension")
def _template_extension_default(self):
return ".html.j2"
# Extra resources
#################################
reveal_url_prefix = Unicode(
help="""The URL prefix for reveal.js (version 3.x).
This defaults to the reveal CDN, but can be any url pointing to a copy
of reveal.js.
For speaker notes to work, this must be a relative path to a local
copy of reveal.js: e.g., "reveal.js".
If a relative path is given, it must be a subdirectory of the
current directory (from which the server is run).
See the usage documentation
(https://nbconvert.readthedocs.io/en/latest/usage.html#reveal-js-html-slideshow)
for more details.
"""
).tag(config=True)
@default("reveal_url_prefix")
def _reveal_url_prefix_default(self):
if "RevealHelpPreprocessor.url_prefix" in self.config:
warn(
"Please update RevealHelpPreprocessor.url_prefix to "
"SlidesExporter.reveal_url_prefix in config files."
)
return self.config.RevealHelpPreprocessor.url_prefix
return "https://unpkg.com/[email protected]"
reveal_theme = Unicode(
"simple",
help="""
Name of the reveal.js theme to use.
We look for a file with this name under
``reveal_url_prefix``/css/theme/``reveal_theme``.css.
https://github.com/hakimel/reveal.js/tree/master/css/theme has
list of themes that ship by default with reveal.js.
""",
).tag(config=True)
reveal_transition = Unicode(
"slide",
help="""
Name of the reveal.js transition to use.
The list of transitions that ships by default with reveal.js are:
none, fade, slide, convex, concave and zoom.
""",
).tag(config=True)
reveal_scroll = Bool(
False,
help="""
If True, enable scrolling within each slide
""",
).tag(config=True)
reveal_number = Unicode(
"",
help="""
slide number format (e.g. 'c/t'). Choose from:
'c': current, 't': total, 'h': horizontal, 'v': vertical
""",
).tag(config=True)
font_awesome_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.css",
help="""
URL to load font awesome from.
Defaults to loading from cdnjs.
""",
).tag(config=True)
def _init_resources(self, resources):
resources = super()._init_resources(resources)
if "reveal" not in resources:
resources["reveal"] = {}
resources["reveal"]["url_prefix"] = self.reveal_url_prefix
resources["reveal"]["theme"] = self.reveal_theme
resources["reveal"]["transition"] = self.reveal_transition
resources["reveal"]["scroll"] = self.reveal_scroll
resources["reveal"]["number"] = self.reveal_number
return resources
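# Usage sketch (assumes nbconvert is installed and a "talk.ipynb" notebook
# exists; the theme/scroll values are just illustrative):
#
#   exporter = SlidesExporter(reveal_theme="night", reveal_scroll=True)
#   body, resources = exporter.from_filename("talk.ipynb")
#   with open("talk.slides.html", "w") as f:
#       f.write(body)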
the-stack_106_27775
from db_user import User
from blog_handler import BlogHandler
from utils import *
from db_comment import Comment
from db_like import Like
import time
## Class to delete post
class DeletePost(BlogHandler):
def get(self, post_id):
if self.user:
## Getting post key using post_id and user_id
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
            if not post:
                # Stop processing if the post no longer exists.
                return self.redirect("/")
            ## Searching and getting all likes relative to current post
            like = db.GqlQuery("select * from Like where post_id = "
                               + post_id)
            if not like:
                return self.redirect("/")
            ## Getting all comments relative to current post
            comment = db.GqlQuery("select * from Comment where post_id = "
                                  + post_id)
            if not comment:
                return self.redirect("/")
if int(self.read_secure_cookie('user_id')) == post.author:
## delete all comments, like and post
db.delete(comment)
db.delete(like)
db.delete(post)
time.sleep(0.1)
self.redirect("/")
else:
self.redirect("/")
else:
            self.redirect("/")
the-stack_106_27776
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for the Recognizing Textual Entailment dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import zipfile
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow as tf
EOS = text_encoder.EOS
@registry.register_problem
class RTE(text_problems.TextConcat2ClassProblem):
"""Recognizing Textual Entailment classification problems."""
# Link to data from GLUE: https://gluebenchmark.com/tasks
_RTE_URL = ("https://firebasestorage.googleapis.com/v0/b/"
"mtl-sentence-representations.appspot.com/o/"
"data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-"
"4f19-8ea2-9e1840f077fb")
@property
def is_generate_per_split(self):
return True
@property
def dataset_splits(self):
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 1,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}]
@property
def approx_vocab_size(self):
return 2**13 # 8k vocab suffices for this small dataset.
@property
def num_classes(self):
return 2
def class_labels(self, data_dir):
del data_dir
# Note this binary classification is different from usual MNLI.
return ["not_entailment", "entailment"]
def _maybe_download_corpora(self, tmp_dir):
rte_filename = "RTE.zip"
rte_finalpath = os.path.join(tmp_dir, "RTE")
if not tf.gfile.Exists(rte_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, rte_filename, self._RTE_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return rte_finalpath
def example_generator(self, filename):
label_list = self.class_labels(data_dir=None)
for idx, line in enumerate(tf.gfile.Open(filename, "rb")):
if idx == 0: continue # skip header
if six.PY2:
line = unicode(line.strip(), "utf-8")
else:
line = line.strip().decode("utf-8")
_, s1, s2, l = line.split("\t")
inputs = [s1, s2]
l = label_list.index(l)
yield {
"inputs": inputs,
"label": l
}
def generate_samples(self, data_dir, tmp_dir, dataset_split):
rte_dir = self._maybe_download_corpora(tmp_dir)
if dataset_split == problem.DatasetSplit.TRAIN:
filesplit = "train.tsv"
else:
filesplit = "dev.tsv"
filename = os.path.join(rte_dir, filesplit)
for example in self.example_generator(filename):
yield example
@registry.register_problem
class RTECharacters(RTE):
"""Recognizing Textual Entailment classification problems, character level"""
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
def global_task_id(self):
return problem.TaskID.EN_NLI
the-stack_106_27779
#calculate output cell errors
import numpy as np

def calculate_output_cell_error(batch_labels, output_cache, parameters):
#to store the output errors for each time step
output_error_cache = dict()
activation_error_cache = dict()
how = parameters['how']
#loop through each time step
for i in range(1,len(output_cache)+1):
#get true and predicted labels
labels = batch_labels[i]
pred = output_cache['o'+str(i)]
#calculate the output_error for time step 't'
error_output = pred - labels
#calculate the activation error for time step 't'
error_activation = np.matmul(error_output,how.T)
#store the output and activation error in dict
output_error_cache['eo'+str(i)] = error_output
activation_error_cache['ea'+str(i)] = error_activation
    return output_error_cache, activation_error_cache
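# Minimal usage sketch (hypothetical shapes; assumes 'how' is the
# hidden-to-output weight matrix and that pred - labels is the gradient of a
# softmax output under cross-entropy loss):
#
#   parameters = {'how': np.random.randn(4, 3)}      # 4 hidden units, 3 classes
#   batch_labels = {1: np.eye(3)[[0, 2]]}            # one-hot labels, batch of 2
#   output_cache = {'o1': np.full((2, 3), 1.0 / 3)}  # predicted distributions
#   out_err, act_err = calculate_output_cell_error(batch_labels, output_cache, parameters)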
the-stack_106_27781
# -*- coding: utf-8 -*-
# MooQuant
#
# Copyright 2017 bopo.wang<[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: bopo.wang <[email protected]>
"""
import numpy as np
from mooquant import technical
from mooquant.utils import collections, dt
from scipy import stats
# Using scipy.stats.linregress instead of numpy.linalg.lstsq because of this:
# http://stackoverflow.com/questions/20736255/numpy-linalg-lstsq-with-big-values
def lsreg(x, y):
x = np.asarray(x)
y = np.asarray(y)
res = stats.linregress(x, y)
return res[0], res[1]
class LeastSquaresRegressionWindow(technical.EventWindow):
def __init__(self, windowSize):
assert (windowSize > 1)
super().__init__(windowSize)
self.__timestamps = collections.NumPyDeque(windowSize)
def onNewValue(self, dateTime, value):
technical.EventWindow.onNewValue(self, dateTime, value)
if value is not None:
timestamp = dt.datetime_to_timestamp(dateTime)
if len(self.__timestamps):
assert (timestamp > self.__timestamps[-1])
self.__timestamps.append(timestamp)
def __getValueAtImpl(self, timestamp):
ret = None
if self.windowFull():
a, b = lsreg(self.__timestamps.data(), self.getValues())
ret = a * timestamp + b
return ret
def getTimeStamps(self):
return self.__timestamps
def getValueAt(self, dateTime):
return self.__getValueAtImpl(dt.datetime_to_timestamp(dateTime))
def getValue(self):
ret = None
if self.windowFull():
ret = self.__getValueAtImpl(self.__timestamps.data()[-1])
return ret
class LeastSquaresRegression(technical.EventBasedFilter):
"""Calculates values based on a least-squares regression.
:param dataSeries: The DataSeries instance being filtered.
:type dataSeries: :class:`mooquant.dataseries.DataSeries`.
:param windowSize: The number of values to use to calculate the regression.
:type windowSize: int.
:param maxLen: The maximum number of values to hold.
Once a bounded length is full, when new items are added, a corresponding number of items are discarded from the
opposite end. If None then dataseries.DEFAULT_MAX_LEN is used.
:type maxLen: int.
"""
def __init__(self, dataSeries, windowSize, maxLen=None):
super().__init__(dataSeries, LeastSquaresRegressionWindow(windowSize), maxLen)
def getValueAt(self, dateTime):
"""Calculates the value at a given time based on the regression line.
:param dateTime: The datetime to calculate the value at.
Will return None if there are not enough values in the underlying DataSeries.
:type dateTime: :class:`datetime.datetime`.
"""
return self.getEventWindow().getValueAt(dateTime)
class SlopeEventWindow(technical.EventWindow):
def __init__(self, windowSize):
super().__init__(windowSize)
self.__x = np.asarray(list(range(windowSize)))
def getValue(self):
ret = None
if self.windowFull():
y = self.getValues()
ret = lsreg(self.__x, y)[0]
return ret
class Slope(technical.EventBasedFilter):
"""The Slope filter calculates the slope of a least-squares regression line.
:param dataSeries: The DataSeries instance being filtered.
:type dataSeries: :class:`mooquant.dataseries.DataSeries`.
:param period: The number of values to use to calculate the slope.
:type period: int.
:param maxLen: The maximum number of values to hold.
Once a bounded length is full, when new items are added, a corresponding number of items are discarded from the
opposite end. If None then dataseries.DEFAULT_MAX_LEN is used.
:type maxLen: int.
.. note::
This filter ignores the time elapsed between the different values.
"""
def __init__(self, dataSeries, period, maxLen=None):
super().__init__(dataSeries, SlopeEventWindow(period), maxLen)
class TrendEventWindow(SlopeEventWindow):
def __init__(self, windowSize, positiveThreshold, negativeThreshold):
if negativeThreshold > positiveThreshold:
raise Exception("Invalid thresholds")
super(TrendEventWindow, self).__init__(windowSize)
self.__positiveThreshold = positiveThreshold
self.__negativeThreshold = negativeThreshold
def getValue(self):
ret = super().getValue()
if ret is not None:
if ret > self.__positiveThreshold:
ret = True
elif ret < self.__negativeThreshold:
ret = False
            else:  # Between negative and positive thresholds.
ret = None
return ret
class Trend(technical.EventBasedFilter):
def __init__(self, dataSeries, trendDays, positiveThreshold=0, negativeThreshold=0, maxLen=None):
super(Trend, self).__init__(dataSeries, TrendEventWindow(trendDays, positiveThreshold, negativeThreshold),
maxLen)
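# Illustrative usage sketch (not part of the original module): feed a synthetic
# linear series (y = 2x + 1) into the SlopeEventWindow defined above; once the
# window is full, getValue() should return a slope of ~2.0. It only uses classes
# from this file plus the standard library, and assumes mooquant is importable
# exactly as in the imports above.
if __name__ == '__main__':
    import datetime
    window = SlopeEventWindow(5)
    start = datetime.datetime(2017, 1, 1)
    for i in range(5):
        window.onNewValue(start + datetime.timedelta(days=i), 2 * i + 1)
    print(window.getValue())  # expected: ~2.0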
|
the-stack_106_27782 | import pytest
from pathlib import Path
from daemon.models import FlowModel
from jina import Client, Document
cur_dir = Path(__file__).parent
api = '/flow'
@pytest.mark.skip(
reason='TestClient uses a ThreadPoolExecutor which messes up RollingUpdate'
)
def test_flow_api(monkeypatch, partial_flow_client):
flow_model = FlowModel()
flow_model.uses = f'{cur_dir}/good_flow_dummy.yml'
create_response = partial_flow_client.post(
api, json={'flow': flow_model.dict(exclude={'log_config'})}
)
get_response = partial_flow_client.get(api)
endpoint_responses = Client(port=56789).post(
on='/any_endpoint', inputs=Document(), return_results=True
)
rolling_update_response = partial_flow_client.put(
f'{api}/rolling_update',
params={
'deployment_name': 'dummy_executor',
'uses_with': {},
},
)
delete_response = partial_flow_client.delete(api)
assert create_response
assert get_response
assert get_response.json()['arguments']['port_expose'] == 56789
assert endpoint_responses[0].docs[0].content == 'https://jina.ai'
assert rolling_update_response.status_code == 200
assert delete_response
|
the-stack_106_27785 | from PIL import Image, ImageDraw
from cv.face_recognition import face_recognition
# Load the jpg file into a numpy array
image = face_recognition.load_image_file("biden.jpg")
# Find all facial features in all the faces in the image
face_landmarks_list = face_recognition.face_landmarks(image)
print("I found {} face(s) in this photograph.".format(len(face_landmarks_list)))
for face_landmarks in face_landmarks_list:
# Print the location of each facial feature in this image
facial_features = [
'chin',
'left_eyebrow',
'right_eyebrow',
'nose_bridge',
'nose_tip',
'left_eye',
'right_eye',
'top_lip',
'bottom_lip'
]
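    # Note (added): these keys are the feature names returned by face_landmarks()
    # by default, corresponding to dlib's 68-point facial landmark model.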
for facial_feature in facial_features:
print("The {} in this face has the following points: {}".format(facial_feature, face_landmarks[facial_feature]))
# Let's trace out each facial feature in the image with a line!
pil_image = Image.fromarray(image)
d = ImageDraw.Draw(pil_image)
for facial_feature in facial_features:
d.line(face_landmarks[facial_feature], width=5)
pil_image.show()
|
the-stack_106_27788 | state = '10011111011011001'
disk_length = 35651584
def mutate(a):
    # One "dragon curve" step: append '0', then the reversed, bit-flipped copy.
    b = ''.join(['1' if x == '0' else '0' for x in reversed(a)])
    return a + '0' + b
def checksum(a):
    # Collapse pairs: equal pair -> '1', unequal pair -> '0'; repeat until the
    # result has an odd length.
    result = ''
    i = 0
    while i < len(a) - 1:
        if a[i] == a[i+1]:
            result += '1'
        else:
            result += '0'
        i += 2
    if len(result) % 2 != 1:
        result = checksum(result)
    return result
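# Sanity checks (added for illustration) using the published Advent of Code
# 2016 Day 16 examples; they should pass for a correct mutate()/checksum().
assert mutate('1') == '100'
assert mutate('111100001010') == '1111000010100101011110000'
assert checksum('110010110100') == '100'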
while len(state) < disk_length:
state = mutate(state)
state = state[:disk_length]
print(checksum(state))
|
the-stack_106_27791 | from django.shortcuts import redirect, render
from django.views.generic import View, DetailView
from django.db.models import Q
from config.settings import AWS_ACCESS_KEY_ID, AWS_S3_REGION_NAME, AWS_SECRET_ACCESS_KEY, AWS_STORAGE_BUCKET_NAME
import boto3
from boto3.session import Session
from datetime import datetime
import glob, os
from crud.models import Cat, CatImage, Comment
class AddView(View):
def get(self, request, *args, **kwargs):
return render(request, 'cat_add.html')
def post(self, request, *args, **kwargs):
catname=request.POST['catname']
friendly=request.POST['friendly']
gender=request.POST['gender']
color=request.POST['color']
neutering=request.POST['neutering']
location_lat=request.POST['location_lat']
location_lon=request.POST['location_lon']
upload_user=request.user
image = request.FILES.getlist('img')
cat_locations = '{0:0.3f}'.format(float(location_lat)) + '{0:0.3f}'.format(float(location_lon))
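        # The key rounds both coordinates to 3 decimal places and concatenates
        # them, e.g. lat=37.5, lon=127.0 -> '37.500127.000'; cats recorded near
        # the same spot therefore share one location file below.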
path = './crud/cat_location/*'
location_file_lists = glob.glob(path)
location_file_names = []
for location_file_list in location_file_lists:
file_path = os.path.splitext(location_file_list)[0]
location_file_names.append(file_path.split('/')[-1])
if not catname or not friendly:
            content = {'state': True, 'error': 'Some fields are empty. Please fill in every field!'}
            return render(request, 'cat_add.html', content)
        # if not type(friendly) == int:
        #     content = {'state': True, 'error': 'Only numbers can be entered for the friendliness score!'}
        #     return render(request, 'cat_add.html', content)
if cat_locations in location_file_names:
with open('./crud/cat_location/{}.txt'.format(cat_locations), 'r') as f:
cat_list = f.readlines()
cat_pk_list = cat_list[0].split(',')[:-1]
cat_lists = []
for cat_pk in cat_pk_list:
cat = Cat.objects.filter(pk=int(cat_pk)).first()
cat_lists.append(cat)
return render(request, 'overlap.html', {
'cat_lists': cat_lists,
'catname': catname,
'friendly': friendly,
'gender': gender,
'color': color,
'neutering': neutering,
'location_lat': location_lat,
'location_lon': location_lon,
'upload_user': upload_user,
'image': image,
})
cat = Cat.objects.create(
catname=catname,
friendly=friendly,
gender=gender,
color=color,
neutering=neutering,
location_lat=location_lat,
location_lon=location_lon,
upload_user=upload_user,
)
files = request.FILES.getlist('img')
session = Session(
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_S3_REGION_NAME,
)
s3 = session.resource('s3')
now = datetime.now().strftime('%Y%H%M%S')
s3_url="https://django-cat-project.s3.ap-northeast-2.amazonaws.com/"
for file in files:
s3.Bucket(AWS_STORAGE_BUCKET_NAME).put_object(
Key=now+file.name,
Body=file
)
CatImage.objects.create(
cat=cat,
url=s3_url+now+file.name,
)
with open('./crud/cat_location/{}.txt'.format(cat_locations), 'a') as f:
f.write(str(cat.pk))
f.write(',')
return redirect('index')
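# Note (added): CheckedView appears to handle the form submitted from
# 'overlap.html' after the user confirms the cat is not a duplicate, so it
# creates the record without repeating the location-file duplicate check.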
class CheckedView(View):
def get(self, request, *args, **kwargs):
return redirect('index')
def post(self, request, *args, **kwargs):
catname=request.POST['catname']
friendly=request.POST['friendly']
gender=request.POST['gender']
color=request.POST['color']
neutering=request.POST['neutering']
location_lat=request.POST['location_lat']
location_lon=request.POST['location_lon']
upload_user=request.user
cat_locations = '{0:0.3f}'.format(float(location_lat)) + '{0:0.3f}'.format(float(location_lon))
cat = Cat.objects.create(
catname=catname,
friendly=friendly,
gender=gender,
color=color,
neutering=neutering,
location_lat=location_lat,
location_lon=location_lon,
upload_user=upload_user,
)
files = request.FILES.getlist('img')
session = Session(
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_S3_REGION_NAME,
)
s3 = session.resource('s3')
now = datetime.now().strftime('%Y%H%M%S')
s3_url="https://django-cat-project.s3.ap-northeast-2.amazonaws.com/"
for file in files:
s3.Bucket(AWS_STORAGE_BUCKET_NAME).put_object(
Key=now+file.name,
Body=file
)
CatImage.objects.create(
cat=cat,
url=s3_url+now+file.name,
)
with open('./crud/cat_location/{}.txt'.format(cat_locations), 'a') as f:
f.write(str(cat.pk))
f.write(',')
return redirect('index')
class CatDetailView(DetailView):
model = Cat
context_object_name = 'cat'
template_name = 'cat_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# context['cat'] = Cat.objects.filter(cat=kwargs['object'])
context['image_lists'] = CatImage.objects.filter(cat=kwargs['object'])
context['comments'] = Comment.objects.filter(cat=kwargs['object'])
return context
class EditView(View):
def get(self, request, *args, **kwargs):
cat = Cat.objects.filter(pk=kwargs['pk']).first()
return render(request, 'cat_edit.html', {'cat': cat})
def post(self, request, *args, **kwargs):
Cat.objects.filter(pk=kwargs['pk']).update(
catname=request.POST['catname'],
friendly=request.POST['friendly'],
gender=request.POST['gender'],
color=request.POST['color'],
neutering=request.POST['neutering'],
location_lat=request.POST['location_lat'],
location_lon=request.POST['location_lon'],
upload_user=request.user,
)
return redirect('crud:cat_detail', kwargs['pk'])
def CatDelete(request, pk):
cat = Cat.objects.filter(pk=pk)
cat.update(catname='deleted_cat', is_deleted=True, location_lat=0, location_lon=0)
return redirect('index')
class SearchView(View):
def get(self, request, *args, **kwargs):
keyword = request.GET.get('keyword', '')
search_cats = {}
if keyword:
search_cats = Cat.objects.filter(
Q(catname__icontains=keyword)
)
return render(request, 'search.html', { 'search_cats': search_cats })
class CommentView(View):
def get(self, request, *args, **kwargs):
return render(request, 'cat_detail.html')
def post(self, request, *args, **kwargs):
cat = Cat.objects.filter(pk=kwargs['pk']).first()
user = request.user
Comment.objects.create(
cat=cat,
user=user,
content=request.POST['content'],
)
return redirect('crud:cat_detail', kwargs['pk'])
|
the-stack_106_27792 | import container_service_extension.broker_manager as broker_manager
from container_service_extension.exceptions import ClusterAlreadyExistsError
from container_service_extension.exceptions import ClusterNotFoundError
import container_service_extension.ovdc_utils as ovdc_utils
import container_service_extension.pksbroker_manager as pks_broker_manager
from container_service_extension.server_constants import K8S_PROVIDER_KEY
from container_service_extension.server_constants import K8sProvider
from container_service_extension.server_constants import PKS_CLUSTER_DOMAIN_KEY
from container_service_extension.server_constants import PKS_PLANS_KEY
from container_service_extension.shared_constants import RequestKey
import container_service_extension.utils as utils
from container_service_extension.vcdbroker import VcdBroker
def cluster_create(request_data, tenant_auth_token):
"""Request handler for cluster create operation.
Required data: org_name, ovdc_name, cluster_name
Conditional data and default values:
if k8s_provider is 'native':
network_name, num_nodes=2, num_cpu=None, mb_memory=None,
storage_profile_name=None, template_name=default,
template_revision=default, ssh_key_filepath=None, enable_nfs=False,
rollback=True
(data validation handled in brokers)
:return: Dict
"""
required = [
RequestKey.CLUSTER_NAME
]
utils.ensure_keys_in_dict(required, request_data, dict_name='data')
cluster_name = request_data[RequestKey.CLUSTER_NAME]
# TODO HACK 'is_org_admin_search' is used here to prevent users from
# creating clusters with the same name, including clusters in PKS
# True means that the cluster list is filtered by the org name of
# the logged-in user to check that there are no duplicate clusters
request_data['is_org_admin_search'] = True
try:
broker_manager.get_cluster_and_broker(request_data, tenant_auth_token)
raise ClusterAlreadyExistsError(f"Cluster {cluster_name} "
f"already exists.")
except ClusterNotFoundError:
pass
k8s_metadata = \
ovdc_utils.get_ovdc_k8s_provider_metadata(
org_name=request_data[RequestKey.ORG_NAME],
ovdc_name=request_data[RequestKey.OVDC_NAME],
include_credentials=True,
include_nsxt_info=True)
if k8s_metadata.get(K8S_PROVIDER_KEY) == K8sProvider.PKS:
request_data[RequestKey.PKS_PLAN_NAME] = k8s_metadata[PKS_PLANS_KEY][0]
request_data['pks_ext_host'] = \
f"{cluster_name}.{k8s_metadata[PKS_CLUSTER_DOMAIN_KEY]}"
broker = broker_manager.get_broker_from_k8s_metadata(k8s_metadata,
tenant_auth_token)
return broker.create_cluster(request_data)
def cluster_resize(request_data, tenant_auth_token):
"""Request handler for cluster resize operation.
Required data: cluster_name, num_nodes
Optional data and default values: org_name=None, ovdc_name=None
Conditional data and default values:
if k8s_provider is 'native':
network_name, rollback=True
(data validation handled in brokers)
:return: Dict
"""
_, broker = broker_manager.get_cluster_info(request_data,
tenant_auth_token)
return broker.resize_cluster(request_data)
def cluster_delete(request_data, tenant_auth_token):
"""Request handler for cluster delete operation.
Required data: cluster_name
Optional data and default values: org_name=None, ovdc_name=None
(data validation handled in brokers)
:return: Dict
"""
_, broker = broker_manager.get_cluster_info(request_data,
tenant_auth_token)
return broker.delete_cluster(request_data)
def cluster_info(request_data, tenant_auth_token):
"""Request handler for cluster info operation.
Required data: cluster_name
Optional data and default values: org_name=None, ovdc_name=None
(data validation handled in brokers)
:return: Dict
"""
cluster, _ = broker_manager.get_cluster_info(request_data,
tenant_auth_token)
return cluster
def cluster_config(request_data, tenant_auth_token):
"""Request handler for cluster config operation.
Required data: cluster_name
Optional data and default values: org_name=None, ovdc_name=None
(data validation handled in brokers)
:return: Dict
"""
_, broker = broker_manager.get_cluster_info(request_data,
tenant_auth_token)
return broker.get_cluster_config(request_data)
def cluster_list(request_data, tenant_auth_token):
"""Request handler for cluster list operation.
All (vCD/PKS) brokers in the org do 'list cluster' operation.
Post-process the result returned by pks broker.
Aggregate all the results into a list.
Optional data and default values: org_name=None, ovdc_name=None
(data validation handled in brokers)
:return: List
"""
vcd_clusters_info = \
VcdBroker(tenant_auth_token).list_clusters(request_data)
pks_clusters_info = []
if utils.is_pks_enabled():
pks_clusters_info = pks_broker_manager.list_clusters(request_data,
tenant_auth_token)
all_cluster_infos = vcd_clusters_info + pks_clusters_info
common_cluster_properties = [
'name',
'vdc',
'status',
'org_name',
K8S_PROVIDER_KEY
]
result = []
for cluster_info in all_cluster_infos:
filtered_cluster_info = \
{k: cluster_info.get(k) for k in common_cluster_properties}
result.append(filtered_cluster_info)
return result
def node_create(request_data, tenant_auth_token):
"""Request handler for node create operation.
Required data: cluster_name, network_name
Optional data and default values: org_name=None, ovdc_name=None,
num_nodes=1, num_cpu=None, mb_memory=None, storage_profile_name=None,
template_name=default, template_revision=default,
ssh_key_filepath=None, rollback=True, enable_nfs=False,
(data validation handled in brokers)
:return: Dict
"""
# Currently node create is a vCD only operation.
# Different from resize because this can create nfs nodes
return VcdBroker(tenant_auth_token).create_nodes(request_data)
def node_delete(request_data, tenant_auth_token):
"""Request handler for node delete operation.
Required data: cluster_name, node_names_list
Optional data and default values: org_name=None, ovdc_name=None
(data validation handled in brokers)
:return: Dict
"""
# Currently node delete is a vCD only operation.
# TODO remove once resize is able to scale down native clusters
return VcdBroker(tenant_auth_token).delete_nodes(request_data)
def node_info(request_data, tenant_auth_token):
"""Request handler for node info operation.
Required data: cluster_name, node_name
Optional data and default values: org_name=None, ovdc_name=None
(data validation handled in brokers)
:return: Dict
"""
# Currently node info is a vCD only operation.
return VcdBroker(tenant_auth_token).get_node_info(request_data)
|
the-stack_106_27795 | #!/usr/bin/env python3
# Copyright (c) 2020 The C1pzo Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the send RPC command."""
from decimal import Decimal, getcontext
from itertools import product
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create
from test_framework.test_framework import C1pzoTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_raises_rpc_error,
)
class WalletSendTest(C1pzoTestFramework):
def set_test_params(self):
self.num_nodes = 2
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["-whitelist=127.0.0.1","-walletrbf=1"],
["-whitelist=127.0.0.1","-walletrbf=1"],
]
getcontext().prec = 8 # Satoshi precision for Decimal
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_send(self, from_wallet, to_wallet=None, amount=None, data=None,
arg_conf_target=None, arg_estimate_mode=None, arg_fee_rate=None,
conf_target=None, estimate_mode=None, fee_rate=None, add_to_wallet=None, psbt=None,
inputs=None, add_inputs=None, change_address=None, change_position=None, change_type=None,
include_watching=None, locktime=None, lock_unspents=None, replaceable=None, subtract_fee_from_outputs=None,
expect_error=None):
assert (amount is None) != (data is None)
from_balance_before = from_wallet.getbalance()
if to_wallet is None:
assert amount is None
else:
to_untrusted_pending_before = to_wallet.getbalances()["mine"]["untrusted_pending"]
if amount:
dest = to_wallet.getnewaddress()
outputs = {dest: amount}
else:
outputs = {"data": data}
# Construct options dictionary
options = {}
if add_to_wallet is not None:
options["add_to_wallet"] = add_to_wallet
else:
if psbt:
add_to_wallet = False
else:
add_to_wallet = from_wallet.getwalletinfo()["private_keys_enabled"] # Default value
if psbt is not None:
options["psbt"] = psbt
if conf_target is not None:
options["conf_target"] = conf_target
if estimate_mode is not None:
options["estimate_mode"] = estimate_mode
if fee_rate is not None:
options["fee_rate"] = fee_rate
if inputs is not None:
options["inputs"] = inputs
if add_inputs is not None:
options["add_inputs"] = add_inputs
if change_address is not None:
options["change_address"] = change_address
if change_position is not None:
options["change_position"] = change_position
if change_type is not None:
options["change_type"] = change_type
if include_watching is not None:
options["include_watching"] = include_watching
if locktime is not None:
options["locktime"] = locktime
if lock_unspents is not None:
options["lock_unspents"] = lock_unspents
if replaceable is None:
replaceable = True # default
else:
options["replaceable"] = replaceable
if subtract_fee_from_outputs is not None:
options["subtract_fee_from_outputs"] = subtract_fee_from_outputs
if len(options.keys()) == 0:
options = None
if expect_error is None:
res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
else:
try:
assert_raises_rpc_error(expect_error[0], expect_error[1], from_wallet.send,
outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
except AssertionError:
# Provide debug info if the test fails
self.log.error("Unexpected successful result:")
self.log.error(arg_conf_target)
self.log.error(arg_estimate_mode)
self.log.error(arg_fee_rate)
self.log.error(options)
res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
self.log.error(res)
if "txid" in res and add_to_wallet:
self.log.error("Transaction details:")
try:
tx = from_wallet.gettransaction(res["txid"])
self.log.error(tx)
self.log.error("testmempoolaccept (transaction may already be in mempool):")
self.log.error(from_wallet.testmempoolaccept([tx["hex"]]))
except JSONRPCException as exc:
self.log.error(exc)
raise
return
if locktime:
return res
if from_wallet.getwalletinfo()["private_keys_enabled"] and not include_watching:
assert_equal(res["complete"], True)
assert "txid" in res
else:
assert_equal(res["complete"], False)
assert not "txid" in res
assert "psbt" in res
if add_to_wallet and not include_watching:
# Ensure transaction exists in the wallet:
tx = from_wallet.gettransaction(res["txid"])
assert tx
assert_equal(tx["bip125-replaceable"], "yes" if replaceable else "no")
# Ensure transaction exists in the mempool:
tx = from_wallet.getrawtransaction(res["txid"], True)
assert tx
if amount:
if subtract_fee_from_outputs:
assert_equal(from_balance_before - from_wallet.getbalance(), amount)
else:
assert_greater_than(from_balance_before - from_wallet.getbalance(), amount)
else:
assert next((out for out in tx["vout"] if out["scriptPubKey"]["asm"] == "OP_RETURN 35"), None)
else:
assert_equal(from_balance_before, from_wallet.getbalance())
if to_wallet:
self.sync_mempools()
if add_to_wallet:
if not subtract_fee_from_outputs:
assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before + Decimal(amount if amount else 0))
else:
assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before)
return res
def run_test(self):
self.log.info("Setup wallets...")
# w0 is a wallet with coinbase rewards
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
# w1 is a regular wallet
self.nodes[1].createwallet(wallet_name="w1")
w1 = self.nodes[1].get_wallet_rpc("w1")
# w2 contains the private keys for w3
self.nodes[1].createwallet(wallet_name="w2", blank=True)
w2 = self.nodes[1].get_wallet_rpc("w2")
xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg"
if self.options.descriptors:
w2.importdescriptors([{
"desc": descsum_create("wpkh(" + xpriv + "/0/0/*)"),
"timestamp": "now",
"range": [0, 100],
"active": True
},{
"desc": descsum_create("wpkh(" + xpriv + "/0/1/*)"),
"timestamp": "now",
"range": [0, 100],
"active": True,
"internal": True
}])
else:
w2.sethdseed(True)
# w3 is a watch-only wallet, based on w2
self.nodes[1].createwallet(wallet_name="w3", disable_private_keys=True)
w3 = self.nodes[1].get_wallet_rpc("w3")
if self.options.descriptors:
# Match the privkeys in w2 for descriptors
res = w3.importdescriptors([{
"desc": descsum_create("wpkh(" + xpub + "/0/0/*)"),
"timestamp": "now",
"range": [0, 100],
"keypool": True,
"active": True,
"watchonly": True
},{
"desc": descsum_create("wpkh(" + xpub + "/0/1/*)"),
"timestamp": "now",
"range": [0, 100],
"keypool": True,
"active": True,
"internal": True,
"watchonly": True
}])
assert_equal(res, [{"success": True}, {"success": True}])
for _ in range(3):
a2_receive = w2.getnewaddress()
if not self.options.descriptors:
# Because legacy wallets use exclusively hardened derivation, we can't do a ranged import like we do for descriptors
a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation
res = w3.importmulti([{
"desc": w2.getaddressinfo(a2_receive)["desc"],
"timestamp": "now",
"keypool": True,
"watchonly": True
},{
"desc": w2.getaddressinfo(a2_change)["desc"],
"timestamp": "now",
"keypool": True,
"internal": True,
"watchonly": True
}])
assert_equal(res, [{"success": True}, {"success": True}])
w0.sendtoaddress(a2_receive, 10) # fund w3
self.nodes[0].generate(1)
self.sync_blocks()
if not self.options.descriptors:
# w4 has private keys enabled, but only contains watch-only keys (from w2)
# This is legacy wallet behavior only as descriptor wallets don't allow watchonly and non-watchonly things in the same wallet.
self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False)
w4 = self.nodes[1].get_wallet_rpc("w4")
for _ in range(3):
a2_receive = w2.getnewaddress()
res = w4.importmulti([{
"desc": w2.getaddressinfo(a2_receive)["desc"],
"timestamp": "now",
"keypool": False,
"watchonly": True
}])
assert_equal(res, [{"success": True}])
w0.sendtoaddress(a2_receive, 10) # fund w4
self.nodes[0].generate(1)
self.sync_blocks()
self.log.info("Send to address...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1)
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True)
self.log.info("Don't broadcast...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False)
assert(res["hex"])
self.log.info("Return PSBT...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, psbt=True)
assert(res["psbt"])
self.log.info("Create transaction that spends to address, but don't broadcast...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False)
# conf_target & estimate_mode can be set as argument or option
res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical", add_to_wallet=False)
res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=1, estimate_mode="economical", add_to_wallet=False)
assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"],
self.nodes[1].decodepsbt(res2["psbt"])["fee"])
# but not at the same time
for mode in ["unset", "economical", "conservative"]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical",
conf_target=1, estimate_mode=mode, add_to_wallet=False,
expect_error=(-8, "Pass conf_target and estimate_mode either as arguments or in the options object, but not both"))
self.log.info("Create PSBT from watch-only wallet w3, sign with w2...")
res = self.test_send(from_wallet=w3, to_wallet=w1, amount=1)
res = w2.walletprocesspsbt(res["psbt"])
assert res["complete"]
if not self.options.descriptors:
# Descriptor wallets do not allow mixed watch-only and non-watch-only things in the same wallet.
# This is specifically testing that w4 ignores its own private keys and creates a psbt with send
# which is not something that needs to be tested in descriptor wallets.
self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...")
self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds"))
res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)
res = w2.walletprocesspsbt(res["psbt"])
assert res["complete"]
self.log.info("Create OP_RETURN...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1)
self.test_send(from_wallet=w0, data="Hello World", expect_error=(-8, "Data must be hexadecimal string (not 'Hello World')"))
self.test_send(from_wallet=w0, data="23")
res = self.test_send(from_wallet=w3, data="23")
res = w2.walletprocesspsbt(res["psbt"])
assert res["complete"]
self.log.info("Test setting explicit fee rate")
res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate="1", add_to_wallet=False)
res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate="1", add_to_wallet=False)
assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"], self.nodes[1].decodepsbt(res2["psbt"])["fee"])
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=7, add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00007"))
# "unset" and None are treated the same for estimate_mode
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=2, estimate_mode="unset", add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00002"))
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=4.531, add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00004531"))
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=3, add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00003"))
# Test that passing fee_rate as both an argument and an option raises.
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=1, fee_rate=1, add_to_wallet=False,
expect_error=(-8, "Pass the fee_rate either as an argument, or in the options object, but not both"))
assert_raises_rpc_error(-8, "Use fee_rate (sat/vB) instead of feeRate", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"feeRate": 0.01})
assert_raises_rpc_error(-3, "Unexpected key totalFee", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"totalFee": 0.01})
for target, mode in product([-1, 0, 1009], ["economical", "conservative"]):
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode,
expect_error=(-8, "Invalid conf_target, must be between 1 and 1008")) # max value of 1008 per src/policy/fees.h
msg = 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"'
for target, mode in product([-1, 0], ["pzo/kb", "sat/b"]):
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode, expect_error=(-8, msg))
for mode in ["", "foo", Decimal("3.141592")]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode=mode, expect_error=(-8, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=0.1, arg_estimate_mode=mode, expect_error=(-8, msg))
assert_raises_rpc_error(-8, msg, w0.send, {w1.getnewaddress(): 1}, 0.1, mode)
for mode in ["economical", "conservative", "pzo/kb", "sat/b"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "true", "object": {"foo": "bar"}}.items():
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=v, estimate_mode=mode,
expect_error=(-3, "Expected type number for conf_target, got {}".format(k)))
# Test setting explicit fee rate just below the minimum and at zero.
self.log.info("Explicit fee rate raises RPC error 'fee rate too low' if fee_rate of 0.99999999 is passed")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=0.99999999,
expect_error=(-4, "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=0.99999999,
expect_error=(-4, "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=0,
expect_error=(-4, "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=0,
expect_error=(-4, "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"))
# TODO: Return hex if fee rate is below -maxmempool
# res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode="sat/b", add_to_wallet=False)
# assert res["hex"]
# hex = res["hex"]
# res = self.nodes[0].testmempoolaccept([hex])
# assert not res[0]["allowed"]
# assert_equal(res[0]["reject-reason"], "...") # low fee
# assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.000001"))
self.log.info("If inputs are specified, do not automatically add more...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[], add_to_wallet=False)
assert res["complete"]
utxo1 = w0.listunspent()[0]
assert_equal(utxo1["amount"], 50)
self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1],
expect_error=(-4, "Insufficient funds"))
self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=False,
expect_error=(-4, "Insufficient funds"))
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=True, add_to_wallet=False)
assert res["complete"]
self.log.info("Manual change address and position...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, change_address="not an address",
expect_error=(-5, "Change address must be a valid c1pzo address"))
change_address = w0.getnewaddress()
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address)
assert res["complete"]
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address, change_position=0)
assert res["complete"]
assert_equal(self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["addresses"], [change_address])
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_type="legacy", change_position=0)
assert res["complete"]
change_address = self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["addresses"][0]
assert change_address[0] == "m" or change_address[0] == "n"
self.log.info("Set lock time...")
height = self.nodes[0].getblockchaininfo()["blocks"]
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, locktime=height + 1)
assert res["complete"]
assert res["txid"]
txid = res["txid"]
# Although the wallet finishes the transaction, it can't be added to the mempool yet:
hex = self.nodes[0].gettransaction(res["txid"])["hex"]
res = self.nodes[0].testmempoolaccept([hex])
assert not res[0]["allowed"]
assert_equal(res[0]["reject-reason"], "non-final")
# It shouldn't be confirmed in the next block
self.nodes[0].generate(1)
assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 0)
# The mempool should allow it now:
res = self.nodes[0].testmempoolaccept([hex])
assert res[0]["allowed"]
# Don't wait for wallet to add it to the mempool:
res = self.nodes[0].sendrawtransaction(hex)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 1)
self.sync_all()
self.log.info("Lock unspents...")
utxo1 = w0.listunspent()[0]
assert_greater_than(utxo1["amount"], 1)
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False, lock_unspents=True)
assert res["complete"]
locked_coins = w0.listlockunspent()
assert_equal(len(locked_coins), 1)
# Locked coins are automatically unlocked when manually selected
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False)
assert res["complete"]
self.log.info("Replaceable...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=True)
assert res["complete"]
assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "yes")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=False)
assert res["complete"]
assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "no")
self.log.info("Subtract fee from output")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, subtract_fee_from_outputs=[0])
if __name__ == '__main__':
WalletSendTest().main()
|
the-stack_106_27797 | import glob
import math
import os
import shutil
from datetime import datetime
from datetime import timezone
import regex as re
import yaml
from feedgen.feed import FeedGenerator
from .CONSTANTS.directories import content_dir, static_dir, public_dir
from .CONSTANTS.environment import jinja_env
from .CONSTANTS.config import CONFIG
from .CONSTANTS.regex import yaml_re, header_re
from .MarkdownDocument import MarkdownDocument
from .social.social import DefaultSites, SocialLink
def build():
"""Build webpage into public directory"""
try:
# Remove existing build
files_to_remove = glob.glob(str(public_dir.relative_to(os.getcwd()).joinpath("**")))
if os.path.exists(public_dir):
for file in files_to_remove:
try:
shutil.rmtree(file)
except NotADirectoryError:
os.remove(file)
# Non-empty social links
config_links = CONFIG["index"]["socialLinks"]
# Links to appear on page
social_links = []
other_links = []
for link in config_links:
if config_links[link] != "":
if link == "linkedin":
social_links.append(SocialLink(DefaultSites.LINKEDIN, config_links["linkedin"]))
elif link == "github":
social_links.append(SocialLink(DefaultSites.GITHUB, config_links["github"]))
elif link == "gitlab":
social_links.append(SocialLink(DefaultSites.GITLAB, config_links["gitlab"]))
elif link == "twitter":
social_links.append(SocialLink(DefaultSites.TWITTER, config_links["twitter"]))
else:
other_links.append({"icon": "fa-"+link, "link": config_links[link]})
# Copy stylesheets
shutil.copytree(os.path.join(os.path.dirname(__file__), "assets"), os.path.join(public_dir,
"assets"))
# Copy static
shutil.copytree(static_dir, public_dir, dirs_exist_ok=True)
# Build index
index_page = jinja_env.get_template("index.html")
with open(os.path.join(public_dir, "index.html"), "w") as f:
f.write(index_page.render(CONFIG=CONFIG, social_links=social_links, other_links=other_links))
# Create sitemap.xml
with open(public_dir.joinpath("sitemap.xml"), "w") as sitemap:
sitemap.write("""<urlset
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">""")
# Build content
content_files = content_dir.glob("**/*.md")
# List of converted documents
documents = []
for file in content_files:
page = file.stem
# Place html in directory with name of page
directory = public_dir.joinpath(file.relative_to(content_dir).parent.joinpath(page))
os.makedirs(directory, exist_ok=True)
# Copy original file to be accessed at index.md
shutil.copy(file, os.path.join(directory, "index.md"))
# Export file
html_export = directory.joinpath("index.html")
# Convert markdown (without yaml header) to html
with open(file, "r") as src, open(os.path.join(public_dir, html_export), "w") as dest:
markdown = src.read()
yaml_data = re.findall(yaml_re, markdown)[0]
header = re.findall(header_re, markdown)[0]
text = markdown.replace(header, "")
metadata = yaml.safe_load(yaml_data)
document = MarkdownDocument(path=directory.joinpath(file.name), markdown=text, metadata=metadata)
documents.append(document)
if content_dir.joinpath("projects") in file.parents or content_dir.joinpath("posts") in file.parents:
template = jinja_env.get_template("posts/post.html")
else:
template = jinja_env.get_template("info.html")
dest.write(template.render(CONFIG=CONFIG, page_title=metadata["title"], post=document))
# Add to sitemap
if document.include_in_sitemap:
with open(public_dir.joinpath("sitemap.xml"), "a") as sitemap:
sitemap.write(
f"""
<url>
<loc>{CONFIG["base_url"]}{directory.relative_to(public_dir)}/</loc>
<lastmod>{datetime.now().strftime('%Y-%m-%dT%H:%M:%S+00:00')}</lastmod>
<changefreq>weekly</changefreq>
<priority>0.5</priority>
</url>
""")
with open(public_dir.joinpath("sitemap.xml"), "a") as sitemap:
# close sitemap
sitemap.write("</urlset>")
# Arrange posts page
posts = []
for document in documents:
if public_dir.joinpath("posts") in document.path.parents:
posts.append(document)
posts.sort(key=lambda x: datetime.timestamp(x.date), reverse=True)
# Create rss feed
fg = FeedGenerator()
fg.title(CONFIG["title"])
fg.link(href=CONFIG["base_url"], rel='alternate')
fg.author(name=CONFIG["name"], email=CONFIG["email"])
fg.logo(str(public_dir.joinpath("favicon.ico")))
fg.subtitle(CONFIG["description"])
fg.language("en")
for post in posts:
fe = fg.add_entry()
            # feedgen's FeedEntry.id is a method; call it rather than assigning,
            # otherwise the entry's guid is never set.
            fe.id(CONFIG["base_url"] + str(post.path.relative_to(public_dir)))
fe.link(href=CONFIG["base_url"] + str(post.path.relative_to(public_dir)))
# Remove html tags
fe.title(title=re.sub('<[^<]+?>', '', post.title))
fe.description(post.html if post.rss_full_text else post.description)
fe.author(name=post.author)
fe.content(post.html)
fe.pubDate(post.date.replace(tzinfo=timezone.utc))
fg.rss_file(str(public_dir.joinpath("index.xml")), pretty=True)
# Render post pages
with open(public_dir.joinpath("posts/index.html"), "w") as post_page:
next_page = f"posts/1" if 1 < math.ceil(len(posts) / 16) else None
post_page.write(jinja_env.get_template("posts/list.html").render(CONFIG=CONFIG, page_title="Posts",
posts=posts[:16], public_dir=public_dir,
next_page=next_page, prev_page=None))
            # Create 'next' and 'previous' links; posts are paginated 16 per page, and page i shows posts[i*16:(i+1)*16]
for i in range(math.ceil(len(posts) / 16)):
page = public_dir.joinpath(f"posts/{i}")
next_page = f"posts/{i + 1}" if (i + 1) < math.ceil(len(posts) / 16) else None
prev_page = f"posts/{i - 1}" if (i - 1) >= 0 else None
os.makedirs(page)
with open(page.joinpath("index.html"), "w") as project_page:
project_page.write(
jinja_env.get_template("posts/list.html").render(CONFIG=CONFIG, page_title="Posts",
posts=posts[(i * 16):(i + 1) * 16],
public_dir=public_dir,
next_page=next_page,
prev_page=prev_page))
# Arrange projects page
projects = []
for document in documents:
if public_dir.joinpath("projects") in document.path.parents:
projects.append(document)
projects.sort(key=lambda x: datetime.timestamp(x.date), reverse=True)
# Render project page
with open(public_dir.joinpath("projects/index.html"), "w") as project_page:
project_page.write(jinja_env.get_template("projects/list.html").render(CONFIG=CONFIG, page_title="Projects",
projects=projects,
public_dir=public_dir))
except FileNotFoundError as e:
print(f"{e.filename} was not found, have you ran init?")
except KeyError as e:
print(f"{e.args[0]} was not found in config, please add this field or reinitialise")
|